vendor: golang.org/x/net v0.0.0-20200707034311-ab3426394381
full diff: f3200d17e0...ab34263943
Worth mentioning that there's a comment updated in golang.org/x/net/websocket:
This package currently lacks some features found in alternative
and more actively maintained WebSocket packages:
https://godoc.org/github.com/gorilla/websocket
https://godoc.org/nhooyr.io/websocket
It's used in k8s.io/apiserver/pkg/util/wsstream/stream.go, so perhaps that usage should
be reviewed to see whether the alternatives are a better fit for how it's used.
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
This commit is contained in:
parent 5a9a6a145e
commit f80b4dc586
@@ -45,7 +45,7 @@ github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1
 github.com/urfave/cli v1.22.1 # NOTE: urfave/cli must be <= v1.22.1 due to a regression: https://github.com/urfave/cli/issues/1092
 go.etcd.io/bbolt v1.3.5
 go.opencensus.io v0.22.0
-golang.org/x/net f3200d17e092c607f615320ecaad13d87ad9a2b3
+golang.org/x/net ab34263943818b32f575efc978a3d24e80b04bd7
 golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
 golang.org/x/sys 9dae0f8f577553e0f21298e18926efc9644c281d
 golang.org/x/text v0.3.3
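The pin above uses the vendor.conf (vndr) format. Purely as an illustration, and assuming a hypothetical Go-modules layout rather than the vendor.conf setup actually used here, the same bump could be expressed with the standard go tooling:

go get golang.org/x/net@ab34263943818b32f575efc978a3d24e80b04bd7
go mod vendor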
6  vendor/golang.org/x/net/go.mod  (generated, vendored)

@@ -1,7 +1,9 @@
 module golang.org/x/net

 go 1.11

 require (
-	golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
-	golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a
+	golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
+	golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd
+	golang.org/x/text v0.3.0
+)
8  vendor/golang.org/x/net/http2/client_conn_pool.go  (generated, vendored)

@@ -107,6 +107,7 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis

 // dialCall is an in-flight Transport dial call to a host.
 type dialCall struct {
+	_ incomparable
 	p    *clientConnPool
 	done chan struct{} // closed when done
 	res  *ClientConn   // valid after done is closed
@@ -180,6 +181,7 @@ func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn)
 }

 type addConnCall struct {
+	_ incomparable
 	p    *clientConnPool
 	done chan struct{} // closed when done
 	err  error
@@ -200,12 +202,6 @@ func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
 	close(c.done)
 }

-func (p *clientConnPool) addConn(key string, cc *ClientConn) {
-	p.mu.Lock()
-	p.addConnLocked(key, cc)
-	p.mu.Unlock()
-}
-
 // p.mu must be held
 func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
 	for _, v := range p.conns[key] {
2  vendor/golang.org/x/net/http2/flow.go  (generated, vendored)

@@ -8,6 +8,8 @@ package http2

 // flow is the flow control window's size.
 type flow struct {
+	_ incomparable
+
 	// n is the number of DATA bytes we're allowed to send.
 	// A flow is kept both on a conn and a per-stream.
 	n int32
2  vendor/golang.org/x/net/http2/hpack/encode.go  (generated, vendored)

@@ -150,7 +150,7 @@ func appendIndexed(dst []byte, i uint64) []byte {
 // extended buffer.
 //
 // If f.Sensitive is true, "Never Indexed" representation is used. If
-// f.Sensitive is false and indexing is true, "Inremental Indexing"
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
 // representation is used.
 func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
 	dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
7  vendor/golang.org/x/net/http2/hpack/huffman.go  (generated, vendored)

@@ -105,7 +105,14 @@ func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
 	return nil
 }

+// incomparable is a zero-width, non-comparable type. Adding it to a struct
+// makes that struct also non-comparable, and generally doesn't add
+// any size (as long as it's first).
+type incomparable [0]func()
+
 type node struct {
+	_ incomparable
+
 	// children is non-nil for internal nodes
 	children *[256]*node
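As an aside, here is a minimal standalone sketch (the cacheKey type and its field are made up, not part of this diff) of what the incomparable marker introduced above does: a zero-length array of func() costs no space when placed first, but makes the enclosing struct non-comparable, so accidental == comparisons or use as a map key fail at compile time.

package main

import (
	"fmt"
	"unsafe"
)

// incomparable mirrors the helper added in the diff: a zero-width,
// non-comparable type.
type incomparable [0]func()

// cacheKey is a hypothetical struct that embeds the marker.
type cacheKey struct {
	_    incomparable
	name string
}

func main() {
	k := cacheKey{name: "a"}
	// The marker adds no size as long as it is the first field.
	fmt.Println(unsafe.Sizeof(k)) // same size as a struct holding only `name string`
	// Writing `k == cacheKey{name: "a"}` would now be a compile-time error,
	// which is the point: values of this type are not meant to be compared.
}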
13  vendor/golang.org/x/net/http2/http2.go  (generated, vendored)

@@ -19,7 +19,6 @@ package http2 // import "golang.org/x/net/http2"
 import (
 	"bufio"
 	"crypto/tls"
-	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -173,11 +172,6 @@ func (s SettingID) String() string {
 	return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
 }

-var (
-	errInvalidHeaderFieldName  = errors.New("http2: invalid header field name")
-	errInvalidHeaderFieldValue = errors.New("http2: invalid header field value")
-)
-
 // validWireHeaderFieldName reports whether v is a valid header field
 // name (key). See httpguts.ValidHeaderName for the base rules.
 //
@@ -247,6 +241,7 @@ func (cw closeWaiter) Wait() {
 // Its buffered writer is lazily allocated as needed, to minimize
 // idle memory usage with many connections.
 type bufferedWriter struct {
+	_  incomparable
 	w  io.Writer     // immutable
 	bw *bufio.Writer // non-nil when data is buffered
 }
@@ -319,6 +314,7 @@ func bodyAllowedForStatus(status int) bool {
 }

 type httpError struct {
+	_       incomparable
 	msg     string
 	timeout bool
 }
@@ -382,3 +378,8 @@ func (s *sorter) SortStrings(ss []string) {
 func validPseudoPath(v string) bool {
 	return (len(v) > 0 && v[0] == '/') || v == "*"
 }
+
+// incomparable is a zero-width, non-comparable type. Adding it to a struct
+// makes that struct also non-comparable, and generally doesn't add
+// any size (as long as it's first).
+type incomparable [0]func()
7  vendor/golang.org/x/net/http2/pipe.go  (generated, vendored)

@@ -17,6 +17,7 @@ type pipe struct {
 	mu       sync.Mutex
 	c        sync.Cond     // c.L lazily initialized to &p.mu
 	b        pipeBuffer    // nil when done reading
+	unread   int           // bytes unread when done
 	err      error         // read error once empty. non-nil means closed.
 	breakErr error         // immediate read error (caller doesn't see rest of b)
 	donec    chan struct{} // closed on error
@@ -33,7 +34,7 @@ func (p *pipe) Len() int {
 	p.mu.Lock()
 	defer p.mu.Unlock()
 	if p.b == nil {
-		return 0
+		return p.unread
 	}
 	return p.b.Len()
 }
@@ -80,6 +81,7 @@ func (p *pipe) Write(d []byte) (n int, err error) {
 		return 0, errClosedPipeWrite
 	}
 	if p.breakErr != nil {
+		p.unread += len(d)
 		return len(d), nil // discard when there is no reader
 	}
 	return p.b.Write(d)
@@ -117,6 +119,9 @@ func (p *pipe) closeWithError(dst *error, err error, fn func()) {
 	}
 	p.readFn = fn
 	if dst == &p.breakErr {
+		if p.b != nil {
+			p.unread += p.b.Len()
+		}
 		p.b = nil
 	}
 	*dst = err
87  vendor/golang.org/x/net/http2/server.go  (generated, vendored)

@@ -56,6 +56,7 @@ const (
 	firstSettingsTimeout  = 2 * time.Second // should be in-flight with preface anyway
 	handlerChunkWriteSize = 4 << 10
 	defaultMaxStreams     = 250 // TODO: make this 100 as the GFE seems to?
+	maxQueuedControlFrames = 10000
 )

 var (
@@ -163,6 +164,15 @@ func (s *Server) maxConcurrentStreams() uint32 {
 	return defaultMaxStreams
 }

+// maxQueuedControlFrames is the maximum number of control frames like
+// SETTINGS, PING and RST_STREAM that will be queued for writing before
+// the connection is closed to prevent memory exhaustion attacks.
+func (s *Server) maxQueuedControlFrames() int {
+	// TODO: if anybody asks, add a Server field, and remember to define the
+	// behavior of negative values.
+	return maxQueuedControlFrames
+}
+
 type serverInternalState struct {
 	mu          sync.Mutex
 	activeConns map[*serverConn]struct{}
@@ -242,7 +252,7 @@ func ConfigureServer(s *http.Server, conf *Server) error {
 		}
 	}
 	if !haveRequired {
-		return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.")
+		return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).")
 	}
 }

@@ -273,7 +283,20 @@ func ConfigureServer(s *http.Server, conf *Server) error {
 		if testHookOnConn != nil {
 			testHookOnConn()
 		}
+		// The TLSNextProto interface predates contexts, so
+		// the net/http package passes down its per-connection
+		// base context via an exported but unadvertised
+		// method on the Handler. This is for internal
+		// net/http<=>http2 use only.
+		var ctx context.Context
+		type baseContexter interface {
+			BaseContext() context.Context
+		}
+		if bc, ok := h.(baseContexter); ok {
+			ctx = bc.BaseContext()
+		}
 		conf.ServeConn(c, &ServeConnOpts{
+			Context:    ctx,
 			Handler:    h,
 			BaseConfig: hs,
 		})
@@ -284,6 +307,10 @@ func ConfigureServer(s *http.Server, conf *Server) error {

 // ServeConnOpts are options for the Server.ServeConn method.
 type ServeConnOpts struct {
+	// Context is the base context to use.
+	// If nil, context.Background is used.
+	Context context.Context
+
 	// BaseConfig optionally sets the base configuration
 	// for values. If nil, defaults are used.
 	BaseConfig *http.Server
@@ -294,6 +321,13 @@ type ServeConnOpts struct {
 	Handler http.Handler
 }

+func (o *ServeConnOpts) context() context.Context {
+	if o != nil && o.Context != nil {
+		return o.Context
+	}
+	return context.Background()
+}
+
 func (o *ServeConnOpts) baseConfig() *http.Server {
 	if o != nil && o.BaseConfig != nil {
 		return o.BaseConfig
@@ -439,7 +473,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
 }

 func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) {
-	ctx, cancel = context.WithCancel(context.Background())
+	ctx, cancel = context.WithCancel(opts.context())
 	ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
 	if hs := opts.baseConfig(); hs != nil {
 		ctx = context.WithValue(ctx, http.ServerContextKey, hs)
@@ -482,6 +516,7 @@ type serverConn struct {
 	sawFirstSettings      bool // got the initial SETTINGS frame after the preface
 	needToSendSettingsAck bool
 	unackedSettings       int    // how many SETTINGS have we sent without ACKs?
+	queuedControlFrames   int    // control frames in the writeSched queue
 	clientMaxStreams      uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
 	advMaxStreams         uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
 	curClientStreams      uint32 // number of open streams initiated by the client
@@ -550,9 +585,6 @@ type stream struct {
 	declBodyBytes    int64 // or -1 if undeclared
 	flow             flow  // limits writing from Handler to client
 	inflow           flow  // what the client is allowed to POST/etc to us
-	parent           *stream // or nil
-	numTrailerValues int64
-	weight           uint8
 	state            streamState
 	resetQueued      bool // RST_STREAM queued for write; set by sc.resetStream
 	gotTrailerHeader bool // HEADER frame for trailers was seen
@@ -729,6 +761,7 @@ func (sc *serverConn) readFrames() {

 // frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
 type frameWriteResult struct {
+	_   incomparable
 	wr  FrameWriteRequest // what was written (or attempted)
 	err error             // result of the writeFrame call
 }
@@ -739,7 +772,7 @@ type frameWriteResult struct {
 // serverConn.
 func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) {
 	err := wr.write.writeFrame(sc)
-	sc.wroteFrameCh <- frameWriteResult{wr, err}
+	sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err}
 }

 func (sc *serverConn) closeAllStreamsOnConnClose() {
@@ -870,6 +903,14 @@ func (sc *serverConn) serve() {
 			}
 		}

+		// If the peer is causing us to generate a lot of control frames,
+		// but not reading them from us, assume they are trying to make us
+		// run out of memory.
+		if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() {
+			sc.vlogf("http2: too many control frames in send queue, closing connection")
+			return
+		}
+
 		// Start the shutdown timer after sending a GOAWAY. When sending GOAWAY
 		// with no error code (graceful shutdown), don't start the timer until
 		// all open streams have been completed.
@@ -1069,6 +1110,14 @@ func (sc *serverConn) writeFrame(wr FrameWriteRequest) {
 	}

 	if !ignoreWrite {
+		if wr.isControl() {
+			sc.queuedControlFrames++
+			// For extra safety, detect wraparounds, which should not happen,
+			// and pull the plug.
+			if sc.queuedControlFrames < 0 {
+				sc.conn.Close()
+			}
+		}
 		sc.writeSched.Push(wr)
 	}
 	sc.scheduleFrameWrite()
@@ -1113,7 +1162,7 @@ func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
 	if wr.write.staysWithinBuffer(sc.bw.Available()) {
 		sc.writingFrameAsync = false
 		err := wr.write.writeFrame(sc)
-		sc.wroteFrame(frameWriteResult{wr, err})
+		sc.wroteFrame(frameWriteResult{wr: wr, err: err})
 	} else {
 		sc.writingFrameAsync = true
 		go sc.writeFrameAsync(wr)
@@ -1186,10 +1235,8 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
 // If a frame is already being written, nothing happens. This will be called again
 // when the frame is done being written.
 //
-// If a frame isn't being written we need to send one, the best frame
-// to send is selected, preferring first things that aren't
-// stream-specific (e.g. ACKing settings), and then finding the
-// highest priority stream.
+// If a frame isn't being written and we need to send one, the best frame
+// to send is selected by writeSched.
 //
 // If a frame isn't being written and there's nothing else to send, we
 // flush the write buffer.
@@ -1217,6 +1264,9 @@ func (sc *serverConn) scheduleFrameWrite() {
 		}
 		if !sc.inGoAway || sc.goAwayCode == ErrCodeNo {
 			if wr, ok := sc.writeSched.Pop(); ok {
+				if wr.isControl() {
+					sc.queuedControlFrames--
+				}
 				sc.startFrameWrite(wr)
 				continue
 			}
@@ -1509,6 +1559,8 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error {
 	if err := f.ForeachSetting(sc.processSetting); err != nil {
 		return err
 	}
+	// TODO: judging by RFC 7540, Section 6.5.3 each SETTINGS frame should be
+	// acknowledged individually, even if multiple are received before the ACK.
 	sc.needToSendSettingsAck = true
 	sc.scheduleFrameWrite()
 	return nil
@@ -2006,7 +2058,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r
 	var trailer http.Header
 	for _, v := range rp.header["Trailer"] {
 		for _, key := range strings.Split(v, ",") {
-			key = http.CanonicalHeaderKey(strings.TrimSpace(key))
+			key = http.CanonicalHeaderKey(textproto.TrimString(key))
 			switch key {
 			case "Transfer-Encoding", "Trailer", "Content-Length":
 				// Bogus. (copy of http1 rules)
@@ -2224,6 +2276,7 @@ func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
 // requestBody is the Handler's Request.Body type.
 // Read and Close may be called concurrently.
 type requestBody struct {
+	_      incomparable
 	stream *stream
 	conn   *serverConn
 	closed bool // for use by Close only
@@ -2361,7 +2414,11 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
 			clen = strconv.Itoa(len(p))
 		}
 		_, hasContentType := rws.snapHeader["Content-Type"]
-		if !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 {
+		// If the Content-Encoding is non-blank, we shouldn't
+		// sniff the body. See Issue golang.org/issue/31753.
+		ce := rws.snapHeader.Get("Content-Encoding")
+		hasCE := len(ce) > 0
+		if !hasCE && !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 {
 			ctype = http.DetectContentType(p)
 		}
 		var date string
@@ -2470,7 +2527,7 @@ const TrailerPrefix = "Trailer:"
 // trailers. That worked for a while, until we found the first major
 // user of Trailers in the wild: gRPC (using them only over http2),
 // and gRPC libraries permit setting trailers mid-stream without
-// predeclarnig them. So: change of plans. We still permit the old
+// predeclaring them. So: change of plans. We still permit the old
 // way, but we also permit this hack: if a Header() key begins with
 // "Trailer:", the suffix of that key is a Trailer. Because ':' is an
 // invalid token byte anyway, there is no ambiguity. (And it's already
@@ -2770,7 +2827,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) {
 	// PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
 	// is in either the "open" or "half-closed (remote)" state.
 	if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
-		// responseWriter.Push checks that the stream is peer-initiaed.
+		// responseWriter.Push checks that the stream is peer-initiated.
 		msg.done <- errStreamClosed
 		return
 	}
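The server-side change above threads a base context from net/http into the http2 server via the new ServeConnOpts.Context field and the unexported baseContexter hook. A minimal sketch of how this surfaces to callers (addresses, certificate paths, and the handler body are placeholders; assumes Go 1.13+ where http.Server.BaseContext exists):

package main

import (
	"context"
	"log"
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{
		Addr: ":8443",
		// With the change above, this base context should be picked up by the
		// configured http2 server through ServeConnOpts.Context, so request
		// contexts on HTTP/2 connections descend from it too.
		BaseContext: func(net.Listener) context.Context {
			return context.Background()
		},
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// r.Context() descends from the BaseContext above.
			w.Write([]byte("hello"))
		}),
	}
	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
		log.Fatal(err)
	}
	// cert.pem and key.pem are placeholder paths.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}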
172  vendor/golang.org/x/net/http2/transport.go  (generated, vendored)

@@ -93,7 +93,7 @@ type Transport struct {
 	// send in the initial settings frame. It is how many bytes
 	// of response headers are allowed. Unlike the http2 spec, zero here
 	// means to use a default limit (currently 10MB). If you actually
-	// want to advertise an ulimited value to the peer, Transport
+	// want to advertise an unlimited value to the peer, Transport
 	// interprets the highest possible value here (0xffffffff or 1<<32-1)
 	// to mean no limit.
 	MaxHeaderListSize uint32
@@ -108,6 +108,19 @@ type Transport struct {
 	// waiting for their turn.
 	StrictMaxConcurrentStreams bool

+	// ReadIdleTimeout is the timeout after which a health check using ping
+	// frame will be carried out if no frame is received on the connection.
+	// Note that a ping response will is considered a received frame, so if
+	// there is no other traffic on the connection, the health check will
+	// be performed every ReadIdleTimeout interval.
+	// If zero, no health check is performed.
+	ReadIdleTimeout time.Duration
+
+	// PingTimeout is the timeout after which the connection will be closed
+	// if a response to Ping is not received.
+	// Defaults to 15s.
+	PingTimeout time.Duration
+
 	// t1, if non-nil, is the standard library Transport using
 	// this transport. Its settings are used (but not its
 	// RoundTrip method, etc).
@@ -131,6 +144,14 @@ func (t *Transport) disableCompression() bool {
 	return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
 }

+func (t *Transport) pingTimeout() time.Duration {
+	if t.PingTimeout == 0 {
+		return 15 * time.Second
+	}
+	return t.PingTimeout
+
+}
+
 // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
 // It returns an error if t1 has already been HTTP/2-enabled.
 func ConfigureTransport(t1 *http.Transport) error {
@@ -227,6 +248,7 @@ type ClientConn struct {
 	br         *bufio.Reader
 	fr         *Framer
 	lastActive time.Time
+	lastIdle   time.Time // time last idle
 	// Settings from peer: (also guarded by mu)
 	maxFrameSize         uint32
 	maxConcurrentStreams uint32
@@ -603,7 +625,7 @@ func (t *Transport) expectContinueTimeout() time.Duration {
 }

 func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
-	return t.newClientConn(c, false)
+	return t.newClientConn(c, t.disableKeepAlives())
 }

 func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
@@ -674,6 +696,20 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
 	return cc, nil
 }

+func (cc *ClientConn) healthCheck() {
+	pingTimeout := cc.t.pingTimeout()
+	// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
+	// trigger the healthCheck again if there is no frame received.
+	ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
+	defer cancel()
+	err := cc.Ping(ctx)
+	if err != nil {
+		cc.closeForLostPing()
+		cc.t.connPool().MarkDead(cc)
+		return
+	}
+}
+
 func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
 	cc.mu.Lock()
 	defer cc.mu.Unlock()
@@ -736,7 +772,8 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
 	}

 	st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
-		int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32
+		int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
+		!cc.tooIdleLocked()
 	st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest
 	return
 }
@@ -746,6 +783,16 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool {
 	return st.canTakeNewRequest
 }

+// tooIdleLocked reports whether this connection has been been sitting idle
+// for too much wall time.
+func (cc *ClientConn) tooIdleLocked() bool {
+	// The Round(0) strips the monontonic clock reading so the
+	// times are compared based on their wall time. We don't want
+	// to reuse a connection that's been sitting idle during
+	// VM/laptop suspend if monotonic time was also frozen.
+	return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
+}
+
 // onIdleTimeout is called from a time.AfterFunc goroutine. It will
 // only be called when we're idle, but because we're coming from a new
 // goroutine, there could be a new request coming in at the same time,
@@ -834,14 +881,12 @@ func (cc *ClientConn) sendGoAway() error {
 	return nil
 }

-// Close closes the client connection immediately.
-//
-// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
-func (cc *ClientConn) Close() error {
+// closes the client connection immediately. In-flight requests are interrupted.
+// err is sent to streams.
+func (cc *ClientConn) closeForError(err error) error {
 	cc.mu.Lock()
 	defer cc.cond.Broadcast()
 	defer cc.mu.Unlock()
-	err := errors.New("http2: client connection force closed via ClientConn.Close")
 	for id, cs := range cc.streams {
 		select {
 		case cs.resc <- resAndError{err: err}:
@@ -854,6 +899,20 @@ func (cc *ClientConn) Close() error {
 	return cc.tconn.Close()
 }

+// Close closes the client connection immediately.
+//
+// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
+func (cc *ClientConn) Close() error {
+	err := errors.New("http2: client connection force closed via ClientConn.Close")
+	return cc.closeForError(err)
+}
+
+// closes the client connection immediately. In-flight requests are interrupted.
+func (cc *ClientConn) closeForLostPing() error {
+	err := errors.New("http2: client connection lost")
+	return cc.closeForError(err)
+}
+
 const maxAllocFrameSize = 512 << 10

 // frameBuffer returns a scratch buffer suitable for writing DATA frames.
@@ -904,7 +963,7 @@ func commaSeparatedTrailers(req *http.Request) (string, error) {
 		k = http.CanonicalHeaderKey(k)
 		switch k {
 		case "Transfer-Encoding", "Trailer", "Content-Length":
-			return "", &badStringError{"invalid Trailer key", k}
+			return "", fmt.Errorf("invalid Trailer key %q", k)
 		}
 		keys = append(keys, k)
 	}
@@ -992,7 +1051,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
 		req.Method != "HEAD" {
 		// Request gzip only, not deflate. Deflate is ambiguous and
 		// not as universally supported anyway.
-		// See: http://www.gzip.org/zlib/zlib_faq.html#faq38
+		// See: https://zlib.net/zlib_faq.html#faq39
 		//
 		// Note that we don't request this for HEAD requests,
 		// due to a bug in nginx:
@@ -1150,6 +1209,7 @@ func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error {
 			}
 			return errClientConnUnusable
 		}
+		cc.lastIdle = time.Time{}
 		if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) {
 			if waitingForConn != nil {
 				close(waitingForConn)
@@ -1216,6 +1276,8 @@ var (

 	// abort request body write, but send stream reset of cancel.
 	errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
+
+	errReqBodyTooLong = errors.New("http2: request body larger than specified content length")
 )

 func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
@@ -1238,10 +1300,32 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (

 	req := cs.req
 	hasTrailers := req.Trailer != nil
+	remainLen := actualContentLength(req)
+	hasContentLen := remainLen != -1

 	var sawEOF bool
 	for !sawEOF {
-		n, err := body.Read(buf)
+		n, err := body.Read(buf[:len(buf)-1])
+		if hasContentLen {
+			remainLen -= int64(n)
+			if remainLen == 0 && err == nil {
+				// The request body's Content-Length was predeclared and
+				// we just finished reading it all, but the underlying io.Reader
+				// returned the final chunk with a nil error (which is one of
+				// the two valid things a Reader can do at EOF). Because we'd prefer
+				// to send the END_STREAM bit early, double-check that we're actually
+				// at EOF. Subsequent reads should return (0, EOF) at this point.
+				// If either value is different, we return an error in one of two ways below.
+				var n1 int
+				n1, err = body.Read(buf[n:])
+				remainLen -= int64(n1)
+			}
+			if remainLen < 0 {
+				err = errReqBodyTooLong
+				cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
+				return err
+			}
+		}
 		if err == io.EOF {
 			sawEOF = true
 			err = nil
@@ -1357,13 +1441,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
 	}
 }

-type badStringError struct {
-	what string
-	str  string
-}
-
-func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) }
-
 // requires cc.mu be held.
 func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
 	cc.hbuf.Reset()
@@ -1454,7 +1531,29 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
 			if vv[0] == "" {
 				continue
 			}

+		} else if strings.EqualFold(k, "cookie") {
+			// Per 8.1.2.5 To allow for better compression efficiency, the
+			// Cookie header field MAY be split into separate header fields,
+			// each with one or more cookie-pairs.
+			for _, v := range vv {
+				for {
+					p := strings.IndexByte(v, ';')
+					if p < 0 {
+						break
+					}
+					f("cookie", v[:p])
+					p++
+					// strip space after semicolon if any.
+					for p+1 <= len(v) && v[p] == ' ' {
+						p++
+					}
+					v = v[p:]
+				}
+				if len(v) > 0 {
+					f("cookie", v)
+				}
+			}
+			continue
 		}

 		for _, v := range vv {
@@ -1557,6 +1656,7 @@ func (cc *ClientConn) writeHeader(name, value string) {
 }

 type resAndError struct {
+	_   incomparable
 	res *http.Response
 	err error
 }
@@ -1592,6 +1692,7 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
 		delete(cc.streams, id)
 		if len(cc.streams) == 0 && cc.idleTimer != nil {
 			cc.idleTimer.Reset(cc.idleTimeout)
+			cc.lastIdle = time.Now()
 		}
 		close(cs.done)
 		// Wake up checkResetOrDone via clientStream.awaitFlowControl and
@@ -1603,6 +1704,7 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {

 // clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
 type clientConnReadLoop struct {
+	_             incomparable
 	cc            *ClientConn
 	closeWhenIdle bool
 }
@@ -1682,8 +1784,17 @@ func (rl *clientConnReadLoop) run() error {
 	rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse
 	gotReply := false // ever saw a HEADERS reply
 	gotSettings := false
+	readIdleTimeout := cc.t.ReadIdleTimeout
+	var t *time.Timer
+	if readIdleTimeout != 0 {
+		t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
+		defer t.Stop()
+	}
 	for {
 		f, err := cc.fr.ReadFrame()
+		if t != nil {
+			t.Reset(readIdleTimeout)
+		}
 		if err != nil {
 			cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
 		}
@@ -1832,7 +1943,9 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
 		return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header")
 	}

-	header := make(http.Header)
+	regularFields := f.RegularFields()
+	strs := make([]string, len(regularFields))
+	header := make(http.Header, len(regularFields))
 	res := &http.Response{
 		Proto:      "HTTP/2.0",
 		ProtoMajor: 2,
@@ -1840,7 +1953,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
 		StatusCode: statusCode,
 		Status:     status + " " + http.StatusText(statusCode),
 	}
-	for _, hf := range f.RegularFields() {
+	for _, hf := range regularFields {
 		key := http.CanonicalHeaderKey(hf.Name)
 		if key == "Trailer" {
 			t := res.Trailer
@@ -1852,7 +1965,18 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
 				t[http.CanonicalHeaderKey(v)] = nil
 			})
 		} else {
-			header[key] = append(header[key], hf.Value)
+			vv := header[key]
+			if vv == nil && len(strs) > 0 {
+				// More than likely this will be a single-element key.
+				// Most headers aren't multi-valued.
+				// Set the capacity on strs[0] to 1, so any future append
+				// won't extend the slice into the other strings.
+				vv, strs = strs[:1:1], strs[1:]
+				vv[0] = hf.Value
+				header[key] = vv
+			} else {
+				header[key] = append(vv, hf.Value)
+			}
 		}
 	}

@@ -2138,8 +2262,6 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
 	return nil
 }

-var errInvalidTrailers = errors.New("http2: invalid trailers")
-
 func (rl *clientConnReadLoop) endStream(cs *clientStream) {
 	// TODO: check that any declared content-length matches, like
 	// server.go's (*stream).endStream method.
@@ -2370,7 +2492,6 @@ func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error)
 var (
 	errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
 	errRequestHeaderListSize  = errors.New("http2: request header list larger than peer's advertised limit")
-	errPseudoTrailers         = errors.New("http2: invalid pseudo header in trailers")
 )

 func (cc *ClientConn) logf(format string, args ...interface{}) {
@@ -2409,6 +2530,7 @@ func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) {
 // gzipReader wraps a response body so it can lazily
 // call gzip.NewReader on the first call to Read
 type gzipReader struct {
+	_    incomparable
 	body io.ReadCloser // underlying Response.Body
 	zr   *gzip.Reader  // lazily-initialized gzip reader
 	zerr error         // sticky error
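The ReadIdleTimeout/PingTimeout fields added above enable the new client-side connection health check: if no frame arrives for ReadIdleTimeout, the transport sends a PING and marks the connection dead when no reply arrives within PingTimeout. A minimal sketch of how a caller might opt in (the URL and timeout values are placeholders, not part of the diff):

package main

import (
	"fmt"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	t := &http2.Transport{
		ReadIdleTimeout: 30 * time.Second, // start a ping health check after 30s of silence
		PingTimeout:     10 * time.Second, // give the peer 10s to answer the ping
	}
	client := &http.Client{Transport: t}
	resp, err := client.Get("https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("proto:", resp.Proto)
}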
8  vendor/golang.org/x/net/http2/writesched.go  (generated, vendored)

@@ -32,7 +32,7 @@ type WriteScheduler interface {

 	// Pop dequeues the next frame to write. Returns false if no frames can
 	// be written. Frames with a given wr.StreamID() are Pop'd in the same
-	// order they are Push'd.
+	// order they are Push'd. No frames should be discarded except by CloseStream.
 	Pop() (wr FrameWriteRequest, ok bool)
 }

@@ -76,6 +76,12 @@ func (wr FrameWriteRequest) StreamID() uint32 {
 	return wr.stream.id
 }

+// isControl reports whether wr is a control frame for MaxQueuedControlFrames
+// purposes. That includes non-stream frames and RST_STREAM frames.
+func (wr FrameWriteRequest) isControl() bool {
+	return wr.stream == nil
+}
+
 // DataSize returns the number of flow control bytes that must be consumed
 // to write this entire frame. This is 0 for non-DATA frames.
 func (wr FrameWriteRequest) DataSize() int {
2  vendor/golang.org/x/net/http2/writesched_priority.go  (generated, vendored)

@@ -149,7 +149,7 @@ func (n *priorityNode) addBytes(b int64) {
 }

 // walkReadyInOrder iterates over the tree in priority order, calling f for each node
-// with a non-empty write queue. When f returns true, this funcion returns true and the
+// with a non-empty write queue. When f returns true, this function returns true and the
 // walk halts. tmp is used as scratch space for sorting.
 //
 // f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
9  vendor/golang.org/x/net/http2/writesched_random.go  (generated, vendored)

@@ -19,7 +19,8 @@ type randomWriteScheduler struct {
 	zero writeQueue

 	// sq contains the stream-specific queues, keyed by stream ID.
-	// When a stream is idle or closed, it's deleted from the map.
+	// When a stream is idle, closed, or emptied, it's deleted
+	// from the map.
 	sq map[uint32]*writeQueue

 	// pool of empty queues for reuse.
@@ -63,8 +64,12 @@ func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) {
 		return ws.zero.shift(), true
 	}
 	// Iterate over all non-idle streams until finding one that can be consumed.
-	for _, q := range ws.sq {
+	for streamID, q := range ws.sq {
 		if wr, ok := q.consume(math.MaxInt32); ok {
+			if q.empty() {
+				delete(ws.sq, streamID)
+				ws.queuePool.put(q)
+			}
 			return wr, true
 		}
 	}
2  vendor/golang.org/x/net/idna/tables11.0.0.go  (generated, vendored)

@@ -1,6 +1,6 @@
 // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.

-// +build go1.13
+// +build go1.13,!go1.14

 package idna
4733  vendor/golang.org/x/net/idna/tables12.00.go  (generated, vendored; new normal file)

File diff suppressed because it is too large.
6  vendor/golang.org/x/net/internal/timeseries/timeseries.go  (generated, vendored)

@@ -403,9 +403,9 @@ func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, resu

 	// Where should scanning start?
 	if dstStart.After(srcStart) {
-		advance := dstStart.Sub(srcStart) / srcInterval
-		srcIndex += int(advance)
-		srcStart = srcStart.Add(advance * srcInterval)
+		advance := int(dstStart.Sub(srcStart) / srcInterval)
+		srcIndex += advance
+		srcStart = srcStart.Add(time.Duration(advance) * srcInterval)
 	}

 	// The i'th value is computed as show below.
6  vendor/golang.org/x/net/websocket/websocket.go  (generated, vendored)

@@ -5,11 +5,11 @@
 // Package websocket implements a client and server for the WebSocket protocol
 // as specified in RFC 6455.
 //
-// This package currently lacks some features found in an alternative
-// and more actively maintained WebSocket package:
+// This package currently lacks some features found in alternative
+// and more actively maintained WebSocket packages:
 //
 // https://godoc.org/github.com/gorilla/websocket
+//
+// https://godoc.org/nhooyr.io/websocket
 package websocket // import "golang.org/x/net/websocket"

 import (
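For context on the package-comment change above and the wsstream note in the commit message, here is a minimal sketch (the route and echo behavior are made up) of the golang.org/x/net/websocket API that callers such as k8s.io/apiserver's wsstream build on; the gorilla and nhooyr packages mentioned in the comment offer richer replacements for the same role:

package main

import (
	"io"
	"log"
	"net/http"

	"golang.org/x/net/websocket"
)

func main() {
	// websocket.Handler upgrades the HTTP request and hands us a *websocket.Conn.
	echo := websocket.Handler(func(ws *websocket.Conn) {
		// Echo every frame back to the client until it disconnects.
		io.Copy(ws, ws)
	})
	http.Handle("/echo", echo)
	log.Fatal(http.ListenAndServe(":8080", nil))
}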