updated vendor

vendor/golang.org/x/net/http2/flow.go | 88 (generated, vendored)
@@ -6,23 +6,91 @@
 package http2
 
-// flow is the flow control window's size.
-type flow struct {
+// inflowMinRefresh is the minimum number of bytes we'll send for a
+// flow control window update.
+const inflowMinRefresh = 4 << 10
+
+// inflow accounts for an inbound flow control window.
+// It tracks both the latest window sent to the peer (used for enforcement)
+// and the accumulated unsent window.
+type inflow struct {
+	avail  int32
+	unsent int32
+}
+
+// init sets the initial window.
+func (f *inflow) init(n int32) {
+	f.avail = n
+}
+
+// add adds n bytes to the window, with a maximum window size of max,
+// indicating that the peer can now send us more data.
+// For example, the user read from a {Request,Response} body and consumed
+// some of the buffered data, so the peer can now send more.
+// It returns the number of bytes to send in a WINDOW_UPDATE frame to the peer.
+// Window updates are accumulated and sent when the unsent capacity
+// is at least inflowMinRefresh or will at least double the peer's available window.
+func (f *inflow) add(n int) (connAdd int32) {
+	if n < 0 {
+		panic("negative update")
+	}
+	unsent := int64(f.unsent) + int64(n)
+	// "A sender MUST NOT allow a flow-control window to exceed 2^31-1 octets."
+	// RFC 7540 Section 6.9.1.
+	const maxWindow = 1<<31 - 1
+	if unsent+int64(f.avail) > maxWindow {
+		panic("flow control update exceeds maximum window size")
+	}
+	f.unsent = int32(unsent)
+	if f.unsent < inflowMinRefresh && f.unsent < f.avail {
+		// If there aren't at least inflowMinRefresh bytes of window to send,
+		// and this update won't at least double the window, buffer the update for later.
+		return 0
+	}
+	f.avail += f.unsent
+	f.unsent = 0
+	return int32(unsent)
+}
+
+// take attempts to take n bytes from the peer's flow control window.
+// It reports whether the window has available capacity.
+func (f *inflow) take(n uint32) bool {
+	if n > uint32(f.avail) {
+		return false
+	}
+	f.avail -= int32(n)
+	return true
+}
+
+// takeInflows attempts to take n bytes from two inflows,
+// typically connection-level and stream-level flows.
+// It reports whether both windows have available capacity.
+func takeInflows(f1, f2 *inflow, n uint32) bool {
+	if n > uint32(f1.avail) || n > uint32(f2.avail) {
+		return false
+	}
+	f1.avail -= int32(n)
+	f2.avail -= int32(n)
+	return true
+}
+
+// outflow is the outbound flow control window's size.
+type outflow struct {
 	_ incomparable
 
 	// n is the number of DATA bytes we're allowed to send.
-	// A flow is kept both on a conn and a per-stream.
+	// An outflow is kept both on a conn and a per-stream.
 	n int32
 
-	// conn points to the shared connection-level flow that is
-	// shared by all streams on that conn. It is nil for the flow
+	// conn points to the shared connection-level outflow that is
+	// shared by all streams on that conn. It is nil for the outflow
 	// that's on the conn directly.
-	conn *flow
+	conn *outflow
 }
 
-func (f *flow) setConnFlow(cf *flow) { f.conn = cf }
+func (f *outflow) setConnFlow(cf *outflow) { f.conn = cf }
 
-func (f *flow) available() int32 {
+func (f *outflow) available() int32 {
 	n := f.n
 	if f.conn != nil && f.conn.n < n {
 		n = f.conn.n
@@ -30,7 +98,7 @@ func (f *flow) available() int32 {
 	return n
 }
 
-func (f *flow) take(n int32) {
+func (f *outflow) take(n int32) {
 	if n > f.available() {
 		panic("internal error: took too much")
 	}
@@ -42,7 +110,7 @@ func (f *flow) take(n int32) {
 
 // add adds n bytes (positive or negative) to the flow control window.
 // It returns false if the sum would exceed 2^31-1.
-func (f *flow) add(n int32) bool {
+func (f *outflow) add(n int32) bool {
 	sum := f.n + n
 	if (sum > n) == (f.n > 0) {
 		f.n = sum
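
The core of this update is the split of the old flow type into outflow (the old outbound window logic, renamed) and the new inflow for the inbound window. inflow batches WINDOW_UPDATE credit: add accumulates consumed bytes in unsent and only releases them once they reach inflowMinRefresh (4 KiB) or would at least double the peer's usable window, and takeInflows debits the connection-level and stream-level windows in one step. The standalone sketch below mirrors that behavior with simplified copies of the types above (overflow and negative-update checks omitted; this is an illustration, not the vendored package's API):

package main

import "fmt"

const inflowMinRefresh = 4 << 10

// inflow is a simplified copy of the type added above.
type inflow struct {
	avail  int32
	unsent int32
}

func (f *inflow) init(n int32) { f.avail = n }

// add returns the WINDOW_UPDATE increment to send now, or 0 if the
// credit is small enough to keep buffering.
func (f *inflow) add(n int) int32 {
	f.unsent += int32(n)
	if f.unsent < inflowMinRefresh && f.unsent < f.avail {
		return 0
	}
	send := f.unsent
	f.avail += f.unsent
	f.unsent = 0
	return send
}

// takeInflows mirrors the combined connection+stream debit added above.
func takeInflows(f1, f2 *inflow, n uint32) bool {
	if n > uint32(f1.avail) || n > uint32(f2.avail) {
		return false
	}
	f1.avail -= int32(n)
	f2.avail -= int32(n)
	return true
}

func main() {
	var conn, stream inflow
	conn.init(65535)
	stream.init(65535)

	// A 16 KiB DATA frame arrives: debit both windows in one step.
	fmt.Println(takeInflows(&conn, &stream, 16<<10)) // true

	// The body is consumed 1 KiB at a time: credit is batched, so a
	// WINDOW_UPDATE is only suggested once 4 KiB has accumulated.
	for i := 1; i <= 8; i++ {
		if send := stream.add(1 << 10); send != 0 {
			fmt.Printf("after %d KiB consumed: send WINDOW_UPDATE %d\n", i, send)
		}
	}
}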

vendor/golang.org/x/net/http2/frame.go | 11 (generated, vendored)
@@ -662,6 +662,15 @@ func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
 // It is the caller's responsibility not to violate the maximum frame size
 // and to not call other Write methods concurrently.
 func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
+	if err := f.startWriteDataPadded(streamID, endStream, data, pad); err != nil {
+		return err
+	}
+	return f.endWrite()
+}
+
+// startWriteDataPadded is WriteDataPadded, but only writes the frame to the Framer's internal buffer.
+// The caller should call endWrite to flush the frame to the underlying writer.
+func (f *Framer) startWriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
 	if !validStreamID(streamID) && !f.AllowIllegalWrites {
 		return errStreamID
 	}
@@ -691,7 +700,7 @@ func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []by
 	}
 	f.wbuf = append(f.wbuf, data...)
 	f.wbuf = append(f.wbuf, pad...)
-	return f.endWrite()
+	return nil
 }
 
 // A SettingsFrame conveys configuration parameters that affect how
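
The WriteDataPadded change splits the old method into a staging half (startWriteDataPadded, which only encodes into the Framer's internal buffer) and a flushing half (endWrite). As the server.go hunks later in this commit show (see the go.dev/issue/58446 reference there), the point is that the copy of the caller's data happens synchronously, so the flush can run on another goroutine without retaining a reference to the caller's slice. A toy sketch of that pattern, with invented names standing in for the unexported Framer internals:

package main

import (
	"bytes"
	"fmt"
)

// framer is a stand-in for the real Framer's wbuf/endWrite mechanics.
type framer struct {
	wbuf []byte        // staged frame bytes
	w    *bytes.Buffer // underlying writer
}

// stage copies the payload into the internal buffer without writing it
// out, like startWriteDataPadded.
func (f *framer) stage(payload []byte) {
	f.wbuf = append(f.wbuf[:0], payload...)
}

// flush writes the staged bytes to the underlying writer, like endWrite.
// It never touches the caller's payload slice, so it is safe to run
// after the caller has reused that slice.
func (f *framer) flush() error {
	_, err := f.w.Write(f.wbuf)
	return err
}

func main() {
	f := &framer{w: &bytes.Buffer{}}
	data := []byte("hello")
	f.stage(data) // synchronous copy, as in the serve goroutine
	data[0] = 'X' // caller may now scribble on its buffer
	if err := f.flush(); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", f.w.String()) // "hello", not "Xello"
}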

vendor/golang.org/x/net/http2/hpack/hpack.go | 87 (generated, vendored)
@@ -211,7 +211,7 @@ func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
 	return dt.ents[dt.len()-(int(i)-staticTable.len())], true
 }
 
-// Decode decodes an entire block.
+// DecodeFull decodes an entire block.
 //
 // TODO: remove this method and make it incremental later? This is
 // easier for debugging now.
@@ -359,6 +359,7 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
 
 	var hf HeaderField
 	wantStr := d.emitEnabled || it.indexed()
+	var undecodedName undecodedString
 	if nameIdx > 0 {
 		ihf, ok := d.at(nameIdx)
 		if !ok {
@@ -366,15 +367,27 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
 		}
 		hf.Name = ihf.Name
 	} else {
-		hf.Name, buf, err = d.readString(buf, wantStr)
+		undecodedName, buf, err = d.readString(buf)
 		if err != nil {
 			return err
 		}
 	}
-	hf.Value, buf, err = d.readString(buf, wantStr)
+	undecodedValue, buf, err := d.readString(buf)
 	if err != nil {
 		return err
 	}
+	if wantStr {
+		if nameIdx <= 0 {
+			hf.Name, err = d.decodeString(undecodedName)
+			if err != nil {
+				return err
+			}
+		}
+		hf.Value, err = d.decodeString(undecodedValue)
+		if err != nil {
+			return err
+		}
+	}
 	d.buf = buf
 	if it.indexed() {
 		d.dynTab.add(hf)
@@ -459,46 +472,52 @@ func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
 	return 0, origP, errNeedMore
 }
 
-// readString decodes an hpack string from p.
+// readString reads an hpack string from p.
 //
-// wantStr is whether s will be used. If false, decompression and
-// []byte->string garbage are skipped if s will be ignored
-// anyway. This does mean that huffman decoding errors for non-indexed
-// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
-// is returning an error anyway, and because they're not indexed, the error
-// won't affect the decoding state.
-func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
+// It returns a reference to the encoded string data to permit deferring decode costs
+// until after the caller verifies all data is present.
+func (d *Decoder) readString(p []byte) (u undecodedString, remain []byte, err error) {
 	if len(p) == 0 {
-		return "", p, errNeedMore
+		return u, p, errNeedMore
 	}
 	isHuff := p[0]&128 != 0
 	strLen, p, err := readVarInt(7, p)
 	if err != nil {
-		return "", p, err
+		return u, p, err
 	}
 	if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
-		return "", nil, ErrStringLength
+		// Returning an error here means Huffman decoding errors
+		// for non-indexed strings past the maximum string length
+		// are ignored, but the server is returning an error anyway
+		// and because the string is not indexed the error will not
+		// affect the decoding state.
+		return u, nil, ErrStringLength
 	}
 	if uint64(len(p)) < strLen {
-		return "", p, errNeedMore
+		return u, p, errNeedMore
 	}
-	if !isHuff {
-		if wantStr {
-			s = string(p[:strLen])
-		}
-		return s, p[strLen:], nil
-	}
-
-	if wantStr {
-		buf := bufPool.Get().(*bytes.Buffer)
-		buf.Reset() // don't trust others
-		defer bufPool.Put(buf)
-		if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
-			buf.Reset()
-			return "", nil, err
-		}
-		s = buf.String()
-		buf.Reset() // be nice to GC
-	}
-	return s, p[strLen:], nil
+	u.isHuff = isHuff
+	u.b = p[:strLen]
+	return u, p[strLen:], nil
 }
+
+type undecodedString struct {
+	isHuff bool
+	b      []byte
+}
+
+func (d *Decoder) decodeString(u undecodedString) (string, error) {
+	if !u.isHuff {
+		return string(u.b), nil
+	}
+	buf := bufPool.Get().(*bytes.Buffer)
+	buf.Reset() // don't trust others
+	var s string
+	err := huffmanDecode(buf, d.maxStrLen, u.b)
+	if err == nil {
+		s = buf.String()
+	}
+	buf.Reset() // be nice to GC
	bufPool.Put(buf)
+	return s, err
+}
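
The hpack change defers string decoding: readString now returns an undecodedString that merely references the raw, possibly Huffman-coded bytes, and parseFieldLiteral calls decodeString only after every part of the field literal has been read successfully, and only when the strings are actually wanted. A rough standalone sketch of that two-phase shape (the one-byte length prefix below is a toy stand-in for the real varint length and Huffman flag):

package main

import (
	"errors"
	"fmt"
)

// undecodedString mirrors the type added above: a reference to raw bytes
// plus a flag saying whether they still need Huffman decoding.
type undecodedString struct {
	isHuff bool
	b      []byte
}

// readString reads one length-prefixed string without decoding it.
func readString(p []byte) (undecodedString, []byte, error) {
	if len(p) == 0 {
		return undecodedString{}, p, errors.New("need more")
	}
	n := int(p[0])
	if len(p) < 1+n {
		return undecodedString{}, p, errors.New("need more")
	}
	return undecodedString{b: p[1 : 1+n]}, p[1+n:], nil
}

// decodeString pays the decode cost only when the caller wants the value.
func decodeString(u undecodedString) (string, error) {
	if !u.isHuff {
		return string(u.b), nil
	}
	return "", errors.New("huffman decoding omitted in this sketch")
}

func main() {
	buf := []byte{4, 'n', 'a', 'm', 'e', 5, 'v', 'a', 'l', 'u', 'e'}

	// Phase 1: read both strings; any error aborts before decoding.
	name, rest, err := readString(buf)
	if err != nil {
		panic(err)
	}
	value, _, err := readString(rest)
	if err != nil {
		panic(err)
	}

	// Phase 2: decode, mirroring the wantStr block in parseFieldLiteral.
	n, _ := decodeString(name)
	v, _ := decodeString(value)
	fmt.Println(n, v) // name value
}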

vendor/golang.org/x/net/http2/server.go | 107 (generated, vendored)
@@ -448,7 +448,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
 	// configured value for inflow, that will be updated when we send a
 	// WINDOW_UPDATE shortly after sending SETTINGS.
 	sc.flow.add(initialWindowSize)
-	sc.inflow.add(initialWindowSize)
+	sc.inflow.init(initialWindowSize)
 	sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
 	sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize())
 
@@ -563,8 +563,8 @@ type serverConn struct {
 	wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
 	bodyReadCh chan bodyReadMsg // from handlers -> serve
 	serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop
-	flow flow // conn-wide (not stream-specific) outbound flow control
-	inflow flow // conn-wide inbound flow control
+	flow outflow // conn-wide (not stream-specific) outbound flow control
+	inflow inflow // conn-wide inbound flow control
 	tlsState *tls.ConnectionState // shared by all handlers, like net/http
 	remoteAddrStr string
 	writeSched WriteScheduler
@@ -641,10 +641,10 @@ type stream struct {
 	cancelCtx func()
 
 	// owned by serverConn's serve loop:
-	bodyBytes int64 // body bytes seen so far
-	declBodyBytes int64 // or -1 if undeclared
-	flow flow // limits writing from Handler to client
-	inflow flow // what the client is allowed to POST/etc to us
+	bodyBytes int64 // body bytes seen so far
+	declBodyBytes int64 // or -1 if undeclared
+	flow outflow // limits writing from Handler to client
+	inflow inflow // what the client is allowed to POST/etc to us
 	state streamState
 	resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
 	gotTrailerHeader bool // HEADER frame for trailers was seen
@@ -843,8 +843,13 @@ type frameWriteResult struct {
 // and then reports when it's done.
 // At most one goroutine can be running writeFrameAsync at a time per
 // serverConn.
-func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) {
-	err := wr.write.writeFrame(sc)
+func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
+	var err error
+	if wd == nil {
+		err = wr.write.writeFrame(sc)
+	} else {
+		err = sc.framer.endWrite()
+	}
 	sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err}
 }
 
@@ -1251,9 +1256,16 @@ func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
 		sc.writingFrameAsync = false
 		err := wr.write.writeFrame(sc)
 		sc.wroteFrame(frameWriteResult{wr: wr, err: err})
+	} else if wd, ok := wr.write.(*writeData); ok {
+		// Encode the frame in the serve goroutine, to ensure we don't have
+		// any lingering asynchronous references to data passed to Write.
+		// See https://go.dev/issue/58446.
+		sc.framer.startWriteDataPadded(wd.streamID, wd.endStream, wd.p, nil)
+		sc.writingFrameAsync = true
+		go sc.writeFrameAsync(wr, wd)
 	} else {
 		sc.writingFrameAsync = true
-		go sc.writeFrameAsync(wr)
+		go sc.writeFrameAsync(wr, nil)
 	}
 }
 
@@ -1503,7 +1515,7 @@ func (sc *serverConn) processFrame(f Frame) error {
 	if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || f.Header().StreamID > sc.maxClientStreamID) {
 
 		if f, ok := f.(*DataFrame); ok {
-			if sc.inflow.available() < int32(f.Length) {
+			if !sc.inflow.take(f.Length) {
 				return sc.countError("data_flow", streamError(f.Header().StreamID, ErrCodeFlowControl))
 			}
 			sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
@@ -1775,14 +1787,9 @@ func (sc *serverConn) processData(f *DataFrame) error {
 		// But still enforce their connection-level flow control,
 		// and return any flow control bytes since we're not going
 		// to consume them.
-		if sc.inflow.available() < int32(f.Length) {
+		if !sc.inflow.take(f.Length) {
 			return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
 		}
-		// Deduct the flow control from inflow, since we're
-		// going to immediately add it back in
-		// sendWindowUpdate, which also schedules sending the
-		// frames.
-		sc.inflow.take(int32(f.Length))
 		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
 
 		if st != nil && st.resetQueued {
@@ -1797,10 +1804,9 @@ func (sc *serverConn) processData(f *DataFrame) error {
 
 	// Sender sending more than they'd declared?
 	if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
-		if sc.inflow.available() < int32(f.Length) {
+		if !sc.inflow.take(f.Length) {
 			return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
 		}
-		sc.inflow.take(int32(f.Length))
 		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
 
 		st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
@@ -1811,10 +1817,9 @@ func (sc *serverConn) processData(f *DataFrame) error {
 	}
 	if f.Length > 0 {
 		// Check whether the client has flow control quota.
-		if st.inflow.available() < int32(f.Length) {
+		if !takeInflows(&sc.inflow, &st.inflow, f.Length) {
 			return sc.countError("flow_on_data_length", streamError(id, ErrCodeFlowControl))
 		}
-		st.inflow.take(int32(f.Length))
 
 		if len(data) > 0 {
 			wrote, err := st.body.Write(data)
@@ -1830,10 +1835,12 @@ func (sc *serverConn) processData(f *DataFrame) error {
 
 		// Return any padded flow control now, since we won't
 		// refund it later on body reads.
-		if pad := int32(f.Length) - int32(len(data)); pad > 0 {
-			sc.sendWindowUpdate32(nil, pad)
-			sc.sendWindowUpdate32(st, pad)
-		}
+		// Call sendWindowUpdate even if there is no padding,
+		// to return buffered flow control credit if the sent
+		// window has shrunk.
+		pad := int32(f.Length) - int32(len(data))
+		sc.sendWindowUpdate32(nil, pad)
+		sc.sendWindowUpdate32(st, pad)
 	}
 	if f.StreamEnded() {
 		st.endStream()
@@ -2105,8 +2112,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
 	st.cw.Init()
 	st.flow.conn = &sc.flow // link to conn-level counter
 	st.flow.add(sc.initialStreamSendWindowSize)
-	st.inflow.conn = &sc.inflow // link to conn-level counter
-	st.inflow.add(sc.srv.initialStreamRecvWindowSize())
+	st.inflow.init(sc.srv.initialStreamRecvWindowSize())
 	if sc.hs.WriteTimeout != 0 {
 		st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
 	}
@@ -2198,7 +2204,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r
 		tlsState = sc.tlsState
 	}
 
-	needsContinue := rp.header.Get("Expect") == "100-continue"
+	needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue")
 	if needsContinue {
 		rp.header.Del("Expect")
 	}
@@ -2388,47 +2394,28 @@ func (sc *serverConn) noteBodyRead(st *stream, n int) {
 }
 
 // st may be nil for conn-level
-func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
-	sc.serveG.check()
-	// "The legal range for the increment to the flow control
-	// window is 1 to 2^31-1 (2,147,483,647) octets."
-	// A Go Read call on 64-bit machines could in theory read
-	// a larger Read than this. Very unlikely, but we handle it here
-	// rather than elsewhere for now.
-	const maxUint31 = 1<<31 - 1
-	for n > maxUint31 {
-		sc.sendWindowUpdate32(st, maxUint31)
-		n -= maxUint31
-	}
-	sc.sendWindowUpdate32(st, int32(n))
+func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
+	sc.sendWindowUpdate(st, int(n))
 }
 
 // st may be nil for conn-level
-func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
+func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
 	sc.serveG.check()
-	if n == 0 {
+	var streamID uint32
+	var send int32
+	if st == nil {
+		send = sc.inflow.add(n)
+	} else {
+		streamID = st.id
+		send = st.inflow.add(n)
+	}
+	if send == 0 {
 		return
 	}
-	if n < 0 {
-		panic("negative update")
-	}
-	var streamID uint32
-	if st != nil {
-		streamID = st.id
-	}
 	sc.writeFrame(FrameWriteRequest{
-		write:  writeWindowUpdate{streamID: streamID, n: uint32(n)},
+		write:  writeWindowUpdate{streamID: streamID, n: uint32(send)},
 		stream: st,
 	})
-	var ok bool
-	if st == nil {
-		ok = sc.inflow.add(n)
-	} else {
-		ok = st.inflow.add(n)
-	}
-	if !ok {
-		panic("internal error; sent too many window updates without decrements?")
-	}
 }
 
 // requestBody is the Handler's Request.Body type.
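
Besides the flow control rework, one small behavioral change in server.go is worth noting: the Expect header check in newWriterAndRequestNoBody now uses httpguts.HeaderValuesContainsToken instead of comparing the raw header value for equality, so "100-continue" is matched as a case-insensitive token. A quick demonstration using the vendored golang.org/x/net/http/httpguts package:

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/http/httpguts"
)

func main() {
	h := http.Header{}
	h.Set("Expect", "100-Continue") // different case than the literal token

	// Old check: exact string equality against the raw header value.
	fmt.Println(h.Get("Expect") == "100-continue") // false

	// New check: token matching, which is case-insensitive.
	fmt.Println(httpguts.HeaderValuesContainsToken(h["Expect"], "100-continue")) // true
}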

vendor/golang.org/x/net/http2/transport.go | 88 (generated, vendored)
@@ -47,10 +47,6 @@ const (
 	// we buffer per stream.
 	transportDefaultStreamFlow = 4 << 20
 
-	// transportDefaultStreamMinRefresh is the minimum number of bytes we'll send
-	// a stream-level WINDOW_UPDATE for at a time.
-	transportDefaultStreamMinRefresh = 4 << 10
-
 	defaultUserAgent = "Go-http-client/2.0"
 
 	// initialMaxConcurrentStreams is a connections maxConcurrentStreams until
@@ -310,8 +306,8 @@ type ClientConn struct {
 
 	mu sync.Mutex // guards following
 	cond *sync.Cond // hold mu; broadcast on flow/closed changes
-	flow flow // our conn-level flow control quota (cs.flow is per stream)
-	inflow flow // peer's conn-level flow control
+	flow outflow // our conn-level flow control quota (cs.outflow is per stream)
+	inflow inflow // peer's conn-level flow control
 	doNotReuse bool // whether conn is marked to not be reused for any future requests
 	closing bool
 	closed bool
@@ -376,10 +372,10 @@ type clientStream struct {
 	respHeaderRecv chan struct{} // closed when headers are received
 	res *http.Response // set if respHeaderRecv is closed
 
-	flow flow // guarded by cc.mu
-	inflow flow // guarded by cc.mu
-	bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
-	readErr error // sticky read error; owned by transportResponseBody.Read
+	flow outflow // guarded by cc.mu
+	inflow inflow // guarded by cc.mu
+	bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
+	readErr error // sticky read error; owned by transportResponseBody.Read
 
 	reqBody io.ReadCloser
 	reqBodyContentLength int64 // -1 means unknown
@@ -811,7 +807,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
 	cc.bw.Write(clientPreface)
 	cc.fr.WriteSettings(initialSettings...)
 	cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
-	cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
+	cc.inflow.init(transportDefaultConnFlow + initialWindowSize)
 	cc.bw.Flush()
 	if cc.werr != nil {
 		cc.Close()
@@ -1573,7 +1569,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
 	close(cs.donec)
 }
 
-// awaitOpenSlotForStream waits until len(streams) < maxConcurrentStreams.
+// awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams.
 // Must hold cc.mu.
 func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error {
 	for {
@@ -2073,8 +2069,7 @@ type resAndError struct {
 func (cc *ClientConn) addStreamLocked(cs *clientStream) {
 	cs.flow.add(int32(cc.initialWindowSize))
 	cs.flow.setConnFlow(&cc.flow)
-	cs.inflow.add(transportDefaultStreamFlow)
-	cs.inflow.setConnFlow(&cc.inflow)
+	cs.inflow.init(transportDefaultStreamFlow)
 	cs.ID = cc.nextStreamID
 	cc.nextStreamID += 2
 	cc.streams[cs.ID] = cs
@@ -2533,21 +2528,10 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) {
 	}
 
 	cc.mu.Lock()
-	var connAdd, streamAdd int32
-	// Check the conn-level first, before the stream-level.
-	if v := cc.inflow.available(); v < transportDefaultConnFlow/2 {
-		connAdd = transportDefaultConnFlow - v
-		cc.inflow.add(connAdd)
-	}
+	connAdd := cc.inflow.add(n)
+	var streamAdd int32
 	if err == nil { // No need to refresh if the stream is over or failed.
-		// Consider any buffered body data (read from the conn but not
-		// consumed by the client) when computing flow control for this
-		// stream.
-		v := int(cs.inflow.available()) + cs.bufPipe.Len()
-		if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh {
-			streamAdd = int32(transportDefaultStreamFlow - v)
-			cs.inflow.add(streamAdd)
-		}
+		streamAdd = cs.inflow.add(n)
 	}
 	cc.mu.Unlock()
 
@@ -2575,17 +2559,15 @@ func (b transportResponseBody) Close() error {
 	if unread > 0 {
 		cc.mu.Lock()
 		// Return connection-level flow control.
-		if unread > 0 {
-			cc.inflow.add(int32(unread))
-		}
+		connAdd := cc.inflow.add(unread)
 		cc.mu.Unlock()
 
 		// TODO(dneil): Acquiring this mutex can block indefinitely.
 		// Move flow control return to a goroutine?
 		cc.wmu.Lock()
 		// Return connection-level flow control.
-		if unread > 0 {
-			cc.fr.WriteWindowUpdate(0, uint32(unread))
+		if connAdd > 0 {
+			cc.fr.WriteWindowUpdate(0, uint32(connAdd))
 		}
 		cc.bw.Flush()
 		cc.wmu.Unlock()
@@ -2628,13 +2610,18 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
 		// But at least return their flow control:
 		if f.Length > 0 {
 			cc.mu.Lock()
-			cc.inflow.add(int32(f.Length))
+			ok := cc.inflow.take(f.Length)
+			connAdd := cc.inflow.add(int(f.Length))
 			cc.mu.Unlock()
-
-			cc.wmu.Lock()
-			cc.fr.WriteWindowUpdate(0, uint32(f.Length))
-			cc.bw.Flush()
-			cc.wmu.Unlock()
+			if !ok {
+				return ConnectionError(ErrCodeFlowControl)
+			}
+			if connAdd > 0 {
+				cc.wmu.Lock()
+				cc.fr.WriteWindowUpdate(0, uint32(connAdd))
+				cc.bw.Flush()
+				cc.wmu.Unlock()
+			}
 		}
 		return nil
 	}
@@ -2665,9 +2652,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
 		}
 		// Check connection-level flow control.
 		cc.mu.Lock()
-		if cs.inflow.available() >= int32(f.Length) {
-			cs.inflow.take(int32(f.Length))
-		} else {
+		if !takeInflows(&cc.inflow, &cs.inflow, f.Length) {
 			cc.mu.Unlock()
 			return ConnectionError(ErrCodeFlowControl)
 		}
@@ -2689,19 +2674,20 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
 			}
 		}
 
-		if refund > 0 {
-			cc.inflow.add(int32(refund))
-			if !didReset {
-				cs.inflow.add(int32(refund))
-			}
+		sendConn := cc.inflow.add(refund)
+		var sendStream int32
+		if !didReset {
+			sendStream = cs.inflow.add(refund)
 		}
 		cc.mu.Unlock()
 
-		if refund > 0 {
+		if sendConn > 0 || sendStream > 0 {
 			cc.wmu.Lock()
-			cc.fr.WriteWindowUpdate(0, uint32(refund))
-			if !didReset {
-				cc.fr.WriteWindowUpdate(cs.ID, uint32(refund))
+			if sendConn > 0 {
+				cc.fr.WriteWindowUpdate(0, uint32(sendConn))
+			}
+			if sendStream > 0 {
+				cc.fr.WriteWindowUpdate(cs.ID, uint32(sendStream))
 			}
 			cc.bw.Flush()
 			cc.wmu.Unlock()

vendor/golang.org/x/net/trace/histogram.go | 2 (generated, vendored)
@@ -32,7 +32,7 @@ type histogram struct {
 	valueCount int64 // number of values recorded for single value
 }
 
-// AddMeasurement records a value measurement observation to the histogram.
+// addMeasurement records a value measurement observation to the histogram.
 func (h *histogram) addMeasurement(value int64) {
 	// TODO: assert invariant
 	h.sum += value