1. update clientset and deepcopy code using code-generator
2. add a dummy file tools.go to force "go mod vendor" to see
   code-generator as a dependency (see the sketch below)
3. add a script to update the CRD
4. add a README to document the CRD update steps

run go mod tidy
update README
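The tools.go file in step 2 follows the common Go "tools file" pattern: a build tag keeps the file out of every normal build, while its blank imports make "go mod tidy" and "go mod vendor" treat code-generator as a required dependency. A minimal sketch of such a file, assuming the generators in use are client-gen and deepcopy-gen (the actual tools.go in this commit may pin different code-generator packages):

// +build tools

// Package tools pins build-time dependencies. The "tools" build tag
// excludes this file from normal builds; the blank imports exist only
// so "go mod tidy" and "go mod vendor" retain the code generators.
package tools

import (
	// Assumed entry points; the real tools.go may import others.
	_ "k8s.io/code-generator/cmd/client-gen"
	_ "k8s.io/code-generator/cmd/deepcopy-gen"
)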
xiangqian
2019-12-03 01:22:21 -08:00
parent 90533183e4
commit 728e29aa7e
1128 changed files with 167705 additions and 5135 deletions

vendor/golang.org/x/net/http2/server.go

@@ -52,10 +52,11 @@ import (
 )
 
 const (
-	prefaceTimeout        = 10 * time.Second
-	firstSettingsTimeout  = 2 * time.Second // should be in-flight with preface anyway
-	handlerChunkWriteSize = 4 << 10
-	defaultMaxStreams     = 250 // TODO: make this 100 as the GFE seems to?
+	prefaceTimeout         = 10 * time.Second
+	firstSettingsTimeout   = 2 * time.Second // should be in-flight with preface anyway
+	handlerChunkWriteSize  = 4 << 10
+	defaultMaxStreams      = 250 // TODO: make this 100 as the GFE seems to?
+	maxQueuedControlFrames = 10000
 )
 
 var (
@@ -163,6 +164,15 @@ func (s *Server) maxConcurrentStreams() uint32 {
 	return defaultMaxStreams
 }
 
+// maxQueuedControlFrames is the maximum number of control frames like
+// SETTINGS, PING and RST_STREAM that will be queued for writing before
+// the connection is closed to prevent memory exhaustion attacks.
+func (s *Server) maxQueuedControlFrames() int {
+	// TODO: if anybody asks, add a Server field, and remember to define the
+	// behavior of negative values.
+	return maxQueuedControlFrames
+}
+
 type serverInternalState struct {
 	mu          sync.Mutex
 	activeConns map[*serverConn]struct{}
@@ -506,6 +516,7 @@ type serverConn struct {
 	sawFirstSettings      bool   // got the initial SETTINGS frame after the preface
 	needToSendSettingsAck bool
 	unackedSettings       int    // how many SETTINGS have we sent without ACKs?
+	queuedControlFrames   int    // control frames in the writeSched queue
 	clientMaxStreams      uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
 	advMaxStreams         uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
 	curClientStreams      uint32 // number of open streams initiated by the client
@@ -894,6 +905,14 @@ func (sc *serverConn) serve() {
 		}
 	}
 
+	// If the peer is causing us to generate a lot of control frames,
+	// but not reading them from us, assume they are trying to make us
+	// run out of memory.
+	if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() {
+		sc.vlogf("http2: too many control frames in send queue, closing connection")
+		return
+	}
+
 	// Start the shutdown timer after sending a GOAWAY. When sending GOAWAY
 	// with no error code (graceful shutdown), don't start the timer until
 	// all open streams have been completed.
@@ -1093,6 +1112,14 @@ func (sc *serverConn) writeFrame(wr FrameWriteRequest) {
 	}
 
 	if !ignoreWrite {
+		if wr.isControl() {
+			sc.queuedControlFrames++
+			// For extra safety, detect wraparounds, which should not happen,
+			// and pull the plug.
+			if sc.queuedControlFrames < 0 {
+				sc.conn.Close()
+			}
+		}
 		sc.writeSched.Push(wr)
 	}
 	sc.scheduleFrameWrite()
@@ -1210,10 +1237,8 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
 // If a frame is already being written, nothing happens. This will be called again
 // when the frame is done being written.
 //
-// If a frame isn't being written we need to send one, the best frame
-// to send is selected, preferring first things that aren't
-// stream-specific (e.g. ACKing settings), and then finding the
-// highest priority stream.
+// If a frame isn't being written and we need to send one, the best frame
+// to send is selected by writeSched.
 //
 // If a frame isn't being written and there's nothing else to send, we
 // flush the write buffer.
@@ -1241,6 +1266,9 @@ func (sc *serverConn) scheduleFrameWrite() {
 		}
 		if !sc.inGoAway || sc.goAwayCode == ErrCodeNo {
 			if wr, ok := sc.writeSched.Pop(); ok {
+				if wr.isControl() {
+					sc.queuedControlFrames--
+				}
 				sc.startFrameWrite(wr)
 				continue
 			}
@@ -1533,6 +1561,8 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error {
 	if err := f.ForeachSetting(sc.processSetting); err != nil {
 		return err
 	}
+	// TODO: judging by RFC 7540, Section 6.5.3 each SETTINGS frame should be
+	// acknowledged individually, even if multiple are received before the ACK.
 	sc.needToSendSettingsAck = true
 	sc.scheduleFrameWrite()
 	return nil

vendor/golang.org/x/net/http2/transport.go

@@ -992,7 +992,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
req.Method != "HEAD" {
// Request gzip only, not deflate. Deflate is ambiguous and
// not as universally supported anyway.
// See: http://www.gzip.org/zlib/zlib_faq.html#faq38
// See: https://zlib.net/zlib_faq.html#faq39
//
// Note that we don't request this for HEAD requests,
// due to a bug in nginx:

vendor/golang.org/x/net/http2/writesched.go

@@ -32,7 +32,7 @@ type WriteScheduler interface {
 	// Pop dequeues the next frame to write. Returns false if no frames can
 	// be written. Frames with a given wr.StreamID() are Pop'd in the same
-	// order they are Push'd.
+	// order they are Push'd. No frames should be discarded except by CloseStream.
 	Pop() (wr FrameWriteRequest, ok bool)
 }
@@ -76,6 +76,12 @@ func (wr FrameWriteRequest) StreamID() uint32 {
 	return wr.stream.id
 }
 
+// isControl reports whether wr is a control frame for MaxQueuedControlFrames
+// purposes. That includes non-stream frames and RST_STREAM frames.
+func (wr FrameWriteRequest) isControl() bool {
+	return wr.stream == nil
+}
+
 // DataSize returns the number of flow control bytes that must be consumed
 // to write this entire frame. This is 0 for non-DATA frames.
 func (wr FrameWriteRequest) DataSize() int {

vendor/golang.org/x/net/http2/writesched_random.go

@@ -19,7 +19,8 @@ type randomWriteScheduler struct {
 	zero writeQueue
 
 	// sq contains the stream-specific queues, keyed by stream ID.
-	// When a stream is idle or closed, it's deleted from the map.
+	// When a stream is idle, closed, or emptied, it's deleted
+	// from the map.
 	sq map[uint32]*writeQueue
 
 	// pool of empty queues for reuse.
@@ -63,8 +64,12 @@ func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) {
 		return ws.zero.shift(), true
 	}
 	// Iterate over all non-idle streams until finding one that can be consumed.
-	for _, q := range ws.sq {
+	for streamID, q := range ws.sq {
 		if wr, ok := q.consume(math.MaxInt32); ok {
+			if q.empty() {
+				delete(ws.sq, streamID)
+				ws.queuePool.put(q)
+			}
 			return wr, true
 		}
 	}