Bump dependencies to Kubernetes 1.20
vendor/google.golang.org/grpc/stream.go (generated, vendored): 105 changed lines
@@ -35,6 +35,9 @@ import (
 	"google.golang.org/grpc/internal/binarylog"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/grpcrand"
+	"google.golang.org/grpc/internal/grpcutil"
+	iresolver "google.golang.org/grpc/internal/resolver"
+	"google.golang.org/grpc/internal/serviceconfig"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
@@ -169,7 +172,18 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 	if err := cc.waitForResolvedAddrs(ctx); err != nil {
 		return nil, err
 	}
-	mc := cc.GetMethodConfig(method)
+
+	var mc serviceconfig.MethodConfig
+	var onCommit func()
+	rpcConfig := cc.safeConfigSelector.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: method})
+	if rpcConfig != nil {
+		if rpcConfig.Context != nil {
+			ctx = rpcConfig.Context
+		}
+		mc = rpcConfig.MethodConfig
+		onCommit = rpcConfig.OnCommitted
+	}
+
 	if mc.WaitForReady != nil {
 		c.failFast = !*mc.WaitForReady
 	}
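Note: the hunk above replaces the static cc.GetMethodConfig lookup with a per-RPC config selector, which may substitute the RPC's context and method config and register an OnCommitted callback. The sketch below is a minimal, self-contained illustration of that selection pattern; the RPCInfo, RPCConfig, and ConfigSelector types are local stand-ins mirroring the field names used in the diff, not the real iresolver API.

package main

import (
	"context"
	"fmt"
)

// Local stand-ins mirroring the fields used in the diff; the real types
// live in google.golang.org/grpc/internal/resolver.
type RPCInfo struct {
	Context context.Context
	Method  string
}

type MethodConfig struct {
	WaitForReady *bool
}

type RPCConfig struct {
	Context      context.Context // optional replacement context for the RPC
	MethodConfig MethodConfig    // config to use for this RPC
	OnCommitted  func()          // called once the attempt is committed
}

type ConfigSelector interface {
	SelectConfig(RPCInfo) *RPCConfig
}

// waitForReadySelector enables wait-for-ready on every method; illustrative only.
type waitForReadySelector struct{}

func (waitForReadySelector) SelectConfig(info RPCInfo) *RPCConfig {
	wfr := true
	return &RPCConfig{
		Context:      info.Context,
		MethodConfig: MethodConfig{WaitForReady: &wfr},
		OnCommitted:  func() { fmt.Println("committed:", info.Method) },
	}
}

func main() {
	var sel ConfigSelector = waitForReadySelector{}
	ctx := context.Background()
	// Same shape as the new code path in newClientStream above.
	var mc MethodConfig
	var onCommit func()
	if cfg := sel.SelectConfig(RPCInfo{Context: ctx, Method: "/pkg.Service/Get"}); cfg != nil {
		if cfg.Context != nil {
			ctx = cfg.Context
		}
		mc = cfg.MethodConfig
		onCommit = cfg.OnCommitted
	}
	_ = ctx
	if mc.WaitForReady != nil {
		fmt.Println("failFast:", !*mc.WaitForReady)
	}
	if onCommit != nil {
		onCommit()
	}
}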
@@ -271,13 +285,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 		cancel:       cancel,
 		beginTime:    beginTime,
 		firstAttempt: true,
+		onCommit:     onCommit,
 	}
 	if !cc.dopts.disableRetry {
 		cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
 	}
 	cs.binlog = binarylog.GetMethodLogger(method)

-	cs.callInfo.stream = cs
 	// Only this initial attempt has stats/tracing.
 	// TODO(dfawley): move to newAttempt when per-attempt stats are implemented.
 	if err := cs.newAttemptLocked(sh, trInfo); err != nil {
@@ -347,7 +361,16 @@ func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (r
 	if err := cs.ctx.Err(); err != nil {
 		return toRPCErr(err)
 	}
-	t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method)
+
+	ctx := cs.ctx
+	if cs.cc.parsedTarget.Scheme == "xds" {
+		// Add extra metadata (metadata that will be added by transport) to context
+		// so the balancer can see them.
+		ctx = grpcutil.WithExtraMetadata(cs.ctx, metadata.Pairs(
+			"content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
+		))
+	}
+	t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method)
 	if err != nil {
 		return err
 	}
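Note: for xds targets the hunk above makes transport-added metadata visible to the balancer by stashing it in the context before picking a transport. grpcutil.WithExtraMetadata is internal to gRPC, so the sketch below only illustrates the general context-stashing technique; the key type and helper names are hypothetical.

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

// extraMDKey is a hypothetical unexported context key, analogous in spirit
// to what an internal WithExtraMetadata helper would use.
type extraMDKey struct{}

// withExtraMetadata records metadata the transport will add later, so code
// between here and the transport (e.g. an xds balancer) can observe it.
func withExtraMetadata(ctx context.Context, md metadata.MD) context.Context {
	return context.WithValue(ctx, extraMDKey{}, md)
}

func extraMetadataFrom(ctx context.Context) (metadata.MD, bool) {
	md, ok := ctx.Value(extraMDKey{}).(metadata.MD)
	return md, ok
}

func main() {
	ctx := withExtraMetadata(context.Background(),
		metadata.Pairs("content-type", "application/grpc"))
	if md, ok := extraMetadataFrom(ctx); ok {
		fmt.Println(md.Get("content-type"))
	}
}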
@@ -365,6 +388,11 @@ func (a *csAttempt) newStream() error {
 	cs.callHdr.PreviousAttempts = cs.numRetries
 	s, err := a.t.NewStream(cs.ctx, cs.callHdr)
 	if err != nil {
+		if _, ok := err.(transport.PerformedIOError); ok {
+			// Return without converting to an RPC error so retry code can
+			// inspect.
+			return err
+		}
 		return toRPCErr(err)
 	}
 	cs.attempt.s = s
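Note: transport.PerformedIOError wraps a NewStream failure when bytes may already have hit the wire, so the retry code can tell it apart from failures where nothing was written. A minimal stand-alone sketch of that wrapper pattern, using local types rather than the gRPC ones:

package main

import (
	"errors"
	"fmt"
)

// performedIOError mirrors the shape of transport.PerformedIOError in the
// diff: a marker wrapper meaning "I/O may already have been performed".
type performedIOError struct{ Err error }

func (p performedIOError) Error() string { return p.Err.Error() }

func newStream() error {
	// Pretend the stream failed after writing headers.
	return performedIOError{Err: errors.New("connection reset")}
}

func main() {
	err := newStream()
	// Same decision shouldRetry makes below: an IO failure must be
	// unwrapped and treated as possibly processed; a non-IO failure never
	// touched the wire and is safe to retry.
	if pio, ok := err.(performedIOError); ok {
		fmt.Println("stream may have been processed:", pio.Err)
	} else {
		fmt.Println("nothing written; safe to retry:", err)
	}
}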
@@ -418,7 +446,8 @@ type clientStream struct {
 	// place where we need to check if the attempt is nil.
 	attempt *csAttempt
 	// TODO(hedging): hedging will have multiple attempts simultaneously.
-	committed  bool                       // active attempt committed for retry?
+	committed  bool // active attempt committed for retry?
+	onCommit   func()
 	buffer     []func(a *csAttempt) error // operations to replay on retry
 	bufferSize int                        // current size of buffer
 }
@@ -447,6 +476,9 @@ type csAttempt struct {
 }

 func (cs *clientStream) commitAttemptLocked() {
+	if !cs.committed && cs.onCommit != nil {
+		cs.onCommit()
+	}
 	cs.committed = true
 	cs.buffer = nil
 }
@@ -460,11 +492,21 @@ func (cs *clientStream) commitAttempt() {
 // shouldRetry returns nil if the RPC should be retried; otherwise it returns
 // the error that should be returned by the operation.
 func (cs *clientStream) shouldRetry(err error) error {
-	if cs.attempt.s == nil && !cs.callInfo.failFast {
-		// In the event of any error from NewStream (attempt.s == nil), we
-		// never attempted to write anything to the wire, so we can retry
-		// indefinitely for non-fail-fast RPCs.
-		return nil
+	unprocessed := false
+	if cs.attempt.s == nil {
+		pioErr, ok := err.(transport.PerformedIOError)
+		if ok {
+			// Unwrap error.
+			err = toRPCErr(pioErr.Err)
+		} else {
+			unprocessed = true
+		}
+		if !ok && !cs.callInfo.failFast {
+			// In the event of a non-IO operation error from NewStream, we
+			// never attempted to write anything to the wire, so we can retry
+			// indefinitely for non-fail-fast RPCs.
+			return nil
+		}
 	}
 	if cs.finished || cs.committed {
 		// RPC is finished or committed; cannot retry.
@@ -473,13 +515,12 @@ func (cs *clientStream) shouldRetry(err error) error {
 	// Wait for the trailers.
 	if cs.attempt.s != nil {
 		<-cs.attempt.s.Done()
+		unprocessed = cs.attempt.s.Unprocessed()
 	}
-	if cs.firstAttempt && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) {
+	if cs.firstAttempt && unprocessed {
 		// First attempt, stream unprocessed: transparently retry.
-		cs.firstAttempt = false
 		return nil
 	}
-	cs.firstAttempt = false
 	if cs.cc.dopts.disableRetry {
 		return err
 	}
@@ -497,13 +538,13 @@ func (cs *clientStream) shouldRetry(err error) error {
 	if len(sps) == 1 {
 		var e error
 		if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
-			channelz.Infof(cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0])
+			channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0])
 			cs.retryThrottler.throttle() // This counts as a failure for throttling.
 			return err
 		}
 		hasPushback = true
 	} else if len(sps) > 1 {
-		channelz.Warningf(cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps)
+		channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps)
 		cs.retryThrottler.throttle() // This counts as a failure for throttling.
 		return err
 	}
@@ -516,8 +557,8 @@ func (cs *clientStream) shouldRetry(err error) error {
 		code = status.Convert(err).Code()
 	}

-	rp := cs.methodConfig.retryPolicy
-	if rp == nil || !rp.retryableStatusCodes[code] {
+	rp := cs.methodConfig.RetryPolicy
+	if rp == nil || !rp.RetryableStatusCodes[code] {
 		return err
 	}

@@ -526,7 +567,7 @@ func (cs *clientStream) shouldRetry(err error) error {
 	if cs.retryThrottler.throttle() {
 		return err
 	}
-	if cs.numRetries+1 >= rp.maxAttempts {
+	if cs.numRetries+1 >= rp.MaxAttempts {
 		return err
 	}

@@ -535,9 +576,9 @@ func (cs *clientStream) shouldRetry(err error) error {
 		dur = time.Millisecond * time.Duration(pushback)
 		cs.numRetriesSincePushback = 0
 	} else {
-		fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback))
-		cur := float64(rp.initialBackoff) * fact
-		if max := float64(rp.maxBackoff); cur > max {
+		fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback))
+		cur := float64(rp.InitialBackoff) * fact
+		if max := float64(rp.MaxBackoff); cur > max {
 			cur = max
 		}
 		dur = time.Duration(grpcrand.Int63n(int64(cur)))
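Note: the renames above are the newly exported RetryPolicy knobs. The helper below restates this hunk's backoff computation as a self-contained function: full jitter over an exponentially growing, capped delay. It uses math/rand in place of the internal grpcrand package, and the RetryPolicy struct is a local stand-in.

package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

// Local stand-in carrying the exported fields used in the hunk above.
type RetryPolicy struct {
	InitialBackoff    time.Duration
	MaxBackoff        time.Duration
	BackoffMultiplier float64
}

// backoff grows InitialBackoff geometrically per retry since the last server
// pushback, caps it at MaxBackoff, then picks a uniformly random delay in
// [0, cap), i.e. "full jitter".
func backoff(rp RetryPolicy, retriesSincePushback int) time.Duration {
	fact := math.Pow(rp.BackoffMultiplier, float64(retriesSincePushback))
	cur := float64(rp.InitialBackoff) * fact
	if max := float64(rp.MaxBackoff); cur > max {
		cur = max
	}
	return time.Duration(rand.Int63n(int64(cur)))
}

func main() {
	rp := RetryPolicy{InitialBackoff: 100 * time.Millisecond, MaxBackoff: 2 * time.Second, BackoffMultiplier: 2}
	for n := 0; n < 5; n++ {
		fmt.Printf("retry %d: sleeping %v\n", n, backoff(rp, n))
	}
}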
@@ -565,6 +606,7 @@ func (cs *clientStream) retryLocked(lastErr error) error {
 			cs.commitAttemptLocked()
 			return err
 		}
+		cs.firstAttempt = false
 		if err := cs.newAttemptLocked(nil, nil); err != nil {
 			return err
 		}
@@ -799,6 +841,15 @@ func (cs *clientStream) finish(err error) {
 	}
 	cs.finished = true
 	cs.commitAttemptLocked()
+	if cs.attempt != nil {
+		cs.attempt.finish(err)
+		// after functions all rely upon having a stream.
+		if cs.attempt.s != nil {
+			for _, o := range cs.opts {
+				o.after(cs.callInfo, cs.attempt)
+			}
+		}
+	}
 	cs.mu.Unlock()
 	// For binary logging. only log cancel in finish (could be caused by RPC ctx
 	// canceled or ClientConn closed). Trailer will be logged in RecvMsg.
@@ -820,15 +871,6 @@ func (cs *clientStream) finish(err error) {
 			cs.cc.incrCallsSucceeded()
 		}
 	}
-	if cs.attempt != nil {
-		cs.attempt.finish(err)
-		// after functions all rely upon having a stream.
-		if cs.attempt.s != nil {
-			for _, o := range cs.opts {
-				o.after(cs.callInfo)
-			}
-		}
-	}
 	cs.cancel()
 }

@@ -905,7 +947,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
 			Payload: m,
 			// TODO truncate large payload.
 			Data:       payInfo.uncompressedBytes,
-			WireLength: payInfo.wireLength,
+			WireLength: payInfo.wireLength + headerLen,
 			Length:     len(payInfo.uncompressedBytes),
 		})
 	}
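Note: this WireLength fix (and the matching one in serverStream.RecvMsg further down) adds headerLen back into the reported size. In gRPC's wire format every message carries a 5-byte prefix, a 1-byte compression flag plus a 4-byte big-endian payload length, which the old stats omitted. A small sketch of that framing, assuming the standard length-prefixed layout:

package main

import (
	"encoding/binary"
	"fmt"
)

// headerLen matches the 5-byte gRPC message prefix: one compression flag
// byte plus a 4-byte big-endian payload length.
const headerLen = 5

func frame(payload []byte, compressed bool) []byte {
	hdr := make([]byte, headerLen)
	if compressed {
		hdr[0] = 1
	}
	binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))
	return append(hdr, payload...)
}

func main() {
	msg := frame([]byte("hello"), false)
	// WireLength now counts header + payload, i.e. len(msg) here.
	fmt.Println("payload:", len(msg)-headerLen, "wire:", len(msg))
}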
@@ -1066,7 +1108,6 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
 		t:        t,
 	}

-	as.callInfo.stream = as
 	s, err := as.t.NewStream(as.ctx, as.callHdr)
 	if err != nil {
 		err = toRPCErr(err)
@@ -1488,7 +1529,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
 			Payload: m,
 			// TODO truncate large payload.
 			Data:       payInfo.uncompressedBytes,
-			WireLength: payInfo.wireLength,
+			WireLength: payInfo.wireLength + headerLen,
 			Length:     len(payInfo.uncompressedBytes),
 		})
 	}