Update k8s.io dependencies to master to get selflink fix in client-go

Author: Michelle Au
Date: 2019-08-12 13:48:24 -07:00
Parent: 7e09056156
Commit: d38cdc2f76

315 changed files with 86399 additions and 37559 deletions

File: vendor/k8s.io/client-go/util/flowcontrol/backoff.go

@@ -30,7 +30,7 @@ type backoffEntry struct {
 }

 type Backoff struct {
-	sync.Mutex
+	sync.RWMutex
 	Clock           clock.Clock
 	defaultDuration time.Duration
 	maxDuration     time.Duration
@@ -57,8 +57,8 @@ func NewBackOff(initial, max time.Duration) *Backoff {
 // Get the current backoff Duration
 func (p *Backoff) Get(id string) time.Duration {
-	p.Lock()
-	defer p.Unlock()
+	p.RLock()
+	defer p.RUnlock()
 	var delay time.Duration
 	entry, ok := p.perItemBackoff[id]
 	if ok {
@@ -90,8 +90,8 @@ func (p *Backoff) Reset(id string) {
 // Returns True if the elapsed time since eventTime is smaller than the current backoff window
 func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool {
-	p.Lock()
-	defer p.Unlock()
+	p.RLock()
+	defer p.RUnlock()
 	entry, ok := p.perItemBackoff[id]
 	if !ok {
 		return false
@@ -99,13 +99,13 @@ func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool {
 	if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
 		return false
 	}
-	return p.Clock.Now().Sub(eventTime) < entry.backoff
+	return p.Clock.Since(eventTime) < entry.backoff
 }

 // Returns True if time since lastupdate is less than the current backoff window.
 func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool {
-	p.Lock()
-	defer p.Unlock()
+	p.RLock()
+	defer p.RUnlock()
 	entry, ok := p.perItemBackoff[id]
 	if !ok {
 		return false
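For context on why the read paths matter: Get, IsInBackOffSince, and IsInBackOffSinceUpdate only read state, so the RWMutex lets many goroutines query backoff concurrently. A minimal sketch of typical usage; the item id and durations are illustrative:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// Per-item exponential backoff: 1s initial delay, capped at 16s.
	backoff := flowcontrol.NewBackOff(time.Second, 16*time.Second)

	start := time.Now()
	backoff.Next("node-1", start) // record a failure; each call grows the delay

	// These two calls now take only a read lock.
	fmt.Println(backoff.Get("node-1"))                     // current delay for this item
	fmt.Println(backoff.IsInBackOffSince("node-1", start)) // still inside the window?
}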

File: vendor/k8s.io/client-go/util/flowcontrol/throttle.go

@@ -17,6 +17,8 @@ limitations under the License.
 package flowcontrol

 import (
+	"context"
+	"errors"
 	"sync"
 	"time"
@@ -33,6 +35,8 @@ type RateLimiter interface {
 	Stop()
 	// QPS returns QPS of this rate limiter
 	QPS() float32
+	// Wait returns nil if a token is taken before the Context is done.
+	Wait(ctx context.Context) error
 }

 type tokenBucketRateLimiter struct {
@@ -98,6 +102,10 @@ func (t *tokenBucketRateLimiter) QPS() float32 {
 	return t.qps
 }

+func (t *tokenBucketRateLimiter) Wait(ctx context.Context) error {
+	return t.limiter.Wait(ctx)
+}
+
 type fakeAlwaysRateLimiter struct{}

 func NewFakeAlwaysRateLimiter() RateLimiter {
@@ -116,6 +124,10 @@ func (t *fakeAlwaysRateLimiter) QPS() float32 {
 	return 1
 }

+func (t *fakeAlwaysRateLimiter) Wait(ctx context.Context) error {
+	return nil
+}
+
 type fakeNeverRateLimiter struct {
 	wg sync.WaitGroup
 }
@@ -141,3 +153,7 @@ func (t *fakeNeverRateLimiter) Accept() {
 func (t *fakeNeverRateLimiter) QPS() float32 {
 	return 1
 }
+
+func (t *fakeNeverRateLimiter) Wait(ctx context.Context) error {
+	return errors.New("can not be accept")
+}
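A short sketch of what the new method enables for callers: unlike Accept(), which blocks until a token is available, Wait gives up when the context ends. The QPS/burst values and timeout here are illustrative:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// 5 requests/second with a burst of 10.
	limiter := flowcontrol.NewTokenBucketRateLimiter(5, 10)

	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	// Returns an error instead of blocking forever if the deadline passes first.
	if err := limiter.Wait(ctx); err != nil {
		fmt.Println("gave up waiting for a token:", err)
		return
	}
	fmt.Println("token acquired")
}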

File: vendor/k8s.io/client-go/util/homedir/homedir.go

@@ -18,30 +18,75 @@ package homedir
 import (
 	"os"
+	"path/filepath"
 	"runtime"
 )

-// HomeDir returns the home directory for the current user
+// HomeDir returns the home directory for the current user.
+// On Windows:
+// 1. the first of %HOME%, %HOMEDRIVE%%HOMEPATH%, %USERPROFILE% containing a `.kube\config` file is returned.
+// 2. if none of those locations contain a `.kube\config` file, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists and is writeable is returned.
+// 3. if none of those locations are writeable, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists is returned.
+// 4. if none of those locations exists, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that is set is returned.
 func HomeDir() string {
 	if runtime.GOOS == "windows" {
-		// First prefer the HOME environmental variable
-		if home := os.Getenv("HOME"); len(home) > 0 {
-			if _, err := os.Stat(home); err == nil {
-				return home
-			}
-		}
+		home := os.Getenv("HOME")
+		homeDriveHomePath := ""
 		if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 {
-			homeDir := homeDrive + homePath
-			if _, err := os.Stat(homeDir); err == nil {
-				return homeDir
-			}
+			homeDriveHomePath = homeDrive + homePath
 		}
+		userProfile := os.Getenv("USERPROFILE")
+
+		// Return first of %HOME%, %HOMEDRIVE%/%HOMEPATH%, %USERPROFILE% that contains a `.kube\config` file.
+		// %HOMEDRIVE%/%HOMEPATH% is preferred over %USERPROFILE% for backwards-compatibility.
+		for _, p := range []string{home, homeDriveHomePath, userProfile} {
+			if len(p) == 0 {
+				continue
+			}
+			if _, err := os.Stat(filepath.Join(p, ".kube", "config")); err != nil {
+				continue
+			}
+			return p
+		}
+
+		firstSetPath := ""
+		firstExistingPath := ""
+
+		// Prefer %USERPROFILE% over %HOMEDRIVE%/%HOMEPATH% for compatibility with other auth-writing tools
+		for _, p := range []string{home, userProfile, homeDriveHomePath} {
+			if len(p) == 0 {
+				continue
+			}
+			if len(firstSetPath) == 0 {
+				// remember the first path that is set
+				firstSetPath = p
+			}
+			info, err := os.Stat(p)
+			if err != nil {
+				continue
+			}
+			if len(firstExistingPath) == 0 {
+				// remember the first path that exists
+				firstExistingPath = p
+			}
+			if info.IsDir() && info.Mode().Perm()&(1<<(uint(7))) != 0 {
+				// return first path that is writeable
+				return p
+			}
+		}
-		if userProfile := os.Getenv("USERPROFILE"); len(userProfile) > 0 {
-			if _, err := os.Stat(userProfile); err == nil {
-				return userProfile
-			}
-		}
+
+		// If none are writeable, return first location that exists
+		if len(firstExistingPath) > 0 {
+			return firstExistingPath
+		}
+
+		// If none exist, return first location that is set
+		if len(firstSetPath) > 0 {
+			return firstSetPath
+		}
+
+		// We've got nothing
+		return ""
 	}
 	return os.Getenv("HOME")
 }
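In practice, client-go consumers call HomeDir mainly to locate a default kubeconfig. A minimal sketch of that pattern; clientcmd.BuildConfigFromFlags is the standard loader, and the error handling is illustrative:

package main

import (
	"fmt"
	"path/filepath"

	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"
)

func main() {
	kubeconfig := ""
	if home := homedir.HomeDir(); home != "" {
		// The same .kube/config location the Windows lookup above probes for.
		kubeconfig = filepath.Join(home, ".kube", "config")
	}

	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		fmt.Println("could not load kubeconfig:", err)
		return
	}
	fmt.Println("API server:", config.Host)
}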

File: vendor/k8s.io/client-go/util/retry/util.go

@@ -42,12 +42,12 @@ var DefaultBackoff = wait.Backoff{
 	Jitter:   0.1,
 }

-// RetryConflict executes the provided function repeatedly, retrying if the server returns a conflicting
-// write. Callers should preserve previous executions if they wish to retry changes. It performs an
+// OnError executes the provided function repeatedly, retrying if the server returns a specified
+// error. Callers should preserve previous executions if they wish to retry changes. It performs an
 // exponential backoff.
 //
 //	var pod *api.Pod
-//	err := RetryOnConflict(DefaultBackoff, func() (err error) {
+//	err := retry.OnError(DefaultBackoff, errors.IsConflict, func() (err error) {
 //		pod, err = c.Pods("mynamespace").UpdateStatus(podStatus)
 //		return
 //	})
@@ -58,14 +58,14 @@ var DefaultBackoff = wait.Backoff{
 //	...
 //
 // TODO: Make Backoff an interface?
-func RetryOnConflict(backoff wait.Backoff, fn func() error) error {
+func OnError(backoff wait.Backoff, errorFunc func(error) bool, fn func() error) error {
 	var lastConflictErr error
 	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
 		err := fn()
 		switch {
 		case err == nil:
 			return true, nil
-		case errors.IsConflict(err):
+		case errorFunc(err):
 			lastConflictErr = err
 			return false, nil
 		default:
@@ -77,3 +77,8 @@ func RetryOnConflict(backoff wait.Backoff, fn func() error) error {
 	}
 	return err
 }

+// RetryOnConflict executes the provided function repeatedly, retrying if the server returns a conflicting
+// write.
+func RetryOnConflict(backoff wait.Backoff, fn func() error) error {
+	return OnError(backoff, errors.IsConflict, fn)
+}
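A sketch of how the two helpers relate after this change; updateStatus is a hypothetical stand-in for a client-go write such as Pods("ns").UpdateStatus(...):

package main

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/client-go/util/retry"
)

func main() {
	updateStatus := func(ctx context.Context) error { return nil } // hypothetical write

	// The classic helper: retry only on 409 Conflict.
	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		return updateStatus(context.TODO())
	})
	fmt.Println("RetryOnConflict:", err)

	// The generalized form added here: retry on any error predicate,
	// e.g. server timeouts instead of conflicts.
	err = retry.OnError(retry.DefaultBackoff, apierrors.IsServerTimeout, func() error {
		return updateStatus(context.TODO())
	})
	fmt.Println("OnError:", err)
}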

File: vendor/k8s.io/client-go/util/workqueue/delaying_queue.go

@@ -18,6 +18,7 @@ package workqueue
 import (
 	"container/heap"
+	"sync"
 	"time"

 	"k8s.io/apimachinery/pkg/util/clock"
@@ -43,13 +44,12 @@ func NewNamedDelayingQueue(name string) DelayingInterface {
 func newDelayingQueue(clock clock.Clock, name string) DelayingInterface {
 	ret := &delayingType{
-		Interface:         NewNamed(name),
-		clock:             clock,
-		heartbeat:         clock.NewTicker(maxWait),
-		stopCh:            make(chan struct{}),
-		waitingForAddCh:   make(chan *waitFor, 1000),
-		metrics:           newRetryMetrics(name),
-		deprecatedMetrics: newDeprecatedRetryMetrics(name),
+		Interface:       NewNamed(name),
+		clock:           clock,
+		heartbeat:       clock.NewTicker(maxWait),
+		stopCh:          make(chan struct{}),
+		waitingForAddCh: make(chan *waitFor, 1000),
+		metrics:         newRetryMetrics(name),
 	}

 	go ret.waitingLoop()
@@ -66,6 +66,8 @@ type delayingType struct {
 	// stopCh lets us signal a shutdown to the waiting loop
 	stopCh chan struct{}
+	// stopOnce guarantees we only signal shutdown a single time
+	stopOnce sync.Once

 	// heartbeat ensures we wait no more than maxWait before firing
 	heartbeat clock.Ticker
@@ -74,8 +76,7 @@ type delayingType struct {
 	waitingForAddCh chan *waitFor

 	// metrics counts the number of retries
-	metrics           retryMetrics
-	deprecatedMetrics retryMetrics
+	metrics retryMetrics
 }

 // waitFor holds the data to add and the time it should be added
@@ -133,11 +134,14 @@ func (pq waitForPriorityQueue) Peek() interface{} {
 	return pq[0]
 }

-// ShutDown gives a way to shut off this queue
+// ShutDown stops the queue. After the queue drains, the returned shutdown bool
+// on Get() will be true. This method may be invoked more than once.
 func (q *delayingType) ShutDown() {
-	q.Interface.ShutDown()
-	close(q.stopCh)
-	q.heartbeat.Stop()
+	q.stopOnce.Do(func() {
+		q.Interface.ShutDown()
+		close(q.stopCh)
+		q.heartbeat.Stop()
+	})
 }

 // AddAfter adds the given item to the work queue after the given delay
@@ -148,7 +152,6 @@ func (q *delayingType) AddAfter(item interface{}, duration time.Duration) {
 	}

 	q.metrics.retry()
-	q.deprecatedMetrics.retry()

 	// immediately add things with no delay
 	if duration <= 0 {
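A minimal sketch of the behavior these changes touch; the item name and delay are illustrative:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.NewNamedDelayingQueue("demo")

	// The item becomes visible to Get only after the delay elapses.
	q.AddAfter("sync-node-1", 500*time.Millisecond)

	item, shutdown := q.Get() // blocks until the item is ready
	fmt.Println(item, shutdown)
	q.Done(item)

	// With the sync.Once guard above, calling ShutDown twice is now safe.
	q.ShutDown()
	q.ShutDown()
}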

File: vendor/k8s.io/client-go/util/workqueue/doc.go

@@ -23,4 +23,4 @@ limitations under the License.
 // * Multiple consumers and producers. In particular, it is allowed for an
 //   item to be reenqueued while it is being processed.
 // * Shutdown notifications.
-package workqueue
+package workqueue // import "k8s.io/client-go/util/workqueue"

File: vendor/k8s.io/client-go/util/workqueue/metrics.go

@@ -87,14 +87,6 @@ type defaultQueueMetrics struct {
 	// how long have current threads been working?
 	unfinishedWorkSeconds   SettableGaugeMetric
 	longestRunningProcessor SettableGaugeMetric
-
-	// TODO(danielqsj): Remove the following metrics, they are deprecated
-	deprecatedDepth                   GaugeMetric
-	deprecatedAdds                    CounterMetric
-	deprecatedLatency                 SummaryMetric
-	deprecatedWorkDuration            SummaryMetric
-	deprecatedUnfinishedWorkSeconds   SettableGaugeMetric
-	deprecatedLongestRunningProcessor SettableGaugeMetric
 }

 func (m *defaultQueueMetrics) add(item t) {
@@ -103,9 +95,7 @@ func (m *defaultQueueMetrics) add(item t) {
 	}
 	m.adds.Inc()
-	m.deprecatedAdds.Inc()
 	m.depth.Inc()
-	m.deprecatedDepth.Inc()
 	if _, exists := m.addTimes[item]; !exists {
 		m.addTimes[item] = m.clock.Now()
 	}
@@ -117,11 +107,9 @@ func (m *defaultQueueMetrics) get(item t) {
 	}

 	m.depth.Dec()
-	m.deprecatedDepth.Dec()
 	m.processingStartTimes[item] = m.clock.Now()
 	if startTime, exists := m.addTimes[item]; exists {
 		m.latency.Observe(m.sinceInSeconds(startTime))
-		m.deprecatedLatency.Observe(m.sinceInMicroseconds(startTime))
 		delete(m.addTimes, item)
 	}
 }
@@ -133,7 +121,6 @@ func (m *defaultQueueMetrics) done(item t) {
 	if startTime, exists := m.processingStartTimes[item]; exists {
 		m.workDuration.Observe(m.sinceInSeconds(startTime))
-		m.deprecatedWorkDuration.Observe(m.sinceInMicroseconds(startTime))
 		delete(m.processingStartTimes, item)
 	}
 }
@@ -153,9 +140,7 @@ func (m *defaultQueueMetrics) updateUnfinishedWork() {
 	// Convert to seconds; microseconds is unhelpfully granular for this.
 	total /= 1000000
 	m.unfinishedWorkSeconds.Set(total)
-	m.deprecatedUnfinishedWorkSeconds.Set(total)
 	m.longestRunningProcessor.Set(oldest / 1000000)
-	m.deprecatedLongestRunningProcessor.Set(oldest) // in microseconds.
 }

 type noMetrics struct{}
@@ -200,13 +185,6 @@ type MetricsProvider interface {
 	NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric
 	NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric
 	NewRetriesMetric(name string) CounterMetric
-	NewDeprecatedDepthMetric(name string) GaugeMetric
-	NewDeprecatedAddsMetric(name string) CounterMetric
-	NewDeprecatedLatencyMetric(name string) SummaryMetric
-	NewDeprecatedWorkDurationMetric(name string) SummaryMetric
-	NewDeprecatedUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric
-	NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric
-	NewDeprecatedRetriesMetric(name string) CounterMetric
 }

 type noopMetricsProvider struct{}
@@ -239,34 +217,6 @@ func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric {
 	return noopMetric{}
 }

-func (_ noopMetricsProvider) NewDeprecatedDepthMetric(name string) GaugeMetric {
-	return noopMetric{}
-}
-
-func (_ noopMetricsProvider) NewDeprecatedAddsMetric(name string) CounterMetric {
-	return noopMetric{}
-}
-
-func (_ noopMetricsProvider) NewDeprecatedLatencyMetric(name string) SummaryMetric {
-	return noopMetric{}
-}
-
-func (_ noopMetricsProvider) NewDeprecatedWorkDurationMetric(name string) SummaryMetric {
-	return noopMetric{}
-}
-
-func (_ noopMetricsProvider) NewDeprecatedUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric {
-	return noopMetric{}
-}
-
-func (_ noopMetricsProvider) NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric {
-	return noopMetric{}
-}
-
-func (_ noopMetricsProvider) NewDeprecatedRetriesMetric(name string) CounterMetric {
-	return noopMetric{}
-}
-
 var globalMetricsFactory = queueMetricsFactory{
 	metricsProvider: noopMetricsProvider{},
 }
@@ -289,21 +239,15 @@ func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) qu
 		return noMetrics{}
 	}

 	return &defaultQueueMetrics{
-		clock:                             clock,
-		depth:                             mp.NewDepthMetric(name),
-		adds:                              mp.NewAddsMetric(name),
-		latency:                           mp.NewLatencyMetric(name),
-		workDuration:                      mp.NewWorkDurationMetric(name),
-		unfinishedWorkSeconds:             mp.NewUnfinishedWorkSecondsMetric(name),
-		longestRunningProcessor:           mp.NewLongestRunningProcessorSecondsMetric(name),
-		deprecatedDepth:                   mp.NewDeprecatedDepthMetric(name),
-		deprecatedAdds:                    mp.NewDeprecatedAddsMetric(name),
-		deprecatedLatency:                 mp.NewDeprecatedLatencyMetric(name),
-		deprecatedWorkDuration:            mp.NewDeprecatedWorkDurationMetric(name),
-		deprecatedUnfinishedWorkSeconds:   mp.NewDeprecatedUnfinishedWorkSecondsMetric(name),
-		deprecatedLongestRunningProcessor: mp.NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name),
-		addTimes:                          map[t]time.Time{},
-		processingStartTimes:              map[t]time.Time{},
+		clock:                   clock,
+		depth:                   mp.NewDepthMetric(name),
+		adds:                    mp.NewAddsMetric(name),
+		latency:                 mp.NewLatencyMetric(name),
+		workDuration:            mp.NewWorkDurationMetric(name),
+		unfinishedWorkSeconds:   mp.NewUnfinishedWorkSecondsMetric(name),
+		longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name),
+		addTimes:                map[t]time.Time{},
+		processingStartTimes:    map[t]time.Time{},
 	}
 }
@@ -317,16 +261,6 @@ func newRetryMetrics(name string) retryMetrics {
 	}
 }

-func newDeprecatedRetryMetrics(name string) retryMetrics {
-	var ret *defaultRetryMetrics
-	if len(name) == 0 {
-		return ret
-	}
-	return &defaultRetryMetrics{
-		retries: globalMetricsFactory.metricsProvider.NewDeprecatedRetriesMetric(name),
-	}
-}
-
 // SetProvider sets the metrics provider for all subsequently created work
 // queues. Only the first call has an effect.
 func SetProvider(metricsProvider MetricsProvider) {
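The trimmed MetricsProvider is small enough to implement directly. A sketch of a provider wired in via SetProvider, assuming the post-change method set shown above and assuming NewLatencyMetric/NewWorkDurationMetric return workqueue.HistogramMetric in this version (their return types are not visible in these hunks); printMetric is a hypothetical helper:

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

// printMetric satisfies all of the metric interfaces the provider returns.
type printMetric struct{ name string }

func (m printMetric) Inc()              { fmt.Println(m.name, "inc") }
func (m printMetric) Dec()              { fmt.Println(m.name, "dec") }
func (m printMetric) Set(v float64)     { fmt.Println(m.name, "set", v) }
func (m printMetric) Observe(v float64) { fmt.Println(m.name, "observe", v) }

type printProvider struct{}

func (printProvider) NewDepthMetric(name string) workqueue.GaugeMetric  { return printMetric{name} }
func (printProvider) NewAddsMetric(name string) workqueue.CounterMetric { return printMetric{name} }
func (printProvider) NewLatencyMetric(name string) workqueue.HistogramMetric {
	return printMetric{name}
}
func (printProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric {
	return printMetric{name}
}
func (printProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
	return printMetric{name}
}
func (printProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric {
	return printMetric{name}
}
func (printProvider) NewRetriesMetric(name string) workqueue.CounterMetric {
	return printMetric{name}
}

func main() {
	// Must run before any named queue is created; only the first call wins.
	workqueue.SetProvider(printProvider{})

	q := workqueue.NewNamed("demo") // named queues report metrics
	q.Add("item")
	item, _ := q.Get()
	q.Done(item)
	q.ShutDown()
}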

File: vendor/k8s.io/client-go/util/workqueue/parallelizer.go

@@ -25,14 +25,6 @@ import (
 type DoWorkPieceFunc func(piece int)

-// Parallelize is a very simple framework that allows for parallelizing
-// N independent pieces of work.
-//
-// Deprecated: Use ParallelizeUntil instead.
-func Parallelize(workers, pieces int, doWorkPiece DoWorkPieceFunc) {
-	ParallelizeUntil(nil, workers, pieces, doWorkPiece)
-}
-
 // ParallelizeUntil is a framework that allows for parallelizing N
 // independent pieces of work until done or the context is canceled.
 func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc) {
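With Parallelize removed, callers pass a context explicitly. A minimal sketch; the worker and piece counts and the timeout are illustrative:

package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	var processed int64
	// 8 workers chew through 100 pieces; cancellation stops scheduling
	// pieces that have not started yet.
	workqueue.ParallelizeUntil(ctx, 8, 100, func(piece int) {
		atomic.AddInt64(&processed, 1)
	})
	fmt.Println("processed:", atomic.LoadInt64(&processed))
}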