Bumping k8s version to 1.13.0-beta.1

Cheng Xing
2018-11-19 14:10:50 -08:00
parent 01bd7f356e
commit e2f1bdc372
633 changed files with 11189 additions and 126194 deletions
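The bulk of the diff below swaps the vendored logging library from github.com/golang/glog to k8s.io/klog (and github.com/ghodss/yaml to sigs.k8s.io/yaml), and pulls in new leader-election health-check and remotecommand stdin-wrapping code. For programs that consume these packages, klog keeps glog's Infof/Warningf/V() call surface but does its flag wiring through klog.InitFlags instead of package init; a minimal, hypothetical consumer-side sketch (assuming the klog version vendored here):

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// klog keeps glog's call surface, but its flags (-v, -logtostderr, and
	// friends) must now be registered explicitly.
	klog.InitFlags(nil) // nil registers the flags on flag.CommandLine
	flag.Parse()

	klog.V(2).Info("vendored client-go now logs through klog")
	klog.Flush()
}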


@@ -23,7 +23,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"github.com/golang/glog"
"k8s.io/klog"
)
// NewDeltaFIFO returns a Store which can be used to process changes to items.
@@ -320,17 +320,15 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err
newDeltas := append(f.items[id], Delta{actionType, obj})
newDeltas = dedupDeltas(newDeltas)
_, exists := f.items[id]
if len(newDeltas) > 0 {
if !exists {
if _, exists := f.items[id]; !exists {
f.queue = append(f.queue, id)
}
f.items[id] = newDeltas
f.cond.Broadcast()
} else if exists {
// We need to remove this from our map (extra items
// in the queue are ignored if they are not in the
// map).
} else {
// We need to remove this from our map (extra items in the queue are
// ignored if they are not in the map).
delete(f.items, id)
}
return nil
@@ -348,9 +346,6 @@ func (f *DeltaFIFO) List() []interface{} {
func (f *DeltaFIFO) listLocked() []interface{} {
list := make([]interface{}, 0, len(f.items))
for _, item := range f.items {
// Copy item's slice so operations on this slice
// won't interfere with the object we return.
item = copyDeltas(item)
list = append(list, item.Newest().Object)
}
return list
@@ -398,10 +393,7 @@ func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err err
func (f *DeltaFIFO) IsClosed() bool {
f.closedLock.Lock()
defer f.closedLock.Unlock()
if f.closed {
return true
}
return false
return f.closed
}
// Pop blocks until an item is added to the queue, and then returns it. If
@@ -432,10 +424,10 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
}
id := f.queue[0]
f.queue = f.queue[1:]
item, ok := f.items[id]
if f.initialPopulationCount > 0 {
f.initialPopulationCount--
}
item, ok := f.items[id]
if !ok {
// Item may have been deleted subsequently.
continue
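For orientation, a minimal, hypothetical use of DeltaFIFO (the pod object is illustrative only): queued actions for a key accumulate in a single Deltas slice, and Pop, touched above, hands that slice to the process callback while holding the lock.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil)

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "demo"}}
	fifo.Add(pod)    // queues an Added delta under key "default/demo"
	fifo.Update(pod) // appends an Updated delta to the same key

	// Pop blocks until a key is available, then passes its accumulated
	// Deltas to the process function.
	_, _ = fifo.Pop(func(obj interface{}) error {
		for _, d := range obj.(cache.Deltas) {
			fmt.Printf("%s %T\n", d.Type, d.Object)
		}
		return nil
	})
}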
@@ -506,10 +498,10 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
deletedObj, exists, err := f.knownObjects.GetByKey(k)
if err != nil {
deletedObj = nil
glog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k)
klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k)
} else if !exists {
deletedObj = nil
glog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k)
klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k)
}
queuedDeletions++
if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
@@ -553,10 +545,10 @@ func (f *DeltaFIFO) syncKey(key string) error {
func (f *DeltaFIFO) syncKeyLocked(key string) error {
obj, exists, err := f.knownObjects.GetByKey(key)
if err != nil {
glog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key)
klog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key)
return nil
} else if !exists {
glog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key)
klog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key)
return nil
}


@@ -20,8 +20,8 @@ import (
"sync"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/klog"
)
// ExpirationCache implements the store interface
@@ -95,7 +95,7 @@ func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
return nil, false
}
if c.expirationPolicy.IsExpired(timestampedItem) {
glog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj)
klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj)
c.cacheStorage.Delete(key)
return nil, false
}


@@ -17,7 +17,7 @@ limitations under the License.
package cache
import (
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@@ -60,7 +60,7 @@ func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selec
items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace})
if err != nil {
// Ignore error; do slow search without index.
glog.Warningf("can not retrieve list of objects using index : %v", err)
klog.Warningf("can not retrieve list of objects using index : %v", err)
for _, m := range indexer.List() {
metadata, err := meta.Accessor(m)
if err != nil {


@@ -22,7 +22,7 @@ import (
"sync"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
@@ -156,7 +156,7 @@ func (c *mutationCache) ByIndex(name string, indexKey string) ([]interface{}, er
}
elements, err := fn(updated)
if err != nil {
glog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err)
klog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err)
continue
}
for _, inIndex := range elements {


@@ -24,7 +24,7 @@ import (
"sync"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/diff"
@@ -45,7 +45,7 @@ func NewCacheMutationDetector(name string) CacheMutationDetector {
if !mutationDetectionEnabled {
return dummyMutationDetector{}
}
glog.Warningln("Mutation detector is enabled, this will result in memory leakage.")
klog.Warningln("Mutation detector is enabled, this will result in memory leakage.")
return &defaultCacheMutationDetector{name: name, period: 1 * time.Second}
}


@@ -31,7 +31,6 @@ import (
"syscall"
"time"
"github.com/golang/glog"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -41,6 +40,7 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/klog"
)
// Reflector watches a specified resource and causes all changes to be reflected in the given store.
@@ -128,7 +128,7 @@ var internalPackages = []string{"client-go/tools/cache/"}
// Run starts a watch and handles watch events. Will restart the watch if it is closed.
// Run will exit when stopCh is closed.
func (r *Reflector) Run(stopCh <-chan struct{}) {
glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name)
klog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name)
wait.Until(func() {
if err := r.ListAndWatch(stopCh); err != nil {
utilruntime.HandleError(err)
@@ -166,7 +166,7 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) {
// and then use the resource version to watch.
// It returns error if ListAndWatch didn't even try to initialize watch.
func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
glog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name)
klog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name)
var resourceVersion string
// Explicitly set "0" as resource version - it's fine for the List()
@@ -212,7 +212,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
return
}
if r.ShouldResync == nil || r.ShouldResync() {
glog.V(4).Infof("%s: forcing resync", r.name)
klog.V(4).Infof("%s: forcing resync", r.name)
if err := r.store.Resync(); err != nil {
resyncerrc <- err
return
@@ -246,7 +246,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
case io.EOF:
// watch closed normally
case io.ErrUnexpectedEOF:
glog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err)
klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err)
default:
utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedType, err))
}
@@ -267,7 +267,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil {
if err != errorStopRequested {
glog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
}
return nil
}
@@ -354,7 +354,7 @@ loop:
r.metrics.numberOfShortWatches.Inc()
return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
}
glog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount)
klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount)
return nil
}
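The Reflector hunks above only swap the logger; the list-then-watch behaviour is unchanged. A hypothetical sketch of driving a Reflector directly (most callers get one indirectly through an informer), assuming a clientset built elsewhere:

package example

import (
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// runPodReflector mirrors all pods into a local store until stopCh closes.
func runPodReflector(clientset kubernetes.Interface, stopCh <-chan struct{}) cache.Store {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	lw := cache.NewListWatchFromClient(
		clientset.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())

	// Reflector lists, then watches and reflects every change into store,
	// resyncing every 10 minutes; Run restarts the watch whenever it closes.
	r := cache.NewReflector(lw, &v1.Pod{}, store, 10*time.Minute)
	go r.Run(stopCh)
	return store
}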


@@ -28,7 +28,7 @@ import (
"k8s.io/client-go/util/buffer"
"k8s.io/client-go/util/retry"
"github.com/golang/glog"
"k8s.io/klog"
)
// SharedInformer has a shared data cache and is capable of distributing notifications for changes
@@ -116,11 +116,11 @@ func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool
},
stopCh)
if err != nil {
glog.V(2).Infof("stop requested")
klog.V(2).Infof("stop requested")
return false
}
glog.V(4).Infof("caches populated")
klog.V(4).Infof("caches populated")
return true
}
@@ -279,11 +279,11 @@ func determineResyncPeriod(desired, check time.Duration) time.Duration {
return desired
}
if check == 0 {
glog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired)
klog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired)
return 0
}
if desired < check {
glog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check)
klog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check)
return check
}
return desired
@@ -296,19 +296,19 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv
defer s.startedLock.Unlock()
if s.stopped {
glog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler)
klog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler)
return
}
if resyncPeriod > 0 {
if resyncPeriod < minimumResyncPeriod {
glog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod)
klog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod)
resyncPeriod = minimumResyncPeriod
}
if resyncPeriod < s.resyncCheckPeriod {
if s.started {
glog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
klog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
resyncPeriod = s.resyncCheckPeriod
} else {
// if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update
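The resync clamping above is what a caller hits when passing a per-handler period to AddEventHandlerWithResyncPeriod; a hypothetical registration, guarded by WaitForCacheSync as in the earlier hunk (handler bodies are placeholders):

package example

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/klog"
)

// watchPods registers a handler with its own resync period; periods below the
// informer's minimum are raised, as the hunk above shows.
func watchPods(clientset kubernetes.Interface, stopCh <-chan struct{}) {
	factory := informers.NewSharedInformerFactory(clientset, 10*time.Minute)
	podInformer := factory.Core().V1().Pods().Informer()

	podInformer.AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { klog.V(4).Infof("add %T", obj) },
		UpdateFunc: func(oldObj, newObj interface{}) {},
		DeleteFunc: func(obj interface{}) {},
	}, 30*time.Second)

	factory.Start(stopCh)
	if !cache.WaitForCacheSync(stopCh, podInformer.HasSynced) {
		klog.Error("timed out waiting for caches to sync")
	}
}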


@@ -23,7 +23,7 @@ import (
"reflect"
"testing"
"github.com/ghodss/yaml"
"sigs.k8s.io/yaml"
)
func newMergedConfig(certFile, certContent, keyFile, keyContent, caFile, caContent string, t *testing.T) Config {


@@ -19,7 +19,7 @@ package api
import (
"fmt"
"github.com/ghodss/yaml"
"sigs.k8s.io/yaml"
)
func Example_emptyConfig() {


@@ -24,8 +24,8 @@ import (
"os"
"strings"
"github.com/golang/glog"
"github.com/imdario/mergo"
"k8s.io/klog"
restclient "k8s.io/client-go/rest"
clientauth "k8s.io/client-go/tools/auth"
@@ -545,12 +545,12 @@ func (config *inClusterClientConfig) Possible() bool {
// to the default config.
func BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*restclient.Config, error) {
if kubeconfigPath == "" && masterUrl == "" {
glog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.")
klog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.")
kubeconfig, err := restclient.InClusterConfig()
if err == nil {
return kubeconfig, nil
}
glog.Warning("error creating inClusterConfig, falling back to default config: ", err)
klog.Warning("error creating inClusterConfig, falling back to default config: ", err)
}
return NewNonInteractiveDeferredLoadingClientConfig(
&ClientConfigLoadingRules{ExplicitPath: kubeconfigPath},


@@ -24,7 +24,7 @@ import (
"reflect"
"sort"
"github.com/golang/glog"
"k8s.io/klog"
restclient "k8s.io/client-go/rest"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
@@ -483,7 +483,7 @@ func getConfigFromFile(filename string) (*clientcmdapi.Config, error) {
func GetConfigFromFileOrDie(filename string) *clientcmdapi.Config {
config, err := getConfigFromFile(filename)
if err != nil {
glog.FatalDepth(1, err)
klog.FatalDepth(1, err)
}
return config


@@ -27,8 +27,8 @@ import (
goruntime "runtime"
"strings"
"github.com/golang/glog"
"github.com/imdario/mergo"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -356,7 +356,7 @@ func LoadFromFile(filename string) (*clientcmdapi.Config, error) {
if err != nil {
return nil, err
}
glog.V(6).Infoln("Config loaded from file", filename)
klog.V(6).Infoln("Config loaded from file", filename)
// set LocationOfOrigin on every Cluster, User, and Context
for key, obj := range config.AuthInfos {


@@ -26,7 +26,7 @@ import (
"strings"
"testing"
"github.com/ghodss/yaml"
"sigs.k8s.io/yaml"
"k8s.io/apimachinery/pkg/runtime"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"


@@ -20,7 +20,7 @@ import (
"io"
"sync"
"github.com/golang/glog"
"k8s.io/klog"
restclient "k8s.io/client-go/rest"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
@@ -119,7 +119,7 @@ func (config *DeferredLoadingClientConfig) ClientConfig() (*restclient.Config, e
// check for in-cluster configuration and use it
if config.icc.Possible() {
glog.V(4).Infof("Using in-cluster configuration")
klog.V(4).Infof("Using in-cluster configuration")
return config.icc.ClientConfig()
}
@@ -156,7 +156,7 @@ func (config *DeferredLoadingClientConfig) Namespace() (string, bool, error) {
}
}
glog.V(4).Infof("Using in-cluster namespace")
klog.V(4).Infof("Using in-cluster namespace")
// allow the namespace from the service account token directory to be used.
return config.icc.Namespace()


@@ -0,0 +1,69 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package leaderelection
import (
"net/http"
"sync"
"time"
)
// HealthzAdaptor associates the /healthz endpoint with the LeaderElection object.
// It helps deal with the /healthz endpoint being set up prior to the LeaderElection.
// This contains the code needed to act as an adaptor between the leader
// election code and the health check code. It allows us to provide health
// status about the leader election, most specifically whether the leader
// has failed to renew without exiting the process. In that case we should
// report not healthy and rely on the kubelet to take down the process.
type HealthzAdaptor struct {
pointerLock sync.Mutex
le *LeaderElector
timeout time.Duration
}
// Name returns the name of the health check we are implementing.
func (l *HealthzAdaptor) Name() string {
return "leaderElection"
}
// Check is called by the healthz endpoint handler.
// It fails (returns an error) if we own the lease but have not been able to renew it.
func (l *HealthzAdaptor) Check(req *http.Request) error {
l.pointerLock.Lock()
defer l.pointerLock.Unlock()
if l.le == nil {
return nil
}
return l.le.Check(l.timeout)
}
// SetLeaderElection ties a leader election object to a HealthzAdaptor
func (l *HealthzAdaptor) SetLeaderElection(le *LeaderElector) {
l.pointerLock.Lock()
defer l.pointerLock.Unlock()
l.le = le
}
// NewLeaderHealthzAdaptor creates a basic healthz adaptor to monitor a leader election.
// timeout determines how far past lease expiry a renew may lag before Check reports failure.
// Checks within the timeout period after the lease expires will still return healthy.
func NewLeaderHealthzAdaptor(timeout time.Duration) *HealthzAdaptor {
result := &HealthzAdaptor{
timeout: timeout,
}
return result
}
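A minimal, hypothetical way to expose the new adaptor on an HTTP /healthz endpoint (the port is arbitrary); until SetLeaderElection is called, Check returns nil, so the endpoint reports healthy before an elector exists:

package example

import (
	"net/http"
	"time"

	"k8s.io/client-go/tools/leaderelection"
)

// serveLeaderHealthz starts a /healthz endpoint and returns the adaptor, which
// is meant to be passed as LeaderElectionConfig.WatchDog so RunOrDie can tie
// it to the elector via SetLeaderElection.
func serveLeaderHealthz() *leaderelection.HealthzAdaptor {
	// Allow 20s of renewal lag beyond the lease duration before failing checks.
	adaptor := leaderelection.NewLeaderHealthzAdaptor(20 * time.Second)

	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		if err := adaptor.Check(r); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Write([]byte("ok"))
	})
	go http.ListenAndServe(":10254", mux) // error ignored for brevity

	return adaptor
}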


@@ -0,0 +1,175 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package leaderelection
import (
"fmt"
"testing"
"time"
"k8s.io/apimachinery/pkg/util/clock"
rl "k8s.io/client-go/tools/leaderelection/resourcelock"
"net/http"
)
type fakeLock struct {
identity string
}
// Get is a dummy to allow us to have a fakeLock for testing.
func (fl *fakeLock) Get() (ler *rl.LeaderElectionRecord, err error) {
return nil, nil
}
// Create is a dummy to allow us to have a fakeLock for testing.
func (fl *fakeLock) Create(ler rl.LeaderElectionRecord) error {
return nil
}
// Update is a dummy to allow us to have a fakeLock for testing.
func (fl *fakeLock) Update(ler rl.LeaderElectionRecord) error {
return nil
}
// RecordEvent is a dummy to allow us to have a fakeLock for testing.
func (fl *fakeLock) RecordEvent(string) {}
// Identity is a dummy to allow us to have a fakeLock for testing.
func (fl *fakeLock) Identity() string {
return fl.identity
}
// Describe is a dummy to allow us to have a fakeLock for testing.
func (fl *fakeLock) Describe() string {
return "Dummy implementation of lock for testing"
}
// TestLeaderElectionHealthChecker tests that the healthcheck for leader election handles its edge cases.
func TestLeaderElectionHealthChecker(t *testing.T) {
current := time.Now()
req := &http.Request{}
tests := []struct {
description string
expected error
adaptorTimeout time.Duration
elector *LeaderElector
}{
{
description: "call check before leader elector initialized",
expected: nil,
adaptorTimeout: time.Second * 20,
elector: nil,
},
{
description: "call check when the the lease is far expired",
expected: fmt.Errorf("failed election to renew leadership on lease %s", "foo"),
adaptorTimeout: time.Second * 20,
elector: &LeaderElector{
config: LeaderElectionConfig{
Lock: &fakeLock{identity: "healthTest"},
LeaseDuration: time.Minute,
Name: "foo",
},
observedRecord: rl.LeaderElectionRecord{
HolderIdentity: "healthTest",
},
observedTime: current,
clock: clock.NewFakeClock(current.Add(time.Hour)),
},
},
{
description: "call check when the the lease is far expired but held by another server",
expected: nil,
adaptorTimeout: time.Second * 20,
elector: &LeaderElector{
config: LeaderElectionConfig{
Lock: &fakeLock{identity: "healthTest"},
LeaseDuration: time.Minute,
Name: "foo",
},
observedRecord: rl.LeaderElectionRecord{
HolderIdentity: "otherServer",
},
observedTime: current,
clock: clock.NewFakeClock(current.Add(time.Hour)),
},
},
{
description: "call check when the the lease is not expired",
expected: nil,
adaptorTimeout: time.Second * 20,
elector: &LeaderElector{
config: LeaderElectionConfig{
Lock: &fakeLock{identity: "healthTest"},
LeaseDuration: time.Minute,
Name: "foo",
},
observedRecord: rl.LeaderElectionRecord{
HolderIdentity: "healthTest",
},
observedTime: current,
clock: clock.NewFakeClock(current),
},
},
{
description: "call check when the the lease is expired but inside the timeout",
expected: nil,
adaptorTimeout: time.Second * 20,
elector: &LeaderElector{
config: LeaderElectionConfig{
Lock: &fakeLock{identity: "healthTest"},
LeaseDuration: time.Minute,
Name: "foo",
},
observedRecord: rl.LeaderElectionRecord{
HolderIdentity: "healthTest",
},
observedTime: current,
clock: clock.NewFakeClock(current.Add(time.Minute).Add(time.Second)),
},
},
}
for _, test := range tests {
adaptor := NewLeaderHealthzAdaptor(test.adaptorTimeout)
if adaptor.le != nil {
t.Errorf("[%s] leaderChecker started with a LeaderElector %v", test.description, adaptor.le)
}
if test.elector != nil {
test.elector.config.WatchDog = adaptor
adaptor.SetLeaderElection(test.elector)
if adaptor.le == nil {
t.Errorf("[%s] adaptor failed to set the LeaderElector", test.description)
}
}
err := adaptor.Check(req)
if test.expected == nil {
if err == nil {
continue
}
t.Errorf("[%s] called check, expected no error but received \"%v\"", test.description, err)
} else {
if err == nil {
t.Errorf("[%s] called check and failed to received the expected error \"%v\"", test.description, test.expected)
}
if err.Error() != test.expected.Error() {
t.Errorf("[%s] called check, expected %v, received %v", test.description, test.expected, err)
}
}
}
}


@@ -56,11 +56,12 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
rl "k8s.io/client-go/tools/leaderelection/resourcelock"
"github.com/golang/glog"
"k8s.io/klog"
)
const (
@@ -90,6 +91,7 @@ func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) {
}
return &LeaderElector{
config: lec,
clock: clock.RealClock{},
}, nil
}
@@ -111,6 +113,13 @@ type LeaderElectionConfig struct {
// Callbacks are callbacks that are triggered during certain lifecycle
// events of the LeaderElector
Callbacks LeaderCallbacks
// WatchDog is the associated health checker
// WatchDog may be nil if it's not needed/configured.
WatchDog *HealthzAdaptor
// Name is the name of the resource lock for debugging
Name string
}
// LeaderCallbacks are callbacks that are triggered during certain
@@ -139,6 +148,12 @@ type LeaderElector struct {
// value observedRecord.HolderIdentity if the transition has
// not yet been reported.
reportedLeader string
// clock is a wrapper around time to allow for less flaky testing
clock clock.Clock
// name is the name of the resource lock for debugging
name string
}
// Run starts the leader election loop
@@ -163,6 +178,9 @@ func RunOrDie(ctx context.Context, lec LeaderElectionConfig) {
if err != nil {
panic(err)
}
if lec.WatchDog != nil {
lec.WatchDog.SetLeaderElection(le)
}
le.Run(ctx)
}
@@ -184,16 +202,16 @@ func (le *LeaderElector) acquire(ctx context.Context) bool {
defer cancel()
succeeded := false
desc := le.config.Lock.Describe()
glog.Infof("attempting to acquire leader lease %v...", desc)
klog.Infof("attempting to acquire leader lease %v...", desc)
wait.JitterUntil(func() {
succeeded = le.tryAcquireOrRenew()
le.maybeReportTransition()
if !succeeded {
glog.V(4).Infof("failed to acquire lease %v", desc)
klog.V(4).Infof("failed to acquire lease %v", desc)
return
}
le.config.Lock.RecordEvent("became leader")
glog.Infof("successfully acquired lease %v", desc)
klog.Infof("successfully acquired lease %v", desc)
cancel()
}, le.config.RetryPeriod, JitterFactor, true, ctx.Done())
return succeeded
@@ -224,11 +242,11 @@ func (le *LeaderElector) renew(ctx context.Context) {
le.maybeReportTransition()
desc := le.config.Lock.Describe()
if err == nil {
glog.V(5).Infof("successfully renewed lease %v", desc)
klog.V(5).Infof("successfully renewed lease %v", desc)
return
}
le.config.Lock.RecordEvent("stopped leading")
glog.Infof("failed to renew lease %v: %v", desc, err)
klog.Infof("failed to renew lease %v: %v", desc, err)
cancel()
}, le.config.RetryPeriod, ctx.Done())
}
@@ -249,26 +267,26 @@ func (le *LeaderElector) tryAcquireOrRenew() bool {
oldLeaderElectionRecord, err := le.config.Lock.Get()
if err != nil {
if !errors.IsNotFound(err) {
glog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err)
klog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err)
return false
}
if err = le.config.Lock.Create(leaderElectionRecord); err != nil {
glog.Errorf("error initially creating leader election record: %v", err)
klog.Errorf("error initially creating leader election record: %v", err)
return false
}
le.observedRecord = leaderElectionRecord
le.observedTime = time.Now()
le.observedTime = le.clock.Now()
return true
}
// 2. Record obtained, check the Identity & Time
if !reflect.DeepEqual(le.observedRecord, *oldLeaderElectionRecord) {
le.observedRecord = *oldLeaderElectionRecord
le.observedTime = time.Now()
le.observedTime = le.clock.Now()
}
if le.observedTime.Add(le.config.LeaseDuration).After(now.Time) &&
!le.IsLeader() {
glog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity)
klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity)
return false
}
@@ -283,11 +301,11 @@ func (le *LeaderElector) tryAcquireOrRenew() bool {
// update the lock itself
if err = le.config.Lock.Update(leaderElectionRecord); err != nil {
glog.Errorf("Failed to update lock: %v", err)
klog.Errorf("Failed to update lock: %v", err)
return false
}
le.observedRecord = leaderElectionRecord
le.observedTime = time.Now()
le.observedTime = le.clock.Now()
return true
}
@@ -300,3 +318,19 @@ func (le *LeaderElector) maybeReportTransition() {
go le.config.Callbacks.OnNewLeader(le.reportedLeader)
}
}
// Check will determine if the current lease is expired by more than timeout.
func (le *LeaderElector) Check(maxTolerableExpiredLease time.Duration) error {
if !le.IsLeader() {
// Currently not concerned with the case that we are hot standby
return nil
}
// If we are more than timeout seconds past the lease duration without a successful renew,
// it is time to start reporting ourselves as unhealthy. We should have
// died, but conditions like deadlock can prevent this. (See #70819)
if le.clock.Since(le.observedTime) > le.config.LeaseDuration+maxTolerableExpiredLease {
return fmt.Errorf("failed election to renew leadership on lease %s", le.config.Name)
}
return nil
}
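Tying the new fields together, a hypothetical RunOrDie call that sets WatchDog and Name so the Check error above can identify the lease; building the resource lock itself (usually via the resourcelock package) is elided:

package example

import (
	"context"
	"time"

	"k8s.io/client-go/tools/leaderelection"
	rl "k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/klog"
)

// runElection blocks, campaigning for the given lock; onStarted runs while we lead.
func runElection(ctx context.Context, lock rl.Interface, watchdog *leaderelection.HealthzAdaptor,
	onStarted func(context.Context)) {
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		WatchDog:      watchdog, // RunOrDie calls watchdog.SetLeaderElection(le)
		Name:          "example-controller",
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: onStarted,
			OnStoppedLeading: func() { klog.Info("lost leadership") },
		},
	})
}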


@@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
fakecorev1 "k8s.io/client-go/kubernetes/typed/core/v1/fake"
core "k8s.io/client-go/testing"
rl "k8s.io/client-go/tools/leaderelection/resourcelock"
@@ -257,6 +258,7 @@ func testTryAcquireOrRenew(t *testing.T, objectType string) {
config: lec,
observedRecord: test.observedRecord,
observedTime: test.observedTime,
clock: clock.RealClock{},
}
if test.expectSuccess != le.tryAcquireOrRenew() {


@@ -33,7 +33,7 @@ import (
"net/http"
"github.com/golang/glog"
"k8s.io/klog"
)
const maxTriesPerEvent = 12
@@ -144,7 +144,7 @@ func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrela
}
tries++
if tries >= maxTriesPerEvent {
glog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event)
klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event)
break
}
// Randomize the first sleep so that various clients won't all be
@@ -194,13 +194,13 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv
switch err.(type) {
case *restclient.RequestConstructionError:
// We will construct the request the same next time, so don't keep trying.
glog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err)
klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err)
return true
case *errors.StatusError:
if errors.IsAlreadyExists(err) {
glog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err)
klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err)
} else {
glog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err)
klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err)
}
return true
case *errors.UnexpectedObjectError:
@@ -209,7 +209,7 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv
default:
// This case includes actual http transport errors. Go ahead and retry.
}
glog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err)
klog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err)
return false
}
@@ -256,12 +256,12 @@ type recorderImpl struct {
func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, timestamp metav1.Time, eventtype, reason, message string) {
ref, err := ref.GetReference(recorder.scheme, object)
if err != nil {
glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message)
klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message)
return
}
if !validateEventType(eventtype) {
glog.Errorf("Unsupported event type: '%v'", eventtype)
klog.Errorf("Unsupported event type: '%v'", eventtype)
return
}
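For orientation, the retry logic above sits behind the usual EventBroadcaster/EventRecorder wiring; a hypothetical setup (the component name is illustrative):

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

// newRecorder wires an EventBroadcaster to the API server; recordToSink (above)
// retries each event up to maxTriesPerEvent times before giving up.
func newRecorder(events typedcorev1.EventsGetter) record.EventRecorder {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: events.Events("")})
	return broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "example-controller"})
}

// Usage sketch: newRecorder(clientset.CoreV1()).Eventf(obj, v1.EventTypeNormal, "Synced", "object synced")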

vendor/k8s.io/client-go/tools/remotecommand/reader.go (vendored, new file, 41 lines)

@@ -0,0 +1,41 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
import (
"io"
)
// readerWrapper delegates to an io.Reader so that only the io.Reader interface is implemented,
// to keep io.Copy from doing things we don't want when copying from the reader to the data stream.
//
// If the Stdin io.Reader provided to remotecommand implements a WriteTo function (like bytes.Buffer does[1]),
// io.Copy calls that method[2] to attempt to write the entire buffer to the stream in one call.
// That results in an oversized call to spdystream.Stream#Write [3],
// which results in a single oversized data frame[4] that is too large.
//
// [1] https://golang.org/pkg/bytes/#Buffer.WriteTo
// [2] https://golang.org/pkg/io/#Copy
// [3] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/stream.go#L66-L73
// [4] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/spdy/write.go#L302-L304
type readerWrapper struct {
reader io.Reader
}
func (r readerWrapper) Read(p []byte) (int, error) {
return r.reader.Read(p)
}
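The behaviour described in the comment block above can be reproduced with the standard library alone; a self-contained sketch (the wrapper type is re-declared locally) that counts how many Write calls io.Copy issues with and without the wrapper:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// readerWrapper re-declares the vendored type: exposing only Read hides
// bytes.Buffer's WriteTo, so io.Copy falls back to chunked copies.
type readerWrapper struct{ reader io.Reader }

func (r readerWrapper) Read(p []byte) (int, error) { return r.reader.Read(p) }

// writeSizes records the length of every Write it receives.
type writeSizes struct{ sizes []int }

func (w *writeSizes) Write(p []byte) (int, error) {
	w.sizes = append(w.sizes, len(p))
	return len(p), nil
}

func main() {
	payload := strings.Repeat("x", 1<<20) // 1 MiB

	direct := &writeSizes{}
	io.Copy(direct, bytes.NewBufferString(payload)) // Buffer.WriteTo: one 1 MiB Write

	wrapped := &writeSizes{}
	io.Copy(wrapped, readerWrapper{reader: bytes.NewBufferString(payload)}) // 32 KiB chunks

	fmt.Printf("direct: %d write(s), wrapped: %d writes\n", len(direct.sizes), len(wrapped.sizes))
}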


@@ -22,7 +22,7 @@ import (
"net/http"
"net/url"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/remotecommand"
@@ -132,7 +132,7 @@ func (e *streamExecutor) Stream(options StreamOptions) error {
case remotecommand.StreamProtocolV2Name:
streamer = newStreamProtocolV2(options)
case "":
glog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name)
klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name)
fallthrough
case remotecommand.StreamProtocolV1Name:
streamer = newStreamProtocolV1(options)
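The protocol negotiation above runs whenever a client execs or attaches to a pod; a hypothetical exec helper using the SPDY executor, whose stdin copy is what readerWrapper (the new reader.go above) protects:

package example

import (
	"bytes"
	"net/url"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

// execInPod streams a command to an already-built exec URL (the URL is normally
// produced with the clientset's RESTClient and an exec options body).
func execInPod(config *rest.Config, execURL *url.URL, stdin string) (string, error) {
	exec, err := remotecommand.NewSPDYExecutor(config, "POST", execURL)
	if err != nil {
		return "", err
	}
	var stdout, stderr bytes.Buffer
	err = exec.Stream(remotecommand.StreamOptions{
		Stdin:  bytes.NewBufferString(stdin), // wrapped by readerWrapper internally
		Stdout: &stdout,
		Stderr: &stderr,
		Tty:    false,
	})
	return stdout.String(), err
}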


@@ -22,9 +22,9 @@ import (
"io/ioutil"
"net/http"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/klog"
)
// streamProtocolV1 implements the first version of the streaming exec & attach
@@ -53,10 +53,10 @@ func (p *streamProtocolV1) stream(conn streamCreator) error {
errorChan := make(chan error)
cp := func(s string, dst io.Writer, src io.Reader) {
glog.V(6).Infof("Copying %s", s)
defer glog.V(6).Infof("Done copying %s", s)
klog.V(6).Infof("Copying %s", s)
defer klog.V(6).Infof("Done copying %s", s)
if _, err := io.Copy(dst, src); err != nil && err != io.EOF {
glog.Errorf("Error copying %s: %v", s, err)
klog.Errorf("Error copying %s: %v", s, err)
}
if s == v1.StreamTypeStdout || s == v1.StreamTypeStderr {
doneChan <- struct{}{}
@@ -127,7 +127,7 @@ func (p *streamProtocolV1) stream(conn streamCreator) error {
// because stdin is not closed until the process exits. If we try to call
// stdin.Close(), it returns no error but doesn't unblock the copy. It will
// exit when the process exits, instead.
go cp(v1.StreamTypeStdin, p.remoteStdin, p.Stdin)
go cp(v1.StreamTypeStdin, p.remoteStdin, readerWrapper{p.Stdin})
}
waitCount := 0


@@ -101,7 +101,7 @@ func (p *streamProtocolV2) copyStdin() {
// the executed command will remain running.
defer once.Do(func() { p.remoteStdin.Close() })
if _, err := io.Copy(p.remoteStdin, p.Stdin); err != nil {
if _, err := io.Copy(p.remoteStdin, readerWrapper{p.Stdin}); err != nil {
runtime.HandleError(err)
}
}()


@@ -22,13 +22,13 @@ import (
"fmt"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
"k8s.io/klog"
)
// PreconditionFunc returns true if the condition has been reached, false if it has not been reached yet,
@@ -135,7 +135,7 @@ func UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime.
func ContextWithOptionalTimeout(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
if timeout < 0 {
// This should be handled in validation
glog.Errorf("Timeout for context shall not be negative!")
klog.Errorf("Timeout for context shall not be negative!")
timeout = 0
}