Bumping k8s version to 1.13.0-beta.1

Author: Cheng Xing
Date:   2018-11-19 14:10:50 -08:00
parent 01bd7f356e
commit e2f1bdc372
633 changed files with 11189 additions and 126194 deletions


@@ -56,11 +56,12 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
rl "k8s.io/client-go/tools/leaderelection/resourcelock"
"github.com/golang/glog"
"k8s.io/klog"
)
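The import swap above is the klog migration: the vendored client-go now logs through k8s.io/klog instead of github.com/golang/glog, with the same call surface (Infof, Errorf, V(n)). A minimal consumer-side sketch, assuming only the standard flag package, of initializing klog before using such a client:

    package main

    import (
        "flag"

        "k8s.io/klog"
    )

    func main() {
        klog.InitFlags(nil) // register -v, -logtostderr, etc. on the default FlagSet
        flag.Parse()
        defer klog.Flush()

        klog.Infof("attempting to acquire leader lease...")
        klog.V(4).Infof("only emitted when running with -v=4 or higher")
    }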
const (
@@ -90,6 +91,7 @@ func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) {
}
return &LeaderElector{
config: lec,
clock: clock.RealClock{},
}, nil
}
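NewLeaderElector now seeds the elector with clock.RealClock{} so that package tests can substitute a fake clock (the field is unexported, so injection is only possible from inside the package). A small fragment, assuming k8s.io/apimachinery/pkg/util/clock and time are imported, contrasting the two clocks:

    realClock := clock.RealClock{}              // production default set by NewLeaderElector
    fakeClock := clock.NewFakeClock(time.Now()) // tests: time only advances when told to

    fakeClock.Step(10 * time.Second) // move the fake "now" forward without sleeping
    _ = realClock.Now()
    _ = fakeClock.Now()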
@@ -111,6 +113,13 @@ type LeaderElectionConfig struct {
// Callbacks are callbacks that are triggered during certain lifecycle
// events of the LeaderElector
Callbacks LeaderCallbacks
+// WatchDog is the associated health checker
+// WatchDog may be null if it's not needed/configured.
+WatchDog *HealthzAdaptor
+// Name is the name of the resource lock for debugging
+Name string
}
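The config gains an optional health-check adaptor and a Name that shows up in debug output and in Check's error message. A hypothetical caller-side fragment; the lock, callbacks, and durations are placeholders, and the adaptor is built with the NewLeaderHealthzAdaptor constructor that accompanies this change:

    watchdog := leaderelection.NewLeaderHealthzAdaptor(20 * time.Second)

    lec := leaderelection.LeaderElectionConfig{
        Lock:          lock,
        LeaseDuration: 15 * time.Second,
        RenewDeadline: 10 * time.Second,
        RetryPeriod:   2 * time.Second,
        Callbacks:     callbacks,
        WatchDog:      watchdog, // may stay nil if no health check is wanted
        Name:          "example-controller",
    }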
// LeaderCallbacks are callbacks that are triggered during certain
@@ -139,6 +148,12 @@ type LeaderElector struct {
// value observedRecord.HolderIdentity if the transition has
// not yet been reported.
reportedLeader string
+// clock is a wrapper around time to allow for less flaky testing
+clock clock.Clock
+// name is the name of the resource lock for debugging
+name string
}
// Run starts the leader election loop
@@ -163,6 +178,9 @@ func RunOrDie(ctx context.Context, lec LeaderElectionConfig) {
if err != nil {
panic(err)
}
+if lec.WatchDog != nil {
+lec.WatchDog.SetLeaderElection(le)
+}
le.Run(ctx)
}
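Because RunOrDie now calls SetLeaderElection on the watchdog itself, a caller only needs to expose the adaptor where a liveness probe can reach it. A rough sketch continuing the hypothetical fragment above (watchdog and lec from the earlier sketch; ctx is assumed), using plain net/http with an arbitrary path and port:

    http.HandleFunc("/healthz/leader-election", func(w http.ResponseWriter, r *http.Request) {
        if err := watchdog.Check(r); err != nil { // delegates to le.Check(timeout)
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        w.Write([]byte("ok"))
    })
    go http.ListenAndServe(":8080", nil)

    leaderelection.RunOrDie(ctx, lec) // blocks; panics if lec is invalid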
@@ -184,16 +202,16 @@ func (le *LeaderElector) acquire(ctx context.Context) bool {
defer cancel()
succeeded := false
desc := le.config.Lock.Describe()
glog.Infof("attempting to acquire leader lease %v...", desc)
klog.Infof("attempting to acquire leader lease %v...", desc)
wait.JitterUntil(func() {
succeeded = le.tryAcquireOrRenew()
le.maybeReportTransition()
if !succeeded {
glog.V(4).Infof("failed to acquire lease %v", desc)
klog.V(4).Infof("failed to acquire lease %v", desc)
return
}
le.config.Lock.RecordEvent("became leader")
glog.Infof("successfully acquired lease %v", desc)
klog.Infof("successfully acquired lease %v", desc)
cancel()
}, le.config.RetryPeriod, JitterFactor, true, ctx.Done())
return succeeded
@@ -224,11 +242,11 @@ func (le *LeaderElector) renew(ctx context.Context) {
le.maybeReportTransition()
desc := le.config.Lock.Describe()
if err == nil {
-glog.V(5).Infof("successfully renewed lease %v", desc)
+klog.V(5).Infof("successfully renewed lease %v", desc)
return
}
le.config.Lock.RecordEvent("stopped leading")
glog.Infof("failed to renew lease %v: %v", desc, err)
klog.Infof("failed to renew lease %v: %v", desc, err)
cancel()
}, le.config.RetryPeriod, ctx.Done())
}
@@ -249,26 +267,26 @@ func (le *LeaderElector) tryAcquireOrRenew() bool {
oldLeaderElectionRecord, err := le.config.Lock.Get()
if err != nil {
if !errors.IsNotFound(err) {
glog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err)
klog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err)
return false
}
if err = le.config.Lock.Create(leaderElectionRecord); err != nil {
glog.Errorf("error initially creating leader election record: %v", err)
klog.Errorf("error initially creating leader election record: %v", err)
return false
}
le.observedRecord = leaderElectionRecord
-le.observedTime = time.Now()
+le.observedTime = le.clock.Now()
return true
}
// 2. Record obtained, check the Identity & Time
if !reflect.DeepEqual(le.observedRecord, *oldLeaderElectionRecord) {
le.observedRecord = *oldLeaderElectionRecord
-le.observedTime = time.Now()
+le.observedTime = le.clock.Now()
}
if le.observedTime.Add(le.config.LeaseDuration).After(now.Time) &&
!le.IsLeader() {
glog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity)
klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity)
return false
}
@@ -283,11 +301,11 @@ func (le *LeaderElector) tryAcquireOrRenew() bool {
// update the lock itself
if err = le.config.Lock.Update(leaderElectionRecord); err != nil {
glog.Errorf("Failed to update lock: %v", err)
klog.Errorf("Failed to update lock: %v", err)
return false
}
le.observedRecord = leaderElectionRecord
-le.observedTime = time.Now()
+le.observedTime = le.clock.Now()
return true
}
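Every observedTime update in this function now goes through le.clock, which is what makes lease-expiry behaviour testable without real sleeps. An illustrative fragment of the same expiry comparison tryAcquireOrRenew performs, driven by a fake clock and an example 15s lease:

    fakeClock := clock.NewFakeClock(time.Now())
    observedTime := fakeClock.Now() // what a successful acquire/renew would record

    fakeClock.Step(20 * time.Second) // jump past the lease instead of sleeping

    stillHeld := observedTime.Add(15 * time.Second).After(fakeClock.Now())
    fmt.Println(stillHeld) // false: a 15s lease is treated as expired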
@@ -300,3 +318,19 @@ func (le *LeaderElector) maybeReportTransition() {
go le.config.Callbacks.OnNewLeader(le.reportedLeader)
}
}
+// Check will determine if the current lease is expired by more than timeout.
+func (le *LeaderElector) Check(maxTolerableExpiredLease time.Duration) error {
+if !le.IsLeader() {
+// Currently not concerned with the case that we are hot standby
+return nil
+}
+// If we are more than maxTolerableExpiredLease past the point where the lease
+// should have been renewed, start reporting ourselves as unhealthy. We should
+// have died by then, but conditions like deadlock can prevent this. (See #70819)
+if le.clock.Since(le.observedTime) > le.config.LeaseDuration+maxTolerableExpiredLease {
+return fmt.Errorf("failed election to renew leadership on lease %s", le.config.Name)
+}
+return nil
+}
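The new Check method is what the HealthzAdaptor invokes: a non-leader is always reported healthy, while a leader turns unhealthy once its last observed renewal is older than LeaseDuration plus the tolerance passed in. A short fragment with illustrative numbers (15s lease, 10s tolerance, renewal stalled for 30s):

    leaseDuration := 15 * time.Second        // illustrative, not taken from the library
    maxTolerableExpiredLease := 10 * time.Second
    sinceLastRenew := 30 * time.Second       // e.g. the renew loop is wedged by a deadlock

    unhealthy := sinceLastRenew > leaseDuration+maxTolerableExpiredLease
    fmt.Println(unhealthy) // true: Check returns an error and the health endpoint fails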