Update dependency go modules for k8s v1.27.0-rc.0

Author: Sunny Song
Date:   2023-03-28 16:51:42 +00:00
Parent: d226af52de
Commit: 236537f90c
600 changed files with 33883 additions and 16186 deletions

File: k8s.io/client-go/tools/leaderelection/leaderelection.go

@@ -64,9 +64,8 @@ import (
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	rl "k8s.io/client-go/tools/leaderelection/resourcelock"
-	"k8s.io/utils/clock"
-
 	"k8s.io/klog/v2"
+	"k8s.io/utils/clock"
 )

 const (
@@ -199,9 +198,7 @@ type LeaderElector struct {
 // stopped holding the leader lease
 func (le *LeaderElector) Run(ctx context.Context) {
 	defer runtime.HandleCrash()
-	defer func() {
-		le.config.Callbacks.OnStoppedLeading()
-	}()
+	defer le.config.Callbacks.OnStoppedLeading()
 	if !le.acquire(ctx) {
 		return // ctx signalled done
 	}
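
Note: collapsing the closure is behavior-preserving. In Go, a deferred method call evaluates its receiver and arguments when the defer statement executes, but runs the call at function return; since le.config.Callbacks never changes after construction, the wrapper added nothing. A minimal standalone sketch (names are illustrative, not client-go API):

    package main

    import "fmt"

    type callbacks struct{ name string }

    func (c callbacks) OnStoppedLeading() { fmt.Println("stopped:", c.name) }

    func run() {
        cb := callbacks{name: "node-1"}
        // The receiver is captured here; the call itself runs when run()
        // returns, which matches wrapping the call in a deferred closure
        // when the receiver never changes in between.
        defer cb.OnStoppedLeading()
        fmt.Println("running")
    }

    func main() { run() } // prints "running", then "stopped: node-1"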
@@ -263,6 +260,7 @@ func (le *LeaderElector) acquire(ctx context.Context) bool {

 // renew loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew fails or ctx signals done.
 func (le *LeaderElector) renew(ctx context.Context) {
+	defer le.config.Lock.RecordEvent("stopped leading")
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 	wait.Until(func() {
@@ -278,7 +276,6 @@ func (le *LeaderElector) renew(ctx context.Context) {
 			klog.V(5).Infof("successfully renewed lease %v", desc)
 			return
 		}
-		le.config.Lock.RecordEvent("stopped leading")
 		le.metrics.leaderOff(le.config.Name)
 		klog.Infof("failed to renew lease %v: %v", desc, err)
 		cancel()
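
Note: together with the previous hunk, this moves the "stopped leading" event out of the failure branch of the renew loop and into a defer at the top of renew, so it is recorded exactly once on every exit path, including context cancellation, not only when a renewal attempt fails. A rough sketch of the pattern (not the client-go implementation):

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // renewLoop mirrors the pattern: the terminal event is deferred so it
    // fires exactly once on every exit path, whether renewal fails or the
    // context is cancelled.
    func renewLoop(ctx context.Context, tryRenew func() bool, record func(string)) {
        defer record("stopped leading")
        for {
            select {
            case <-ctx.Done():
                return
            case <-time.After(10 * time.Millisecond):
                if !tryRenew() {
                    return
                }
            }
        }
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
        defer cancel()
        renewLoop(ctx, func() bool { return true },
            func(e string) { fmt.Println("event:", e) })
    }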
@@ -295,7 +292,7 @@ func (le *LeaderElector) release() bool {
 	if !le.IsLeader() {
 		return true
 	}
-	now := metav1.Now()
+	now := metav1.NewTime(le.clock.Now())
 	leaderElectionRecord := rl.LeaderElectionRecord{
 		LeaderTransitions:    le.observedRecord.LeaderTransitions,
 		LeaseDurationSeconds: 1,
@@ -315,7 +312,7 @@ func (le *LeaderElector) release() bool {
 // else it tries to renew the lease if it has already been acquired. Returns true
 // on success else returns false.
 func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool {
-	now := metav1.Now()
+	now := metav1.NewTime(le.clock.Now())
 	leaderElectionRecord := rl.LeaderElectionRecord{
 		HolderIdentity:       le.config.Lock.Identity(),
 		LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second),
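
Note: both timestamps now come from the clock injected into the LeaderElector rather than metav1.Now(), which makes lease timing deterministic in tests. A sketch of the pattern, assuming k8s.io/utils/clock and its testing fake (the stamp helper is a stand-in, not client-go API):

    package main

    import (
        "fmt"
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/utils/clock"
        clocktesting "k8s.io/utils/clock/testing"
    )

    // stamp takes time from an injected clock instead of calling
    // metav1.Now() directly, mirroring the change above.
    func stamp(c clock.PassiveClock) metav1.Time {
        return metav1.NewTime(c.Now())
    }

    func main() {
        fmt.Println(stamp(clock.RealClock{})) // production: wall clock

        fake := clocktesting.NewFakeClock(time.Date(2023, 3, 28, 0, 0, 0, 0, time.UTC))
        fmt.Println(stamp(fake)) // test: deterministic time
        fake.Step(30 * time.Second)
        fmt.Println(stamp(fake)) // advanced without sleeping
    }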
@@ -347,7 +344,7 @@ func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool {
 		le.observedRawRecord = oldLeaderElectionRawRecord
 	}
 	if len(oldLeaderElectionRecord.HolderIdentity) > 0 &&
-		le.observedTime.Add(le.config.LeaseDuration).After(now.Time) &&
+		le.observedTime.Add(time.Second*time.Duration(oldLeaderElectionRecord.LeaseDurationSeconds)).After(now.Time) &&
 		!le.IsLeader() {
 		klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity)
 		return false
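
Note: expiry of another candidate's lease is now judged by the LeaseDurationSeconds the current holder wrote into the record, not by this candidate's own LeaseDuration config, so candidates with different settings agree on when the lock expires. A worked example with made-up numbers:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Hypothetical values showing why the holder's advertised duration matters.
        observedTime := time.Date(2023, 3, 28, 10, 0, 0, 0, time.UTC) // last observed renewal
        now := observedTime.Add(12 * time.Second)

        localLeaseDuration := 5 * time.Second // this candidate's own config
        holderLeaseSeconds := 15              // what the holder wrote in the record

        // Old check: uses the local config; a 5s candidate would steal a 15s lease early.
        fmt.Println("old, still held:",
            observedTime.Add(localLeaseDuration).After(now)) // false

        // New check: uses the holder's advertised duration, so all candidates agree.
        fmt.Println("new, still held:",
            observedTime.Add(time.Second*time.Duration(holderLeaseSeconds)).After(now)) // true
    }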

File: k8s.io/client-go/tools/leaderelection/resourcelock/interface.go

@@ -68,7 +68,7 @@ const (
 	// name: '*'
 	// namespace: kube-system
 	EndpointsLeasesResourceLock = "endpointsleases"
-	// When using EndpointsLeasesResourceLock, you need to ensure that
+	// When using ConfigMapsLeasesResourceLock, you need to ensure that
 	// API Priority & Fairness is configured with non-default flow-schema
 	// that will catch the necessary operations on leader-election related
 	// configmap objects.
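
Note: the old comment named the wrong constant; the API Priority & Fairness caveat applies to the configmapsleases multilock, while the endpointsleases case is covered by the suggested-flow-schema example above it. New deployments can sidestep the caveat entirely with the plain Lease lock. A hedged sketch of constructing one (namespace, lock name, and identity are placeholders):

    package main

    import (
        "os"

        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
        rl "k8s.io/client-go/tools/leaderelection/resourcelock"
    )

    func main() {
        // Sketch only: error handling trimmed, kubeconfig taken from the env.
        cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
        if err != nil {
            panic(err)
        }
        client := kubernetes.NewForConfigOrDie(cfg)

        // "leases" needs no extra flow-schema configuration.
        lock, err := rl.New(rl.LeasesResourceLock, "kube-system", "example-lock",
            client.CoreV1(), client.CoordinationV1(),
            rl.ResourceLockConfig{Identity: "holder-1"})
        if err != nil {
            panic(err)
        }
        _ = lock
    }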

File: k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go

@@ -117,10 +117,10 @@ func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElectionRecord {
 		r.LeaderTransitions = int(*spec.LeaseTransitions)
 	}
 	if spec.AcquireTime != nil {
-		r.AcquireTime = metav1.Time{spec.AcquireTime.Time}
+		r.AcquireTime = metav1.Time{Time: spec.AcquireTime.Time}
 	}
 	if spec.RenewTime != nil {
-		r.RenewTime = metav1.Time{spec.RenewTime.Time}
+		r.RenewTime = metav1.Time{Time: spec.RenewTime.Time}
 	}
 	return &r
@@ -132,8 +132,8 @@ func LeaderElectionRecordToLeaseSpec(ler *LeaderElectionRecord) coordinationv1.LeaseSpec {
 	return coordinationv1.LeaseSpec{
 		HolderIdentity:       &ler.HolderIdentity,
 		LeaseDurationSeconds: &leaseDurationSeconds,
-		AcquireTime:          &metav1.MicroTime{ler.AcquireTime.Time},
-		RenewTime:            &metav1.MicroTime{ler.RenewTime.Time},
+		AcquireTime:          &metav1.MicroTime{Time: ler.AcquireTime.Time},
+		RenewTime:            &metav1.MicroTime{Time: ler.RenewTime.Time},
 		LeaseTransitions:     &leaseTransitions,
 	}
 }
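
Note: these two hunks switch unkeyed composite literals to keyed ones. go vet's composites check flags unkeyed fields in literals of struct types from other packages, and the keyed form keeps compiling if fields are later added or reordered. A self-contained illustration with a local stand-in type:

    package main

    import (
        "fmt"
        "time"
    )

    // Timestamp is a local stand-in for a struct from another package,
    // e.g. metav1.Time.
    type Timestamp struct {
        Time time.Time
    }

    func main() {
        t := time.Now()
        // Unkeyed form compiles but `go vet` flags it for imported types,
        // and it breaks if fields are added or reordered:
        //   ts := Timestamp{t}
        // Keyed form is what the diff switches to:
        ts := Timestamp{Time: t}
        fmt.Println(ts.Time.Equal(t))
    }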