Bump dependencies to Kubernetes 1.20
vendor/k8s.io/client-go/tools/auth/clientauth.go (generated, vendored): 4 changes
@@ -75,11 +75,11 @@ import (
 // to be read/written from a file as a JSON object.
 type Info struct {
     User        string
-    Password    string
+    Password    string `datapolicy:"password"`
     CAFile      string
     CertFile    string
     KeyFile     string
-    BearerToken string
+    BearerToken string `datapolicy:"token"`
     Insecure    *bool
 }
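Note: the new datapolicy tags are plain Go struct tags consumed by Kubernetes' log-sanitization tooling. A minimal sketch of how such a consumer could discover them via reflection; the info type and redact helper below are hypothetical, not part of client-go:

    package main

    import (
        "fmt"
        "reflect"
    )

    // info mirrors the tagged fields above; illustrative only.
    type info struct {
        User        string
        Password    string `datapolicy:"password"`
        BearerToken string `datapolicy:"token"`
    }

    // redact returns the field values with any datapolicy-tagged
    // field masked, the way a log sanitizer might.
    func redact(v interface{}) map[string]string {
        out := map[string]string{}
        rv := reflect.ValueOf(v)
        rt := rv.Type()
        for i := 0; i < rt.NumField(); i++ {
            f := rt.Field(i)
            val := fmt.Sprintf("%v", rv.Field(i).Interface())
            if _, tagged := f.Tag.Lookup("datapolicy"); tagged {
                val = "--- REDACTED ---"
            }
            out[f.Name] = val
        }
        return out
    }

    func main() {
        fmt.Println(redact(info{User: "alice", Password: "hunter2", BearerToken: "abc123"}))
    }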
vendor/k8s.io/client-go/tools/cache/OWNERS (generated, vendored): 1 change
@@ -38,6 +38,5 @@ reviewers:
 - resouer
 - jessfraz
 - mfojtik
-- mqliang
 - sdminonne
 - ncdc
vendor/k8s.io/client-go/tools/cache/controller.go (generated, vendored): 6 changes
@@ -72,6 +72,9 @@ type Config struct {
 
     // Called whenever the ListAndWatch drops the connection with an error.
     WatchErrorHandler WatchErrorHandler
+
+    // WatchListPageSize is the requested chunk size of initial and relist watch lists.
+    WatchListPageSize int64
 }
 
 // ShouldResyncFunc is a type of function that indicates if a reflector should perform a
@@ -134,6 +137,7 @@ func (c *controller) Run(stopCh <-chan struct{}) {
         c.config.FullResyncPeriod,
     )
     r.ShouldResync = c.config.ShouldResync
+    r.WatchListPageSize = c.config.WatchListPageSize
     r.clock = c.clock
     if c.config.WatchErrorHandler != nil {
         r.watchErrorHandler = c.config.WatchErrorHandler
@@ -144,11 +148,11 @@ func (c *controller) Run(stopCh <-chan struct{}) {
     c.reflectorMutex.Unlock()
 
     var wg wait.Group
+    defer wg.Wait()
 
     wg.StartWithChannel(stopCh, r.Run)
 
     wait.Until(c.processLoop, time.Second, stopCh)
-    wg.Wait()
 }
 
 // Returns true once this controller has completed an initial resource listing
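Note: with WatchListPageSize now plumbed from Config into the reflector, low-level cache.New users can request paginated initial lists and relists. A minimal sketch, assuming a reachable cluster; the 500-object page size is an arbitrary example:

    package main

    import (
        "fmt"
        "time"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/fields"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"
        "k8s.io/client-go/tools/cache"
    )

    func main() {
        restCfg, err := rest.InClusterConfig() // any rest.Config works here
        if err != nil {
            panic(err)
        }
        client := kubernetes.NewForConfigOrDie(restCfg)
        lw := cache.NewListWatchFromClient(
            client.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
        fifo := cache.NewDeltaFIFOWithOptions(cache.DeltaFIFOOptions{
            KeyFunction: cache.MetaNamespaceKeyFunc,
        })
        cfg := &cache.Config{
            Queue:            fifo,
            ListerWatcher:    lw,
            ObjectType:       &v1.Pod{},
            FullResyncPeriod: 10 * time.Minute,
            // New in this bump: the reflector pages the initial list and
            // relists in chunks of this many objects.
            WatchListPageSize: 500,
            Process: func(obj interface{}) error {
                fmt.Printf("popped %d delta(s)\n", len(obj.(cache.Deltas)))
                return nil
            },
        }
        ctrl := cache.New(cfg)
        ctrl.Run(make(chan struct{})) // blocks; close the channel to stop
    }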
vendor/k8s.io/client-go/tools/cache/delta_fifo.go (generated, vendored): 34 changes
@@ -145,7 +145,7 @@ func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO {
 // DeltaFIFO's Pop(), Get(), and GetByKey() methods return
 // interface{} to satisfy the Store/Queue interfaces, but they
 // will always return an object of type Deltas. List() returns
-// the newest objects currently in the FIFO.
+// the newest object from each accumulator in the FIFO.
 //
 // A DeltaFIFO's knownObjects KeyListerGetter provides the abilities
 // to list Store keys and to get objects by Store key. The objects in
@@ -161,12 +161,13 @@ type DeltaFIFO struct {
     lock sync.RWMutex
     cond sync.Cond
 
-    // `items` maps keys to Deltas.
-    // `queue` maintains FIFO order of keys for consumption in Pop().
-    // We maintain the property that keys in the `items` and `queue` are
-    // strictly 1:1 mapping, and that all Deltas in `items` should have
-    // at least one Delta.
+    // `items` maps a key to a Deltas.
+    // Each such Deltas has at least one Delta.
     items map[string]Deltas
+
+    // `queue` maintains FIFO order of keys for consumption in Pop().
+    // There are no duplicates in `queue`.
+    // A key is in `queue` if and only if it is in `items`.
     queue []string
 
     // populated is true if the first batch of items inserted by Replace() has been populated
@@ -376,8 +377,8 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err
     if err != nil {
         return KeyError{obj, err}
     }
-
-    newDeltas := append(f.items[id], Delta{actionType, obj})
+    oldDeltas := f.items[id]
+    newDeltas := append(oldDeltas, Delta{actionType, obj})
     newDeltas = dedupDeltas(newDeltas)
 
     if len(newDeltas) > 0 {
@@ -389,10 +390,14 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err
     } else {
         // This never happens, because dedupDeltas never returns an empty list
         // when given a non-empty list (as it is here).
-        // But if somehow it ever does return an empty list, then
-        // We need to remove this from our map (extra items in the queue are
-        // ignored if they are not in the map).
-        delete(f.items, id)
+        // If somehow it happens anyway, deal with it but complain.
+        if oldDeltas == nil {
+            klog.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; ignoring", id, oldDeltas, obj)
+            return nil
+        }
+        klog.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; breaking invariant by storing empty Deltas", id, oldDeltas, obj)
+        f.items[id] = newDeltas
+        return fmt.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; broke DeltaFIFO invariant by storing empty Deltas", id, oldDeltas, obj)
     }
     return nil
 }
@@ -459,7 +464,7 @@ func (f *DeltaFIFO) IsClosed() bool {
     return f.closed
 }
 
-// Pop blocks until an item is added to the queue, and then returns it. If
+// Pop blocks until the queue has some items, and then returns one. If
 // multiple items are ready, they are returned in the order in which they were
 // added/updated. The item is removed from the queue (and the store) before it
 // is returned, so if you don't successfully process it, you need to add it back
@@ -494,7 +499,8 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
         }
         item, ok := f.items[id]
         if !ok {
-            // Item may have been deleted subsequently.
+            // This should never happen
+            klog.Errorf("Inconceivable! %q was in f.queue but not f.items; ignoring.", id)
             continue
         }
         delete(f.items, id)
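Note: the reworked comments above pin down the invariant that `items` and `queue` cover exactly the same keys, with one non-empty Deltas slice per key. A minimal sketch of the accumulator behavior Pop exposes:

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/tools/cache"
    )

    func main() {
        f := cache.NewDeltaFIFOWithOptions(cache.DeltaFIFOOptions{
            KeyFunction: cache.MetaNamespaceKeyFunc,
        })
        pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo"}}
        // Two actions against the same key land in one accumulator.
        f.Add(pod)
        f.Update(pod)
        // Pop hands back the whole Deltas slice for the key, oldest
        // first: Added, then Updated.
        f.Pop(func(obj interface{}) error {
            for _, d := range obj.(cache.Deltas) {
                fmt.Println(d.Type)
            }
            return nil
        })
    }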
vendor/k8s.io/client-go/tools/cache/reflector.go (generated, vendored): 50 changes
@@ -69,6 +69,8 @@ type Reflector struct {
 
     // backoff manages backoff of ListWatch
     backoffManager wait.BackoffManager
+    // initConnBackoffManager manages backoff the initial connection with the Watch calll of ListAndWatch.
+    initConnBackoffManager wait.BackoffManager
 
     resyncPeriod time.Duration
     // ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked
@@ -99,6 +101,15 @@ type Reflector struct {
     watchErrorHandler WatchErrorHandler
 }
 
+// ResourceVersionUpdater is an interface that allows store implementation to
+// track the current resource version of the reflector. This is especially
+// important if storage bookmarks are enabled.
+type ResourceVersionUpdater interface {
+    // UpdateResourceVersion is called each time current resource version of the reflector
+    // is updated.
+    UpdateResourceVersion(resourceVersion string)
+}
+
 // The WatchErrorHandler is called whenever ListAndWatch drops the
 // connection with an error. After calling this handler, the informer
 // will backoff and retry.
@@ -166,10 +177,11 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{},
         // We used to make the call every 1sec (1 QPS), the goal here is to achieve ~98% traffic reduction when
         // API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is
         // 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff.
-        backoffManager:    wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock),
-        resyncPeriod:      resyncPeriod,
-        clock:             realClock,
-        watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler),
+        backoffManager:         wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock),
+        initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock),
+        resyncPeriod:           resyncPeriod,
+        clock:                  realClock,
+        watchErrorHandler:      WatchErrorHandler(DefaultWatchErrorHandler),
     }
     r.setExpectedType(expectedType)
     return r
@@ -404,9 +416,9 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
             // If this is "connection refused" error, it means that most likely apiserver is not responsive.
             // It doesn't make sense to re-list all objects because most likely we will be able to restart
             // watch where we ended.
-            // If that's the case wait and resend watch request.
+            // If that's the case begin exponentially backing off and resend watch request.
             if utilnet.IsConnectionRefused(err) {
-                time.Sleep(time.Second)
+                <-r.initConnBackoffManager.Backoff().C()
                 continue
             }
             return err
@@ -504,6 +516,9 @@ loop:
             }
             *resourceVersion = newResourceVersion
             r.setLastSyncResourceVersion(newResourceVersion)
+            if rvu, ok := r.store.(ResourceVersionUpdater); ok {
+                rvu.UpdateResourceVersion(newResourceVersion)
+            }
             eventCount++
         }
     }
@@ -570,5 +585,26 @@ func isExpiredError(err error) bool {
 }
 
 func isTooLargeResourceVersionError(err error) bool {
-    return apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge)
+    if apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge) {
+        return true
+    }
+    // In Kubernetes 1.17.0-1.18.5, the api server doesn't set the error status cause to
+    // metav1.CauseTypeResourceVersionTooLarge to indicate that the requested minimum resource
+    // version is larger than the largest currently available resource version. To ensure backward
+    // compatibility with these server versions we also need to detect the error based on the content
+    // of the error message field.
+    if !apierrors.IsTimeout(err) {
+        return false
+    }
+    apierr, ok := err.(apierrors.APIStatus)
+    if !ok || apierr == nil || apierr.Status().Details == nil {
+        return false
+    }
+    for _, cause := range apierr.Status().Details.Causes {
+        // Matches the message returned by api server 1.17.0-1.18.5 for this error condition
+        if cause.Message == "Too large resource version" {
+            return true
+        }
+    }
+    return false
 }
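Note: ResourceVersionUpdater lets a store observe the reflector's progress. A minimal sketch of a conforming store; the wrapper type is hypothetical, and a real implementation should also guard lastRV with a mutex:

    package main

    import (
        "fmt"

        "k8s.io/client-go/tools/cache"
    )

    // versionTrackingStore wraps any cache.Store; because it implements
    // UpdateResourceVersion, a Reflector using it will report each new
    // resource version here (including bookmark-only updates).
    type versionTrackingStore struct {
        cache.Store
        lastRV string
    }

    func (s *versionTrackingStore) UpdateResourceVersion(resourceVersion string) {
        s.lastRV = resourceVersion
    }

    func main() {
        s := &versionTrackingStore{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}
        var _ cache.ResourceVersionUpdater = s // satisfies the new interface
        s.UpdateResourceVersion("12345")
        fmt.Println(s.lastRV)
    }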
vendor/k8s.io/client-go/tools/cache/shared_informer.go (generated, vendored): 4 changes
@@ -485,13 +485,13 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv
 
     if resyncPeriod > 0 {
         if resyncPeriod < minimumResyncPeriod {
-            klog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod)
+            klog.Warningf("resyncPeriod %v is too small. Changing it to the minimum allowed value of %v", resyncPeriod, minimumResyncPeriod)
             resyncPeriod = minimumResyncPeriod
         }
 
         if resyncPeriod < s.resyncCheckPeriod {
             if s.started {
-                klog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
+                klog.Warningf("resyncPeriod %v is smaller than resyncCheckPeriod %v and the informer has already started. Changing it to %v", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
                 resyncPeriod = s.resyncCheckPeriod
             } else {
                 // if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update
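Note: the %d to %v switch matters because resyncPeriod is a time.Duration; %d prints the raw nanosecond count while %v prints a human-readable duration:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        d := 500 * time.Millisecond
        fmt.Printf("%d\n", d) // 500000000 (nanoseconds), what the old warnings printed
        fmt.Printf("%v\n", d) // 500ms, what the fixed warnings print
    }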
vendor/k8s.io/client-go/tools/clientcmd/api/types.go (generated, vendored): 42 changes
@@ -114,10 +114,10 @@ type AuthInfo struct {
     ClientKey string `json:"client-key,omitempty"`
     // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
     // +optional
-    ClientKeyData []byte `json:"client-key-data,omitempty"`
+    ClientKeyData []byte `json:"client-key-data,omitempty" datapolicy:"security-key"`
     // Token is the bearer token for authentication to the kubernetes cluster.
     // +optional
-    Token string `json:"token,omitempty"`
+    Token string `json:"token,omitempty" datapolicy:"token"`
     // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence.
     // +optional
     TokenFile string `json:"tokenFile,omitempty"`
@@ -135,7 +135,7 @@ type AuthInfo struct {
     Username string `json:"username,omitempty"`
     // Password is the password for basic authentication to the kubernetes cluster.
     // +optional
-    Password string `json:"password,omitempty"`
+    Password string `json:"password,omitempty" datapolicy:"password"`
     // AuthProvider specifies a custom authentication plugin for the kubernetes cluster.
     // +optional
     AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"`
@@ -215,6 +215,36 @@ type ExecConfig struct {
     // present. For example, `brew install foo-cli` might be a good InstallHint for
     // foo-cli on Mac OS systems.
     InstallHint string `json:"installHint,omitempty"`
+
+    // ProvideClusterInfo determines whether or not to provide cluster information,
+    // which could potentially contain very large CA data, to this exec plugin as a
+    // part of the KUBERNETES_EXEC_INFO environment variable. By default, it is set
+    // to false. Package k8s.io/client-go/tools/auth/exec provides helper methods for
+    // reading this environment variable.
+    ProvideClusterInfo bool `json:"provideClusterInfo"`
+
+    // Config holds additional config data that is specific to the exec
+    // plugin with regards to the cluster being authenticated to.
+    //
+    // This data is sourced from the clientcmd Cluster object's extensions[exec] field:
+    //
+    // clusters:
+    // - name: my-cluster
+    //   cluster:
+    //     ...
+    //     extensions:
+    //     - name: client.authentication.k8s.io/exec  # reserved extension name for per cluster exec config
+    //       extension:
+    //         audience: 06e3fbd18de8  # arbitrary config
+    //
+    // In some environments, the user config may be exactly the same across many clusters
+    // (i.e. call this exec plugin) minus some details that are specific to each cluster
+    // such as the audience. This field allows the per cluster config to be directly
+    // specified with the cluster info. Using this field to store secret data is not
+    // recommended as one of the prime benefits of exec plugins is that no secrets need
+    // to be stored directly in the kubeconfig.
+    // +k8s:conversion-gen=false
+    Config runtime.Object
 }
 
 var _ fmt.Stringer = new(ExecConfig)
@@ -237,7 +267,11 @@ func (c ExecConfig) String() string {
     if len(c.Env) > 0 {
         env = "[]ExecEnvVar{--- REDACTED ---}"
     }
-    return fmt.Sprintf("api.AuthProviderConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q}", c.Command, args, env, c.APIVersion)
+    config := "runtime.Object(nil)"
+    if c.Config != nil {
+        config = "runtime.Object(--- REDACTED ---)"
+    }
+    return fmt.Sprintf("api.ExecConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q, ProvideClusterInfo: %t, Config: %s}", c.Command, args, env, c.APIVersion, c.ProvideClusterInfo, config)
 }
 
 // ExecEnvVar is used for setting environment variables when executing an exec-based
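Note: the corrected String() now names the right type and covers the new fields while keeping secrets out of logs. A minimal sketch of the redaction behavior; the command and args below are made up:

    package main

    import (
        "fmt"

        "k8s.io/client-go/tools/clientcmd/api"
    )

    func main() {
        ec := api.ExecConfig{
            Command:    "example-auth-plugin", // hypothetical plugin
            Args:       []string{"token", "--audience", "06e3fbd18de8"},
            APIVersion: "client.authentication.k8s.io/v1beta1",
        }
        // Args are replaced with "--- REDACTED ---" and Config prints as
        // runtime.Object(nil) unless set, so this line is safe to log.
        fmt.Println(ec.String())
    }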
vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go (generated, vendored): 13 changes
@@ -104,10 +104,10 @@ type AuthInfo struct {
     ClientKey string `json:"client-key,omitempty"`
     // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
     // +optional
-    ClientKeyData []byte `json:"client-key-data,omitempty"`
+    ClientKeyData []byte `json:"client-key-data,omitempty" datapolicy:"security-key"`
     // Token is the bearer token for authentication to the kubernetes cluster.
     // +optional
-    Token string `json:"token,omitempty"`
+    Token string `json:"token,omitempty" datapolicy:"token"`
     // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence.
     // +optional
     TokenFile string `json:"tokenFile,omitempty"`
@@ -125,7 +125,7 @@ type AuthInfo struct {
     Username string `json:"username,omitempty"`
     // Password is the password for basic authentication to the kubernetes cluster.
     // +optional
-    Password string `json:"password,omitempty"`
+    Password string `json:"password,omitempty" datapolicy:"password"`
     // AuthProvider specifies a custom authentication plugin for the kubernetes cluster.
     // +optional
     AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"`
@@ -214,6 +214,13 @@ type ExecConfig struct {
     // present. For example, `brew install foo-cli` might be a good InstallHint for
     // foo-cli on Mac OS systems.
     InstallHint string `json:"installHint,omitempty"`
+
+    // ProvideClusterInfo determines whether or not to provide cluster information,
+    // which could potentially contain very large CA data, to this exec plugin as a
+    // part of the KUBERNETES_EXEC_INFO environment variable. By default, it is set
+    // to false. Package k8s.io/client-go/tools/auth/exec provides helper methods for
+    // reading this environment variable.
+    ProvideClusterInfo bool `json:"provideClusterInfo"`
 }
 
 // ExecEnvVar is used for setting environment variables when executing an exec-based
vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go (generated, vendored): 23 changes
@@ -171,7 +171,15 @@ func autoConvert_v1_AuthInfo_To_api_AuthInfo(in *AuthInfo, out *api.AuthInfo, s
     out.Username = in.Username
     out.Password = in.Password
     out.AuthProvider = (*api.AuthProviderConfig)(unsafe.Pointer(in.AuthProvider))
-    out.Exec = (*api.ExecConfig)(unsafe.Pointer(in.Exec))
+    if in.Exec != nil {
+        in, out := &in.Exec, &out.Exec
+        *out = new(api.ExecConfig)
+        if err := Convert_v1_ExecConfig_To_api_ExecConfig(*in, *out, s); err != nil {
+            return err
+        }
+    } else {
+        out.Exec = nil
+    }
     if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil {
         return err
     }
@@ -197,7 +205,15 @@ func autoConvert_api_AuthInfo_To_v1_AuthInfo(in *api.AuthInfo, out *AuthInfo, s
     out.Username = in.Username
     out.Password = in.Password
     out.AuthProvider = (*AuthProviderConfig)(unsafe.Pointer(in.AuthProvider))
-    out.Exec = (*ExecConfig)(unsafe.Pointer(in.Exec))
+    if in.Exec != nil {
+        in, out := &in.Exec, &out.Exec
+        *out = new(ExecConfig)
+        if err := Convert_api_ExecConfig_To_v1_ExecConfig(*in, *out, s); err != nil {
+            return err
+        }
+    } else {
+        out.Exec = nil
+    }
     if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil {
         return err
     }
@@ -359,6 +375,7 @@ func autoConvert_v1_ExecConfig_To_api_ExecConfig(in *ExecConfig, out *api.ExecCo
     out.Env = *(*[]api.ExecEnvVar)(unsafe.Pointer(&in.Env))
     out.APIVersion = in.APIVersion
     out.InstallHint = in.InstallHint
+    out.ProvideClusterInfo = in.ProvideClusterInfo
     return nil
 }
 
@@ -373,6 +390,8 @@ func autoConvert_api_ExecConfig_To_v1_ExecConfig(in *api.ExecConfig, out *ExecCo
     out.Env = *(*[]ExecEnvVar)(unsafe.Pointer(&in.Env))
     out.APIVersion = in.APIVersion
     out.InstallHint = in.InstallHint
+    out.ProvideClusterInfo = in.ProvideClusterInfo
+    // INFO: in.Config opted out of conversion generation
     return nil
 }
vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go (generated, vendored): 3 changes
@@ -267,6 +267,9 @@ func (in *ExecConfig) DeepCopyInto(out *ExecConfig) {
         *out = make([]ExecEnvVar, len(*in))
         copy(*out, *in)
     }
+    if in.Config != nil {
+        out.Config = in.Config.DeepCopyObject()
+    }
     return
 }
vendor/k8s.io/client-go/tools/clientcmd/client_config.go (generated, vendored): 12 changes
@@ -34,6 +34,11 @@ import (
     "github.com/imdario/mergo"
 )
 
+const (
+    // clusterExtensionKey is reserved in the cluster extensions list for exec plugin config.
+    clusterExtensionKey = "client.authentication.k8s.io/exec"
+)
+
 var (
     // ClusterDefaults has the same behavior as the old EnvVar and DefaultCluster fields
     // DEPRECATED will be replaced
@@ -72,7 +77,7 @@ type PersistAuthProviderConfigForUser func(user string) restclient.AuthProviderC
 
 type promptedCredentials struct {
     username string
-    password string
+    password string `datapolicy:"password"`
 }
 
 // DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information
@@ -189,7 +194,7 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) {
         authInfoName, _ := config.getAuthInfoName()
         persister = PersisterForUser(config.configAccess, authInfoName)
     }
-    userAuthPartialConfig, err := config.getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister)
+    userAuthPartialConfig, err := config.getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister, configClusterInfo)
     if err != nil {
         return nil, err
     }
@@ -232,7 +237,7 @@ func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo,
 // 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
 // 3. if there is not enough information to identify the user, load try the ~/.kubernetes_auth file
 // 4. if there is not enough information to identify the user, prompt if possible
-func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister) (*restclient.Config, error) {
+func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) {
     mergedConfig := &restclient.Config{}
 
     // blindly overwrite existing values based on precedence
@@ -271,6 +276,7 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI
     if configAuthInfo.Exec != nil {
         mergedConfig.ExecProvider = configAuthInfo.Exec
         mergedConfig.ExecProvider.InstallHint = cleanANSIEscapeCodes(mergedConfig.ExecProvider.InstallHint)
+        mergedConfig.ExecProvider.Config = configClusterInfo.Extensions[clusterExtensionKey]
     }
 
     // if there still isn't enough information to authenticate the user, try prompting
vendor/k8s.io/client-go/tools/clientcmd/config.go (generated, vendored): 5 changes
@@ -83,10 +83,13 @@ func (o *PathOptions) GetEnvVarFiles() []string {
 }
 
 func (o *PathOptions) GetLoadingPrecedence() []string {
+    if o.IsExplicitFile() {
+        return []string{o.GetExplicitFile()}
+    }
+
     if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 {
         return envVarFiles
     }
 
     return []string{o.GlobalFile}
 }
vendor/k8s.io/client-go/tools/clientcmd/loader.go (generated, vendored): 4 changes
@@ -304,6 +304,10 @@ func (rules *ClientConfigLoadingRules) Migrate() error {
 
 // GetLoadingPrecedence implements ConfigAccess
 func (rules *ClientConfigLoadingRules) GetLoadingPrecedence() []string {
+    if len(rules.ExplicitPath) > 0 {
+        return []string{rules.ExplicitPath}
+    }
+
     return rules.Precedence
 }
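Note: both precedence fixes make an explicitly requested kubeconfig win over $KUBECONFIG and the global default. A minimal sketch; the explicit path is hypothetical:

    package main

    import (
        "fmt"

        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        po := clientcmd.NewDefaultPathOptions()
        // Without an explicit file: $KUBECONFIG entries or ~/.kube/config.
        fmt.Println(po.GetLoadingPrecedence())

        // With an explicit file it now short-circuits everything else.
        po.LoadingRules.ExplicitPath = "/tmp/kubeconfig"
        fmt.Println(po.GetLoadingPrecedence())
    }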
vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go (generated, vendored): 18 changes
@@ -188,12 +188,11 @@ type LeaderElector struct {
     clock clock.Clock
 
     metrics leaderMetricsAdapter
-
-    // name is the name of the resource lock for debugging
-    name string
 }
 
-// Run starts the leader election loop
+// Run starts the leader election loop. Run will not return
+// before leader election loop is stopped by ctx or it has
+// stopped holding the leader lease
 func (le *LeaderElector) Run(ctx context.Context) {
     defer runtime.HandleCrash()
     defer func() {
@@ -210,7 +209,8 @@ func (le *LeaderElector) Run(ctx context.Context) {
 }
 
 // RunOrDie starts a client with the provided config or panics if the config
-// fails to validate.
+// fails to validate. RunOrDie blocks until leader election loop is
+// stopped by ctx or it has stopped holding the leader lease
 func RunOrDie(ctx context.Context, lec LeaderElectionConfig) {
     le, err := NewLeaderElector(lec)
     if err != nil {
@@ -240,7 +240,7 @@ func (le *LeaderElector) acquire(ctx context.Context) bool {
     defer cancel()
     succeeded := false
     desc := le.config.Lock.Describe()
-    klog.Infof("attempting to acquire leader lease  %v...", desc)
+    klog.Infof("attempting to acquire leader lease %v...", desc)
     wait.JitterUntil(func() {
         succeeded = le.tryAcquireOrRenew(ctx)
         le.maybeReportTransition()
@@ -290,8 +290,12 @@ func (le *LeaderElector) release() bool {
     if !le.IsLeader() {
         return true
     }
+    now := metav1.Now()
     leaderElectionRecord := rl.LeaderElectionRecord{
-        LeaderTransitions: le.observedRecord.LeaderTransitions,
+        LeaderTransitions:    le.observedRecord.LeaderTransitions,
+        LeaseDurationSeconds: 1,
+        RenewTime:            now,
+        AcquireTime:          now,
     }
     if err := le.config.Lock.Update(context.TODO(), leaderElectionRecord); err != nil {
         klog.Errorf("Failed to release lock: %v", err)
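Note: the expanded doc comments make the blocking behavior explicit. A minimal sketch of the usual RunOrDie pattern those comments describe; it assumes a reachable cluster, and the names and timings are arbitrary:

    package main

    import (
        "context"
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"
        "k8s.io/client-go/tools/leaderelection"
        "k8s.io/client-go/tools/leaderelection/resourcelock"
    )

    func main() {
        cfg, err := rest.InClusterConfig()
        if err != nil {
            panic(err)
        }
        client := kubernetes.NewForConfigOrDie(cfg)
        lock := &resourcelock.LeaseLock{
            LeaseMeta:  metav1.ObjectMeta{Namespace: "default", Name: "demo-lock"},
            Client:     client.CoordinationV1(),
            LockConfig: resourcelock.ResourceLockConfig{Identity: "instance-1"},
        }
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()
        // Blocks until ctx is cancelled or leadership is lost, per the
        // clarified Run/RunOrDie docs above.
        leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
            Lock:          lock,
            LeaseDuration: 15 * time.Second,
            RenewDeadline: 10 * time.Second,
            RetryPeriod:   2 * time.Second,
            Callbacks: leaderelection.LeaderCallbacks{
                OnStartedLeading: func(ctx context.Context) { /* do leader work */ },
                OnStoppedLeading: func() { /* clean up */ },
            },
        })
    }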
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go (generated, vendored): 15 changes
@@ -52,13 +52,14 @@ func (cml *ConfigMapLock) Get(ctx context.Context) (*LeaderElectionRecord, []byt
     if cml.cm.Annotations == nil {
         cml.cm.Annotations = make(map[string]string)
     }
-    recordBytes, found := cml.cm.Annotations[LeaderElectionRecordAnnotationKey]
+    recordStr, found := cml.cm.Annotations[LeaderElectionRecordAnnotationKey]
+    recordBytes := []byte(recordStr)
     if found {
-        if err := json.Unmarshal([]byte(recordBytes), &record); err != nil {
+        if err := json.Unmarshal(recordBytes, &record); err != nil {
             return nil, nil, err
         }
     }
-    return &record, []byte(recordBytes), nil
+    return &record, recordBytes, nil
 }
 
 // Create attempts to create a LeaderElectionRecord annotation
@@ -92,8 +93,12 @@ func (cml *ConfigMapLock) Update(ctx context.Context, ler LeaderElectionRecord)
         cml.cm.Annotations = make(map[string]string)
     }
     cml.cm.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
-    cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(ctx, cml.cm, metav1.UpdateOptions{})
-    return err
+    cm, err := cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(ctx, cml.cm, metav1.UpdateOptions{})
+    if err != nil {
+        return err
+    }
+    cml.cm = cm
+    return nil
 }
 
 // RecordEvent in leader election while adding meta-data
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go (generated, vendored): 15 changes
@@ -47,13 +47,14 @@ func (el *EndpointsLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte
     if el.e.Annotations == nil {
         el.e.Annotations = make(map[string]string)
     }
-    recordBytes, found := el.e.Annotations[LeaderElectionRecordAnnotationKey]
+    recordStr, found := el.e.Annotations[LeaderElectionRecordAnnotationKey]
+    recordBytes := []byte(recordStr)
     if found {
-        if err := json.Unmarshal([]byte(recordBytes), &record); err != nil {
+        if err := json.Unmarshal(recordBytes, &record); err != nil {
             return nil, nil, err
         }
     }
-    return &record, []byte(recordBytes), nil
+    return &record, recordBytes, nil
 }
 
 // Create attempts to create a LeaderElectionRecord annotation
@@ -87,8 +88,12 @@ func (el *EndpointsLock) Update(ctx context.Context, ler LeaderElectionRecord) e
         el.e.Annotations = make(map[string]string)
     }
     el.e.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
-    el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(ctx, el.e, metav1.UpdateOptions{})
-    return err
+    e, err := el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(ctx, el.e, metav1.UpdateOptions{})
+    if err != nil {
+        return err
+    }
+    el.e = e
+    return nil
 }
 
 // RecordEvent in leader election while adding meta-data
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go (generated, vendored): 16 changes
@@ -19,6 +19,9 @@ package resourcelock
 import (
     "context"
     "fmt"
+    clientset "k8s.io/client-go/kubernetes"
+    restclient "k8s.io/client-go/rest"
+    "time"
 
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
@@ -140,3 +143,16 @@ func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interf
         return nil, fmt.Errorf("Invalid lock-type %s", lockType)
     }
 }
+
+// NewFromKubeconfig will create a lock of a given type according to the input parameters.
+func NewFromKubeconfig(lockType string, ns string, name string, rlc ResourceLockConfig, kubeconfig *restclient.Config, renewDeadline time.Duration) (Interface, error) {
+    // shallow copy, do not modify the kubeconfig
+    config := *kubeconfig
+    timeout := ((renewDeadline / time.Millisecond) / 2) * time.Millisecond
+    if timeout < time.Second {
+        timeout = time.Second
+    }
+    config.Timeout = timeout
+    leaderElectionClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "leader-election"))
+    return New(lockType, ns, name, leaderElectionClient.CoreV1(), leaderElectionClient.CoordinationV1(), rlc)
+}
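Note: NewFromKubeconfig derives its own client with a timeout of renewDeadline/2 (floored at 1s), so a wedged apiserver cannot stall a lock renewal past the renew deadline. A minimal usage sketch; the kubeconfig path, namespace, and names are hypothetical:

    package main

    import (
        "time"

        "k8s.io/client-go/tools/clientcmd"
        "k8s.io/client-go/tools/leaderelection/resourcelock"
    )

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig")
        if err != nil {
            panic(err)
        }
        // With renewDeadline=10s, the derived client gets a 5s timeout.
        lock, err := resourcelock.NewFromKubeconfig(
            resourcelock.LeasesResourceLock,
            "kube-system",
            "my-controller",
            resourcelock.ResourceLockConfig{Identity: "instance-1"},
            cfg,
            10*time.Second,
        )
        if err != nil {
            panic(err)
        }
        _ = lock // pass to leaderelection.LeaderElectionConfig{Lock: lock, ...}
    }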
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go (generated, vendored): 11 changes
@@ -71,9 +71,14 @@ func (ll *LeaseLock) Update(ctx context.Context, ler LeaderElectionRecord) error
         return errors.New("lease not initialized, call get or create first")
     }
     ll.lease.Spec = LeaderElectionRecordToLeaseSpec(&ler)
-    var err error
-    ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Update(ctx, ll.lease, metav1.UpdateOptions{})
-    return err
+
+    lease, err := ll.Client.Leases(ll.LeaseMeta.Namespace).Update(ctx, ll.lease, metav1.UpdateOptions{})
+    if err != nil {
+        return err
+    }
+
+    ll.lease = lease
+    return nil
 }
 
 // RecordEvent in leader election while adding meta-data
vendor/k8s.io/client-go/tools/record/event.go (generated, vendored): 2 changes
@@ -270,7 +270,7 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv
     default:
         // This case includes actual http transport errors. Go ahead and retry.
     }
-    klog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err)
+    klog.Errorf("Unable to write event: '%#v': '%v'(may retry after sleeping)", event, err)
     return false
 }
vendor/k8s.io/client-go/tools/record/fake.go (generated, vendored): 16 changes
@@ -27,17 +27,29 @@ import (
 // thrown away in this case.
 type FakeRecorder struct {
     Events chan string
+
+    IncludeObject bool
 }
 
+func objectString(object runtime.Object, includeObject bool) string {
+    if !includeObject {
+        return ""
+    }
+    return fmt.Sprintf(" involvedObject{kind=%s,apiVersion=%s}",
+        object.GetObjectKind().GroupVersionKind().Kind,
+        object.GetObjectKind().GroupVersionKind().GroupVersion(),
+    )
+}
+
 func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) {
     if f.Events != nil {
-        f.Events <- fmt.Sprintf("%s %s %s", eventtype, reason, message)
+        f.Events <- fmt.Sprintf("%s %s %s%s", eventtype, reason, message, objectString(object, f.IncludeObject))
     }
 }
 
 func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
     if f.Events != nil {
-        f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...)
+        f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) + objectString(object, f.IncludeObject)
     }
 }
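Note: tests that opt in via IncludeObject now get the involved object's kind and apiVersion appended to each fake event. A minimal sketch:

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/tools/record"
    )

    func main() {
        rec := &record.FakeRecorder{
            Events:        make(chan string, 10),
            IncludeObject: true,
        }
        pod := &v1.Pod{
            TypeMeta:   metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"},
            ObjectMeta: metav1.ObjectMeta{Name: "demo"},
        }
        rec.Event(pod, v1.EventTypeNormal, "Started", "container started")
        // Normal Started container started involvedObject{kind=Pod,apiVersion=v1}
        fmt.Println(<-rec.Events)
    }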