Bumping k8s dependencies to 1.13
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go
(generated, vendored)
@@ -45,6 +45,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
@@ -53,14 +54,17 @@ import (
"k8s.io/client-go/util/certificate"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/client-go/util/integer"
csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/features"
kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config"
internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
pluginwatcherapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
kubeletcertificate "k8s.io/kubernetes/pkg/kubelet/certificate"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
"k8s.io/kubernetes/pkg/kubelet/cloudresource"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/configmap"
@@ -77,12 +81,14 @@ import (
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/metrics/collectors"
"k8s.io/kubernetes/pkg/kubelet/network/dns"
"k8s.io/kubernetes/pkg/kubelet/nodelease"
"k8s.io/kubernetes/pkg/kubelet/pleg"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
"k8s.io/kubernetes/pkg/kubelet/preemption"
"k8s.io/kubernetes/pkg/kubelet/prober"
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/kubelet/remote"
"k8s.io/kubernetes/pkg/kubelet/runtimeclass"
"k8s.io/kubernetes/pkg/kubelet/secret"
"k8s.io/kubernetes/pkg/kubelet/server"
serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
@@ -102,7 +108,6 @@ import (
"k8s.io/kubernetes/pkg/security/apparmor"
sysctlwhitelist "k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl"
utildbus "k8s.io/kubernetes/pkg/util/dbus"
kubeio "k8s.io/kubernetes/pkg/util/io"
utilipt "k8s.io/kubernetes/pkg/util/iptables"
"k8s.io/kubernetes/pkg/util/mount"
nodeutil "k8s.io/kubernetes/pkg/util/node"
@@ -238,16 +243,16 @@ type Dependencies struct {
ContainerManager cm.ContainerManager
DockerClientConfig *dockershim.ClientConfig
EventClient v1core.EventsGetter
HeartbeatClient v1core.CoreV1Interface
HeartbeatClient clientset.Interface
OnHeartbeatFailure func()
KubeClient clientset.Interface
ExternalKubeClient clientset.Interface
CSIClient csiclientset.Interface
DynamicKubeClient dynamic.Interface
Mounter mount.Interface
OOMAdjuster *oom.OOMAdjuster
OSInterface kubecontainer.OSInterface
PodConfig *config.PodConfig
Recorder record.EventRecorder
Writer kubeio.Writer
VolumePlugins []volume.VolumePlugin
DynamicPluginProber volume.DynamicPluginProber
TLSOptions *server.TLSOptions
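The hunk above changes Dependencies.HeartbeatClient from a v1core.CoreV1Interface to a full clientset.Interface. A minimal sketch (not part of this commit) of how a caller might now build a dedicated heartbeat client; the package name, helper name, and the 10-second timeout are illustrative assumptions, only the clientset.Interface requirement comes from the diff:

package kubeletdeps // hypothetical package name, for illustration only

import (
	"time"

	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// newHeartbeatClient builds a separate clientset for node heartbeats so they
// can use their own, shorter request timeout than the main kubelet client.
func newHeartbeatClient(base *rest.Config) (clientset.Interface, error) {
	cfg := *base                   // copy the base rest.Config; illustrative only
	cfg.Timeout = 10 * time.Second // assumed heartbeat timeout, not taken from this diff
	return clientset.NewForConfig(&cfg)
}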
@@ -369,11 +374,12 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
}
}

hostname := nodeutil.GetHostname(hostnameOverride)
hostname, err := nodeutil.GetHostname(hostnameOverride)
if err != nil {
return nil, err
}
// Query the cloud provider for our node name, default to hostname
nodeName := types.NodeName(hostname)
cloudIPs := []net.IP{}
cloudNames := []string{}
if kubeDeps.Cloud != nil {
var err error
instances, ok := kubeDeps.Cloud.Instances()
@@ -387,25 +393,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
}

glog.V(2).Infof("cloud provider determined current node name to be %s", nodeName)

if utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletServerCertificate) {
nodeAddresses, err := instances.NodeAddresses(context.TODO(), nodeName)
if err != nil {
return nil, fmt.Errorf("failed to get the addresses of the current instance from the cloud provider: %v", err)
}
for _, nodeAddress := range nodeAddresses {
switch nodeAddress.Type {
case v1.NodeExternalIP, v1.NodeInternalIP:
ip := net.ParseIP(nodeAddress.Address)
if ip != nil && !ip.IsLoopback() {
cloudIPs = append(cloudIPs, ip)
}
case v1.NodeExternalDNS, v1.NodeInternalDNS, v1.NodeHostName:
cloudNames = append(cloudNames, nodeAddress.Address)
}
}
}

}

if kubeDeps.PodConfig == nil {
@@ -491,11 +478,18 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
}
httpClient := &http.Client{}
parsedNodeIP := net.ParseIP(nodeIP)
protocol := utilipt.ProtocolIpv4
if parsedNodeIP != nil && parsedNodeIP.To4() == nil {
glog.V(0).Infof("IPv6 node IP (%s), assume IPv6 operation", nodeIP)
protocol = utilipt.ProtocolIpv6
}

klet := &Kubelet{
hostname: hostname,
hostnameOverridden: len(hostnameOverride) > 0,
nodeName: nodeName,
kubeClient: kubeDeps.KubeClient,
csiClient: kubeDeps.CSIClient,
heartbeatClient: kubeDeps.HeartbeatClient,
onRepeatedHeartbeatFailure: kubeDeps.OnHeartbeatFailure,
rootDirectory: rootDirectory,
@@ -522,7 +516,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
cgroupsPerQOS: kubeCfg.CgroupsPerQOS,
cgroupRoot: kubeCfg.CgroupRoot,
mounter: kubeDeps.Mounter,
writer: kubeDeps.Writer,
maxPods: int(kubeCfg.MaxPods),
podsPerCore: int(kubeCfg.PodsPerCore),
syncLoopMonitor: atomic.Value{},
@@ -534,7 +527,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
nodeIPValidator: validateNodeIP,
clock: clock.RealClock{},
enableControllerAttachDetach: kubeCfg.EnableControllerAttachDetach,
iptClient: utilipt.New(utilexec.New(), utildbus.New(), utilipt.ProtocolIpv4),
iptClient: utilipt.New(utilexec.New(), utildbus.New(), protocol),
makeIPTablesUtilChains: kubeCfg.MakeIPTablesUtilChains,
iptablesMasqueradeBit: int(kubeCfg.IPTablesMasqueradeBit),
iptablesDropBit: int(kubeCfg.IPTablesDropBit),
@@ -545,18 +538,28 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
}

if klet.cloud != nil {
klet.cloudproviderRequestParallelism = make(chan int, 1)
klet.cloudproviderRequestSync = make(chan int)
// TODO(jchaloup): Make it configurable via --cloud-provider-request-timeout
klet.cloudproviderRequestTimeout = 10 * time.Second
klet.cloudResourceSyncManager = cloudresource.NewSyncManager(klet.cloud, nodeName, klet.nodeStatusUpdateFrequency)
}

secretManager := secret.NewCachingSecretManager(
kubeDeps.KubeClient, manager.GetObjectTTLFromNodeFunc(klet.GetNode))
klet.secretManager = secretManager
var secretManager secret.Manager
var configMapManager configmap.Manager
switch kubeCfg.ConfigMapAndSecretChangeDetectionStrategy {
case kubeletconfiginternal.WatchChangeDetectionStrategy:
secretManager = secret.NewWatchingSecretManager(kubeDeps.KubeClient)
configMapManager = configmap.NewWatchingConfigMapManager(kubeDeps.KubeClient)
case kubeletconfiginternal.TTLCacheChangeDetectionStrategy:
secretManager = secret.NewCachingSecretManager(
kubeDeps.KubeClient, manager.GetObjectTTLFromNodeFunc(klet.GetNode))
configMapManager = configmap.NewCachingConfigMapManager(
kubeDeps.KubeClient, manager.GetObjectTTLFromNodeFunc(klet.GetNode))
case kubeletconfiginternal.GetChangeDetectionStrategy:
secretManager = secret.NewSimpleSecretManager(kubeDeps.KubeClient)
configMapManager = configmap.NewSimpleConfigMapManager(kubeDeps.KubeClient)
default:
return nil, fmt.Errorf("unknown configmap and secret manager mode: %v", kubeCfg.ConfigMapAndSecretChangeDetectionStrategy)
}

configMapManager := configmap.NewCachingConfigMapManager(
kubeDeps.KubeClient, manager.GetObjectTTLFromNodeFunc(klet.GetNode))
klet.secretManager = secretManager
klet.configMapManager = configMapManager

if klet.experimentalHostUserNamespaceDefaulting {
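The switch added above picks the secret and configmap managers from kubeCfg.ConfigMapAndSecretChangeDetectionStrategy. A minimal standalone sketch of the same selection pattern, limited to the constructors that take only a client (the TTL-cache case is omitted here because it also needs a node-lookup function); the package name, helper name, and the ResourceChangeDetectionStrategy type name are assumptions, not taken verbatim from this diff:

package kubeletsketch // hypothetical package name, for illustration only

import (
	"fmt"

	clientset "k8s.io/client-go/kubernetes"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
	"k8s.io/kubernetes/pkg/kubelet/configmap"
	"k8s.io/kubernetes/pkg/kubelet/secret"
)

// newObjectManagers mirrors the strategy switch above: Watch uses watching
// managers, Get fetches objects on every access, and unknown strategies are
// rejected the same way NewMainKubelet rejects them.
func newObjectManagers(strategy kubeletconfig.ResourceChangeDetectionStrategy, client clientset.Interface) (secret.Manager, configmap.Manager, error) {
	switch strategy {
	case kubeletconfig.WatchChangeDetectionStrategy:
		return secret.NewWatchingSecretManager(client), configmap.NewWatchingConfigMapManager(client), nil
	case kubeletconfig.GetChangeDetectionStrategy:
		return secret.NewSimpleSecretManager(client), configmap.NewSimpleConfigMapManager(client), nil
	default:
		return nil, nil, fmt.Errorf("unknown configmap and secret manager mode: %v", strategy)
	}
}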
@@ -615,8 +618,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
// Create and start the CRI shim running as a grpc server.
streamingConfig := getStreamingConfig(kubeCfg, kubeDeps, crOptions)
ds, err := dockershim.NewDockerService(kubeDeps.DockerClientConfig, crOptions.PodSandboxImage, streamingConfig,
&pluginSettings, runtimeCgroups, kubeCfg.CgroupDriver, crOptions.DockershimRootDirectory,
crOptions.DockerDisableSharedPID, !crOptions.RedirectContainerStreaming)
&pluginSettings, runtimeCgroups, kubeCfg.CgroupDriver, crOptions.DockershimRootDirectory, !crOptions.RedirectContainerStreaming)
if err != nil {
return nil, err
}
@@ -654,6 +656,11 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
return nil, err
}
klet.runtimeService = runtimeService

if utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClass) && kubeDeps.DynamicKubeClient != nil {
klet.runtimeClassManager = runtimeclass.NewManager(kubeDeps.DynamicKubeClient)
}

runtime, err := kuberuntime.NewKubeGenericRuntimeManager(
kubecontainer.FilterEventRecorder(kubeDeps.Recorder),
klet.livenessManager,
@@ -669,10 +676,12 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
float32(kubeCfg.RegistryPullQPS),
int(kubeCfg.RegistryBurst),
kubeCfg.CPUCFSQuota,
kubeCfg.CPUCFSQuotaPeriod,
runtimeService,
imageService,
kubeDeps.ContainerManager.InternalContainerLifecycle(),
legacyLogProvider,
klet.runtimeClassManager,
)
if err != nil {
return nil, err
@@ -681,6 +690,12 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
klet.streamingRuntime = runtime
klet.runner = runtime

runtimeCache, err := kubecontainer.NewRuntimeCache(klet.containerRuntime)
if err != nil {
return nil, err
}
klet.runtimeCache = runtimeCache

if cadvisor.UsingLegacyCadvisorStats(containerRuntime, remoteRuntimeEndpoint) {
klet.StatsProvider = stats.NewCadvisorStatsProvider(
klet.cadvisor,
@@ -702,7 +717,9 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
klet.pleg = pleg.NewGenericPLEG(klet.containerRuntime, plegChannelCapacity, plegRelistPeriod, klet.podCache, clock.RealClock{})
klet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
klet.runtimeState.addHealthCheck("PLEG", klet.pleg.Healthy)
klet.updatePodCIDR(kubeCfg.PodCIDR)
if err := klet.updatePodCIDR(kubeCfg.PodCIDR); err != nil {
glog.Errorf("Pod CIDR update failed %v", err)
}

// setup containerGC
containerGC, err := kubecontainer.NewContainerGC(klet.containerRuntime, containerGCPolicy, klet.sourcesReady)
@@ -737,29 +754,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
klet.statusManager = status.NewManager(klet.kubeClient, klet.podManager, klet)

if kubeCfg.ServerTLSBootstrap && kubeDeps.TLSOptions != nil && utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletServerCertificate) {
var (
ips []net.IP
names []string
)

// If the address was explicitly configured, use that. Otherwise, try to
// discover addresses from the cloudprovider. Otherwise, make a best guess.
if cfgAddress := net.ParseIP(kubeCfg.Address); cfgAddress != nil && !cfgAddress.IsUnspecified() {
ips = []net.IP{cfgAddress}
names = []string{klet.GetHostname(), hostnameOverride}
} else if len(cloudIPs) != 0 || len(cloudNames) != 0 {
ips = cloudIPs
names = cloudNames
} else {
localIPs, err := allGlobalUnicastIPs()
if err != nil {
return nil, err
}
ips = localIPs
names = []string{klet.GetHostname(), hostnameOverride}
}

klet.serverCertificateManager, err = kubeletcertificate.NewKubeletServerCertificateManager(klet.kubeClient, kubeCfg, klet.nodeName, ips, names, certDirectory)
klet.serverCertificateManager, err = kubeletcertificate.NewKubeletServerCertificateManager(klet.kubeClient, kubeCfg, klet.nodeName, klet.getLastObservedNodeAddresses, certDirectory)
if err != nil {
return nil, fmt.Errorf("failed to initialize certificate manager: %v", err)
}
@@ -781,6 +776,10 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,

tokenManager := token.NewManager(kubeDeps.KubeClient)

if !utilfeature.DefaultFeatureGate.Enabled(features.MountPropagation) {
glog.Warning("Mount propagation feature gate has been deprecated and will be removed in the next release")
}

klet.volumePluginMgr, err =
NewInitializedVolumePluginMgr(klet, secretManager, configMapManager, tokenManager, kubeDeps.VolumePlugins, kubeDeps.DynamicPluginProber)
if err != nil {
@@ -814,18 +813,12 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
experimentalCheckNodeCapabilitiesBeforeMount,
keepTerminatedPodVolumes)

runtimeCache, err := kubecontainer.NewRuntimeCache(klet.containerRuntime)
if err != nil {
return nil, err
}
klet.runtimeCache = runtimeCache
klet.reasonCache = NewReasonCache()
klet.workQueue = queue.NewBasicWorkQueue(klet.clock)
klet.podWorkers = newPodWorkers(klet.syncPod, kubeDeps.Recorder, klet.workQueue, klet.resyncInterval, backOffPeriod, klet.podCache)

klet.backOff = flowcontrol.NewBackOff(backOffPeriod, MaxContainerBackOff)
klet.podKillingCh = make(chan *kubecontainer.PodPair, podKillingChannelCapacity)
klet.setNodeStatusFuncs = klet.defaultNodeStatusFuncs()

// setup eviction manager
evictionManager, evictionAdmitHandler := eviction.NewManager(klet.resourceAnalyzer, evictionConfig, killPodNow(klet.podWorkers, kubeDeps.Recorder), klet.imageManager, klet.containerGC, kubeDeps.Recorder, nodeRef, klet.clock)
@@ -869,9 +862,21 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
klet.appArmorValidator = apparmor.NewValidator(containerRuntime)
klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewAppArmorAdmitHandler(klet.appArmorValidator))
klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewNoNewPrivsAdmitHandler(klet.containerRuntime))

if utilfeature.DefaultFeatureGate.Enabled(features.NodeLease) {
klet.nodeLeaseController = nodelease.NewController(klet.clock, klet.heartbeatClient, string(klet.nodeName), kubeCfg.NodeLeaseDurationSeconds, klet.onRepeatedHeartbeatFailure)
}

klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewProcMountAdmitHandler(klet.containerRuntime))

// Finally, put the most recent version of the config on the Kubelet, so
// people can see how it was configured.
klet.kubeletConfiguration = *kubeCfg

// Generating the status funcs should be the last thing we do,
// since this relies on the rest of the Kubelet having been constructed.
klet.setNodeStatusFuncs = klet.defaultNodeStatusFuncs()

return klet, nil
}

@@ -883,14 +888,22 @@ type serviceLister interface {
type Kubelet struct {
kubeletConfiguration kubeletconfiginternal.KubeletConfiguration

hostname string
// hostname is the hostname the kubelet detected or was given via flag/config
hostname string
// hostnameOverridden indicates the hostname was overridden via flag/config
hostnameOverridden bool

nodeName types.NodeName
runtimeCache kubecontainer.RuntimeCache
kubeClient clientset.Interface
heartbeatClient v1core.CoreV1Interface
csiClient csiclientset.Interface
heartbeatClient clientset.Interface
iptClient utilipt.Interface
rootDirectory string

lastObservedNodeAddressesMux sync.Mutex
lastObservedNodeAddresses []v1.NodeAddress

// onRepeatedHeartbeatFailure is called when a heartbeat operation fails more than once. optional.
onRepeatedHeartbeatFailure func()

@@ -994,14 +1007,8 @@ type Kubelet struct {

// Cloud provider interface.
cloud cloudprovider.Interface
// To keep exclusive access to the cloudproviderRequestParallelism
cloudproviderRequestMux sync.Mutex
// Keep the count of requests processed in parallel (expected to be 1 at most at a given time)
cloudproviderRequestParallelism chan int
// Sync with finished requests
cloudproviderRequestSync chan int
// Request timeout
cloudproviderRequestTimeout time.Duration
// Handles requests to cloud provider with timeout
cloudResourceSyncManager cloudresource.SyncManager

// Indicates that the node initialization happens in an external cloud controller
externalCloudProvider bool
@@ -1042,6 +1049,21 @@ type Kubelet struct {
// as it takes time to gather all necessary node information.
nodeStatusUpdateFrequency time.Duration

// syncNodeStatusMux is a lock on updating the node status, because this path is not thread-safe.
// This lock is used by Kubelet.syncNodeStatus function and shouldn't be used anywhere else.
syncNodeStatusMux sync.Mutex

// updatePodCIDRMux is a lock on updating pod CIDR, because this path is not thread-safe.
// This lock is used by Kubelet.syncNodeStatus function and shouldn't be used anywhere else.
updatePodCIDRMux sync.Mutex

// updateRuntimeMux is a lock on updating runtime, because this path is not thread-safe.
// This lock is used by Kubelet.updateRuntimeUp function and shouldn't be used anywhere else.
updateRuntimeMux sync.Mutex

// nodeLeaseController claims and renews the node lease for this Kubelet
nodeLeaseController nodelease.Controller

// Generates pod events.
pleg pleg.PodLifecycleEventGenerator

@@ -1066,9 +1088,6 @@ type Kubelet struct {
// Mounter to use for volumes.
mounter mount.Interface

// Writer interface to use for volumes.
writer kubeio.Writer

// Manager of non-Runtime containers.
containerManager cm.ContainerManager

@@ -1109,6 +1128,10 @@ type Kubelet struct {
// handlers called during the tryUpdateNodeStatus cycle
setNodeStatusFuncs []func(*v1.Node) error

lastNodeUnschedulableLock sync.Mutex
// maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
lastNodeUnschedulable bool

// TODO: think about moving this to be centralized in PodWorkers in follow-on.
// the list of handlers to call during pod admission.
admitHandlers lifecycle.PodAdmitHandlers
@@ -1172,13 +1195,16 @@ type Kubelet struct {
// pluginwatcher is a utility for Kubelet to register different types of node-level plugins
// such as device plugins or CSI plugins. It discovers plugins by monitoring inotify events under the
// directory returned by kubelet.getPluginsDir()
pluginWatcher pluginwatcher.Watcher
pluginWatcher *pluginwatcher.Watcher

// This flag sets a maximum number of images to report in the node status.
nodeStatusMaxImages int32

// This flag indicates that kubelet should start plugin watcher utility server for discovering Kubelet plugins
enablePluginsWatcher bool

// Handles RuntimeClass objects for the Kubelet.
runtimeClassManager *runtimeclass.Manager
}

func allGlobalUnicastIPs() ([]net.IP, error) {
@@ -1244,12 +1270,10 @@ func (kl *Kubelet) StartGarbageCollection() {
}
}, ContainerGCPeriod, wait.NeverStop)

stopChan := make(chan struct{})
defer close(stopChan)
// when the high threshold is set to 100, stub the image GC manager
if kl.kubeletConfiguration.ImageGCHighThresholdPercent == 100 {
glog.V(2).Infof("ImageGCHighThresholdPercent is set 100, Disable image GC")
go func() { stopChan <- struct{}{} }()
return
}

prevImageGCFailed := false
@@ -1272,7 +1296,7 @@ func (kl *Kubelet) StartGarbageCollection() {

glog.V(vLevel).Infof("Image garbage collection succeeded")
}
}, ImageGCPeriod, stopChan)
}, ImageGCPeriod, wait.NeverStop)
}

// initializeModules will initialize internal modules that do not require the container runtime to be up.
@@ -1342,7 +1366,9 @@ func (kl *Kubelet) initializeRuntimeDependentModules() {
kl.containerLogManager.Start()
if kl.enablePluginsWatcher {
// Adding Registration Callback function for CSI Driver
kl.pluginWatcher.AddHandler("CSIPlugin", csi.RegistrationCallback)
kl.pluginWatcher.AddHandler("CSIPlugin", pluginwatcher.PluginHandler(csi.PluginHandler))
// Adding Registration Callback function for Device Manager
kl.pluginWatcher.AddHandler(pluginwatcherapi.DevicePlugin, kl.containerManager.GetPluginRegistrationHandler())
// Start the plugin watcher
glog.V(4).Infof("starting watcher")
if err := kl.pluginWatcher.Start(); err != nil {
@@ -1361,6 +1387,11 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
glog.Warning("No api server defined - no node status update will be sent.")
}

// Start the cloud provider sync manager
if kl.cloudResourceSyncManager != nil {
go kl.cloudResourceSyncManager.Run(wait.NeverStop)
}

if err := kl.initializeModules(); err != nil {
kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.KubeletSetupFailed, err.Error())
glog.Fatal(err)
@@ -1372,6 +1403,12 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
if kl.kubeClient != nil {
// Start syncing node status immediately, this may set up things the runtime needs to run.
go wait.Until(kl.syncNodeStatus, kl.nodeStatusUpdateFrequency, wait.NeverStop)
go kl.fastStatusUpdateOnce()

// start syncing lease
if utilfeature.DefaultFeatureGate.Enabled(features.NodeLease) {
go kl.nodeLeaseController.Run(wait.NeverStop)
}
}
go wait.Until(kl.updateRuntimeUp, 5*time.Second, wait.NeverStop)

@@ -1384,15 +1421,15 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
// handled by pod workers).
go wait.Until(kl.podKiller, 1*time.Second, wait.NeverStop)

// Start goroutine responsible for checking limits in resolv.conf
if kl.dnsConfigurer.ResolverConfig != "" {
go wait.Until(func() { kl.dnsConfigurer.CheckLimitsForResolvConf() }, 30*time.Second, wait.NeverStop)
}

// Start component sync loops.
kl.statusManager.Start()
kl.probeManager.Start()

// Start syncing RuntimeClasses if enabled.
if kl.runtimeClassManager != nil {
go kl.runtimeClassManager.Run(wait.NeverStop)
}

// Start the pod lifecycle event generator.
kl.pleg.Start()
kl.syncLoop(updates, kl)
@@ -1627,9 +1664,16 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
result := kl.containerRuntime.SyncPod(pod, apiPodStatus, podStatus, pullSecrets, kl.backOff)
kl.reasonCache.Update(pod.UID, result)
if err := result.Error(); err != nil {
// Do not record an event here, as we keep all event logging for sync pod failures
// local to container runtime so we get better errors
return err
// Do not return error if the only failures were pods in backoff
for _, r := range result.SyncResults {
if r.Error != kubecontainer.ErrCrashLoopBackOff && r.Error != images.ErrImagePullBackOff {
// Do not record an event here, as we keep all event logging for sync pod failures
// local to container runtime so we get better errors
return err
}
}

return nil
}

return nil
@@ -1843,7 +1887,7 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle
// once we have checkpointing.
handler.HandlePodAdditions(u.Pods)
case kubetypes.UPDATE:
glog.V(2).Infof("SyncLoop (UPDATE, %q): %q", u.Source, format.PodsWithDeletiontimestamps(u.Pods))
glog.V(2).Infof("SyncLoop (UPDATE, %q): %q", u.Source, format.PodsWithDeletionTimestamps(u.Pods))
handler.HandlePodUpdates(u.Pods)
case kubetypes.REMOVE:
glog.V(2).Infof("SyncLoop (REMOVE, %q): %q", u.Source, format.Pods(u.Pods))
@@ -1978,6 +2022,10 @@ func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) {
start := kl.clock.Now()
sort.Sort(sliceutils.PodsByCreationTime(pods))
for _, pod := range pods {
// Responsible for checking limits in resolv.conf
if kl.dnsConfigurer != nil && kl.dnsConfigurer.ResolverConfig != "" {
kl.dnsConfigurer.CheckLimitsForResolvConf()
}
existingPods := kl.podManager.GetPods()
// Always add the pod to the pod manager. Kubelet relies on the pod
// manager as the source of truth for the desired state. If a pod does
@@ -2015,6 +2063,10 @@ func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) {
func (kl *Kubelet) HandlePodUpdates(pods []*v1.Pod) {
start := kl.clock.Now()
for _, pod := range pods {
// Responsible for checking limits in resolv.conf
if kl.dnsConfigurer != nil && kl.dnsConfigurer.ResolverConfig != "" {
kl.dnsConfigurer.CheckLimitsForResolvConf()
}
kl.podManager.UpdatePod(pod)
if kubepod.IsMirrorPod(pod) {
kl.handleMirrorPod(pod, start)
@@ -2094,6 +2146,9 @@ func (kl *Kubelet) LatestLoopEntryTime() time.Time {
// and returns an error if the status check fails. If the status check is OK,
// update the container runtime uptime in the kubelet runtimeState.
func (kl *Kubelet) updateRuntimeUp() {
kl.updateRuntimeMux.Lock()
defer kl.updateRuntimeMux.Unlock()

s, err := kl.containerRuntime.Status()
if err != nil {
glog.Errorf("Container runtime sanity check failed: %v", err)
@@ -2168,6 +2223,31 @@ func (kl *Kubelet) cleanUpContainersInPod(podID types.UID, exitedContainerID str
}
}

// fastStatusUpdateOnce starts a loop that checks the internal node indexer cache for when a CIDR
// is applied and tries to update pod CIDR immediately. After pod CIDR is updated it fires off
// a runtime update and a node status update. Function returns after one successful node status update.
// Function is executed only during Kubelet start which improves latency to ready node by updating
// pod CIDR, runtime status and node statuses ASAP.
func (kl *Kubelet) fastStatusUpdateOnce() {
for {
time.Sleep(100 * time.Millisecond)
node, err := kl.GetNode()
if err != nil {
glog.Errorf(err.Error())
continue
}
if node.Spec.PodCIDR != "" {
if err := kl.updatePodCIDR(node.Spec.PodCIDR); err != nil {
glog.Errorf("Pod CIDR update failed %v", err)
continue
}
kl.updateRuntimeUp()
kl.syncNodeStatus()
return
}
}
}

// isSyncPodWorthy filters out events that are not worthy of pod syncing
func isSyncPodWorthy(event *pleg.PodLifecycleEvent) bool {
// ContainerRemoved doesn't affect pod state