Bumping k8s dependencies to 1.13
vendor/k8s.io/kubernetes/pkg/controller/nodelifecycle/BUILD (generated, vendored; 64 lines changed)
@@ -19,26 +19,26 @@ go_library(
"//pkg/util/node:go_default_library",
"//pkg/util/system:go_default_library",
"//pkg/util/taints:go_default_library",
"//pkg/util/version:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/informers/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
],
)

@@ -74,19 +74,19 @@ go_test(
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/util/node:go_default_library",
"//pkg/util/taints:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/informers/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
],
)

vendor/k8s.io/kubernetes/pkg/controller/nodelifecycle/OWNERS (generated, vendored; 1 line changed)
@@ -1,6 +1,7 @@
approvers:
- gmarek
- bowei
- k82cn
reviewers:
- gmarek
- smarterclayton

vendor/k8s.io/kubernetes/pkg/controller/nodelifecycle/node_lifecycle_controller.go (generated, vendored; 116 lines changed)
@@ -24,6 +24,8 @@ package nodelifecycle
import (
"context"
"fmt"
"hash/fnv"
"io"
"sync"
"time"

@@ -46,6 +48,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/client-go/util/workqueue"
v1node "k8s.io/kubernetes/pkg/api/v1/node"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
@@ -56,7 +59,6 @@ import (
utilnode "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/util/system"
taintutils "k8s.io/kubernetes/pkg/util/taints"
utilversion "k8s.io/kubernetes/pkg/util/version"
)

func init() {
@@ -65,8 +67,6 @@ func init() {
}

var (
gracefulDeletionVersion = utilversion.MustParseSemantic("v1.1.0")

// UnreachableTaintTemplate is the taint for when a node becomes unreachable.
UnreachableTaintTemplate = &v1.Taint{
Key: algorithm.TaintNodeUnreachable,
@@ -80,15 +80,35 @@ var (
Effect: v1.TaintEffectNoExecute,
}

nodeConditionToTaintKeyMap = map[v1.NodeConditionType]string{
v1.NodeMemoryPressure: algorithm.TaintNodeMemoryPressure,
v1.NodeOutOfDisk: algorithm.TaintNodeOutOfDisk,
v1.NodeDiskPressure: algorithm.TaintNodeDiskPressure,
v1.NodeNetworkUnavailable: algorithm.TaintNodeNetworkUnavailable,
v1.NodePIDPressure: algorithm.TaintNodePIDPressure,
// map {NodeConditionType: {ConditionStatus: TaintKey}}
// represents which NodeConditionType under which ConditionStatus should be
// tainted with which TaintKey
// for certain NodeConditionType, there are multiple {ConditionStatus,TaintKey} pairs
nodeConditionToTaintKeyStatusMap = map[v1.NodeConditionType]map[v1.ConditionStatus]string{
v1.NodeReady: {
v1.ConditionFalse: algorithm.TaintNodeNotReady,
v1.ConditionUnknown: algorithm.TaintNodeUnreachable,
},
v1.NodeMemoryPressure: {
v1.ConditionTrue: algorithm.TaintNodeMemoryPressure,
},
v1.NodeOutOfDisk: {
v1.ConditionTrue: algorithm.TaintNodeOutOfDisk,
},
v1.NodeDiskPressure: {
v1.ConditionTrue: algorithm.TaintNodeDiskPressure,
},
v1.NodeNetworkUnavailable: {
v1.ConditionTrue: algorithm.TaintNodeNetworkUnavailable,
},
v1.NodePIDPressure: {
v1.ConditionTrue: algorithm.TaintNodePIDPressure,
},
}

taintKeyToNodeConditionMap = map[string]v1.NodeConditionType{
algorithm.TaintNodeNotReady: v1.NodeReady,
algorithm.TaintNodeUnreachable: v1.NodeReady,
algorithm.TaintNodeNetworkUnavailable: v1.NodeNetworkUnavailable,
algorithm.TaintNodeMemoryPressure: v1.NodeMemoryPressure,
algorithm.TaintNodeOutOfDisk: v1.NodeOutOfDisk,
@@ -201,6 +221,8 @@ type Controller struct {
// if set to true, NodeController will taint Nodes based on its condition for 'NetworkUnavailable',
// 'MemoryPressure', 'OutOfDisk' and 'DiskPressure'.
taintNodeByCondition bool

nodeUpdateQueue workqueue.Interface
}

// NewNodeLifecycleController returns a new taint controller.
@@ -259,6 +281,7 @@ func NewNodeLifecycleController(podInformer coreinformers.PodInformer,
runTaintManager: runTaintManager,
useTaintBasedEvictions: useTaintBasedEvictions && runTaintManager,
taintNodeByCondition: taintNodeByCondition,
nodeUpdateQueue: workqueue.New(),
}
if useTaintBasedEvictions {
glog.Infof("Controller is using taint based evictions.")
@@ -326,10 +349,12 @@ func NewNodeLifecycleController(podInformer coreinformers.PodInformer,
glog.Infof("Controller will taint node by condition.")
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: nodeutil.CreateAddNodeHandler(func(node *v1.Node) error {
return nc.doNoScheduleTaintingPass(node)
nc.nodeUpdateQueue.Add(node.Name)
return nil
}),
UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
return nc.doNoScheduleTaintingPass(newNode)
nc.nodeUpdateQueue.Add(newNode.Name)
return nil
}),
})
}
@@ -366,18 +391,32 @@ func (nc *Controller) Run(stopCh <-chan struct{}) {
}

if nc.runTaintManager {
go nc.taintManager.Run(wait.NeverStop)
go nc.taintManager.Run(stopCh)
}

if nc.taintNodeByCondition {
// Close node update queue to cleanup go routine.
defer nc.nodeUpdateQueue.ShutDown()

// Start workers to update NoSchedule taint for nodes.
for i := 0; i < scheduler.UpdateWorkerSize; i++ {
// Thanks to "workqueue", each worker just need to get item from queue, because
// the item is flagged when got from queue: if new event come, the new item will
// be re-queued until "Done", so no more than one worker handle the same item and
// no event missed.
go wait.Until(nc.doNoScheduleTaintingPassWorker, time.Second, stopCh)
}
}

if nc.useTaintBasedEvictions {
// Handling taint based evictions. Because we don't want a dedicated logic in TaintManager for NC-originated
// taints and we normally don't rate limit evictions caused by taints, we need to rate limit adding taints.
go wait.Until(nc.doNoExecuteTaintingPass, scheduler.NodeEvictionPeriod, wait.NeverStop)
go wait.Until(nc.doNoExecuteTaintingPass, scheduler.NodeEvictionPeriod, stopCh)
} else {
// Managing eviction of nodes:
// When we delete pods off a node, if the node was not empty at the time we then
// queue an eviction watcher. If we hit an error, retry deletion.
go wait.Until(nc.doEvictionPass, scheduler.NodeEvictionPeriod, wait.NeverStop)
go wait.Until(nc.doEvictionPass, scheduler.NodeEvictionPeriod, stopCh)
}

// Incorporate the results of node status pushed from kubelet to master.
@@ -385,7 +424,7 @@ func (nc *Controller) Run(stopCh <-chan struct{}) {
if err := nc.monitorNodeStatus(); err != nil {
glog.Errorf("Error monitoring node status: %v", err)
}
}, nc.nodeMonitorPeriod, wait.NeverStop)
}, nc.nodeMonitorPeriod, stopCh)

<-stopCh
}
@@ -428,14 +467,41 @@ func (nc *Controller) doFixDeprecatedTaintKeyPass(node *v1.Node) error {
return nil
}

func (nc *Controller) doNoScheduleTaintingPass(node *v1.Node) error {
func (nc *Controller) doNoScheduleTaintingPassWorker() {
for {
obj, shutdown := nc.nodeUpdateQueue.Get()
// "nodeUpdateQueue" will be shutdown when "stopCh" closed;
// we do not need to re-check "stopCh" again.
if shutdown {
return
}
nodeName := obj.(string)

if err := nc.doNoScheduleTaintingPass(nodeName); err != nil {
// TODO (k82cn): Add nodeName back to the queue.
glog.Errorf("Failed to taint NoSchedule on node <%s>, requeue it: %v", nodeName, err)
}
nc.nodeUpdateQueue.Done(nodeName)
}
}

func (nc *Controller) doNoScheduleTaintingPass(nodeName string) error {
node, err := nc.nodeLister.Get(nodeName)
if err != nil {
// If node not found, just ignore it.
if apierrors.IsNotFound(err) {
return nil
}
return err
}

// Map node's condition to Taints.
taints := []v1.Taint{}
var taints []v1.Taint
for _, condition := range node.Status.Conditions {
if _, found := nodeConditionToTaintKeyMap[condition.Type]; found {
if condition.Status == v1.ConditionTrue {
if taintMap, found := nodeConditionToTaintKeyStatusMap[condition.Type]; found {
if taintKey, found := taintMap[condition.Status]; found {
taints = append(taints, v1.Taint{
Key: nodeConditionToTaintKeyMap[condition.Type],
Key: taintKey,
Effect: v1.TaintEffectNoSchedule,
})
}
@@ -451,6 +517,10 @@ func (nc *Controller) doNoScheduleTaintingPass(node *v1.Node) error {

// Get exist taints of node.
nodeTaints := taintutils.TaintSetFilter(node.Spec.Taints, func(t *v1.Taint) bool {
// only NoSchedule taints are candidates to be compared with "taints" later
if t.Effect != v1.TaintEffectNoSchedule {
return false
}
// Find unschedulable taint of node.
if t.Key == algorithm.TaintNodeUnschedulable {
return true
@@ -1176,3 +1246,9 @@ func (nc *Controller) ComputeZoneState(nodeReadyConditions []*v1.NodeCondition)
return notReadyNodes, stateNormal
}
}

func hash(val string, max int) int {
hasher := fnv.New32a()
io.WriteString(hasher, val)
return int(hasher.Sum32()) % max
}
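Note on the hunks above: the NoSchedule tainting pass no longer runs inside the informer event handlers. The handlers only enqueue node names on nodeUpdateQueue, and doNoScheduleTaintingPassWorker drains that queue under wait.Until. A minimal, self-contained sketch of that Get/Done worker pattern follows; the queue, worker count, and processNode function are illustrative stand-ins, not the controller's actual fields.

package main

import (
    "fmt"
    "sync"

    "k8s.io/client-go/util/workqueue"
)

// processNode stands in for the controller's doNoScheduleTaintingPass.
func processNode(name string) error {
    fmt.Println("NoSchedule tainting pass for", name)
    return nil
}

func main() {
    queue := workqueue.New() // untyped FIFO, like nodeUpdateQueue in the diff

    var wg sync.WaitGroup
    for i := 0; i < 4; i++ { // the controller sizes this with scheduler.UpdateWorkerSize
        wg.Add(1)
        go func() {
            defer wg.Done()
            for {
                item, shutdown := queue.Get()
                if shutdown {
                    return // ShutDown was called and the queue is drained
                }
                if err := processNode(item.(string)); err != nil {
                    fmt.Println("error:", err) // the controller only logs here (re-queueing is a TODO in the diff)
                }
                // Done marks the item finished; a name re-added while it was being
                // processed is handed out again, so no update is lost and no two
                // workers handle the same name concurrently.
                queue.Done(item)
            }
        }()
    }

    // Event handlers only enqueue names; duplicate Adds of a pending name collapse.
    queue.Add("node-a")
    queue.Add("node-b")
    queue.Add("node-a")

    queue.ShutDown()
    wg.Wait()
}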
vendor/k8s.io/kubernetes/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go (generated, vendored; 58 lines changed)
@@ -2163,6 +2163,14 @@ func TestTaintsNodeByCondition(t *testing.T) {
Key: algorithm.TaintNodeNetworkUnavailable,
Effect: v1.TaintEffectNoSchedule,
}
notReadyTaint := &v1.Taint{
Key: algorithm.TaintNodeNotReady,
Effect: v1.TaintEffectNoSchedule,
}
unreachableTaint := &v1.Taint{
Key: algorithm.TaintNodeUnreachable,
Effect: v1.TaintEffectNoSchedule,
}

tests := []struct {
Name string
@@ -2271,6 +2279,54 @@ func TestTaintsNodeByCondition(t *testing.T) {
},
ExpectedTaints: []*v1.Taint{networkUnavailableTaint},
},
{
Name: "Ready is false",
Node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionFalse,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
ExpectedTaints: []*v1.Taint{notReadyTaint},
},
{
Name: "Ready is unknown",
Node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
ExpectedTaints: []*v1.Taint{unreachableTaint},
},
}

for _, test := range tests {
@@ -2278,7 +2334,7 @@ func TestTaintsNodeByCondition(t *testing.T) {
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
t.Errorf("unexpected error: %v", err)
}
nodeController.doNoScheduleTaintingPass(test.Node)
nodeController.doNoScheduleTaintingPass(test.Node.Name)
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
t.Errorf("unexpected error: %v", err)
}
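The two new cases above ("Ready is false" and "Ready is unknown") exercise the nodeConditionToTaintKeyStatusMap introduced in node_lifecycle_controller.go: the taint key is now chosen per condition status rather than only for ConditionTrue. A self-contained sketch of that two-level lookup follows; the taint-key string literals are stand-ins for the constants in pkg/scheduler/algorithm and are assumptions of this sketch, not taken from the diff.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
)

// Stand-ins for algorithm.TaintNodeNotReady / algorithm.TaintNodeUnreachable.
const (
    taintNodeNotReady    = "node.kubernetes.io/not-ready"
    taintNodeUnreachable = "node.kubernetes.io/unreachable"
)

// conditionTaints mirrors the NodeReady entry of nodeConditionToTaintKeyStatusMap.
var conditionTaints = map[v1.NodeConditionType]map[v1.ConditionStatus]string{
    v1.NodeReady: {
        v1.ConditionFalse:   taintNodeNotReady,
        v1.ConditionUnknown: taintNodeUnreachable,
    },
}

// taintsFor returns the NoSchedule taints implied by a node's conditions.
func taintsFor(conditions []v1.NodeCondition) []v1.Taint {
    var taints []v1.Taint
    for _, c := range conditions {
        if byStatus, ok := conditionTaints[c.Type]; ok {
            if key, ok := byStatus[c.Status]; ok {
                taints = append(taints, v1.Taint{Key: key, Effect: v1.TaintEffectNoSchedule})
            }
        }
    }
    return taints
}

func main() {
    fmt.Println(taintsFor([]v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}}))   // not-ready taint
    fmt.Println(taintsFor([]v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionUnknown}})) // unreachable taint
}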
vendor/k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler/BUILD (generated, vendored; 36 lines changed)
@@ -12,19 +12,19 @@ go_library(
deps = [
"//pkg/apis/core/helper:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)

@@ -38,12 +38,12 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/controller/testutil:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
],
)

vendor/k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler/taint_manager.go (generated, vendored; 39 lines changed)
@@ -40,9 +40,15 @@ import (
)

const (
nodeUpdateChannelSize = 10
podUpdateChannelSize = 1
retries = 5
// TODO (k82cn): Figure out a reasonable number of workers/channels and propagate
// the number of workers up making it a paramater of Run() function.

// NodeUpdateChannelSize defines the size of channel for node update events.
NodeUpdateChannelSize = 10
// UpdateWorkerSize defines the size of workers for node update or/and pod update.
UpdateWorkerSize = 8
podUpdateChannelSize = 1
retries = 5
)

// Needed to make workqueue work
@@ -80,10 +86,10 @@ func (p *podUpdateItem) nodeName() string {
return ""
}

func hash(val string) int {
func hash(val string, max int) int {
hasher := fnv.New32a()
io.WriteString(hasher, val)
return int(hasher.Sum32())
return int(hasher.Sum32() % uint32(max))
}

// NoExecuteTaintManager listens to Taint/Toleration changes and is responsible for removing Pods
@@ -204,11 +210,8 @@ func NewNoExecuteTaintManager(c clientset.Interface) *NoExecuteTaintManager {
func (tc *NoExecuteTaintManager) Run(stopCh <-chan struct{}) {
glog.V(0).Infof("Starting NoExecuteTaintManager")

// TODO: Figure out a reasonable number of workers and propagate the
// number of workers up making it a paramater of Run() function.
workers := 8
for i := 0; i < workers; i++ {
tc.nodeUpdateChannels = append(tc.nodeUpdateChannels, make(chan *nodeUpdateItem, nodeUpdateChannelSize))
for i := 0; i < UpdateWorkerSize; i++ {
tc.nodeUpdateChannels = append(tc.nodeUpdateChannels, make(chan *nodeUpdateItem, NodeUpdateChannelSize))
tc.podUpdateChannels = append(tc.podUpdateChannels, make(chan *podUpdateItem, podUpdateChannelSize))
}

@@ -221,12 +224,12 @@ func (tc *NoExecuteTaintManager) Run(stopCh <-chan struct{}) {
break
}
nodeUpdate := item.(*nodeUpdateItem)
hash := hash(nodeUpdate.name())
hash := hash(nodeUpdate.name(), UpdateWorkerSize)
select {
case <-stopCh:
tc.nodeUpdateQueue.Done(item)
break
case tc.nodeUpdateChannels[hash%workers] <- nodeUpdate:
return
case tc.nodeUpdateChannels[hash] <- nodeUpdate:
}
tc.nodeUpdateQueue.Done(item)
}
@@ -239,20 +242,20 @@ func (tc *NoExecuteTaintManager) Run(stopCh <-chan struct{}) {
break
}
podUpdate := item.(*podUpdateItem)
hash := hash(podUpdate.nodeName())
hash := hash(podUpdate.nodeName(), UpdateWorkerSize)
select {
case <-stopCh:
tc.podUpdateQueue.Done(item)
break
case tc.podUpdateChannels[hash%workers] <- podUpdate:
return
case tc.podUpdateChannels[hash] <- podUpdate:
}
tc.podUpdateQueue.Done(item)
}
}(stopCh)

wg := sync.WaitGroup{}
wg.Add(workers)
for i := 0; i < workers; i++ {
wg.Add(UpdateWorkerSize)
for i := 0; i < UpdateWorkerSize; i++ {
go tc.worker(i, wg.Done, stopCh)
}
wg.Wait()
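The dispatcher change above folds the worker count into the hash itself: hash(name, UpdateWorkerSize) returns an index in [0, UpdateWorkerSize), so every update for a given node name is routed to the same per-worker channel and per-node ordering is preserved without extra locking. A small sketch of that sharding, with plain strings standing in for nodeUpdateItem and podUpdateItem:

package main

import (
    "fmt"
    "hash/fnv"
    "io"
)

const updateWorkerSize = 8 // mirrors UpdateWorkerSize in the diff

// hash maps a name onto [0, max), as in the updated taint_manager.go.
func hash(val string, max int) int {
    hasher := fnv.New32a()
    io.WriteString(hasher, val)
    return int(hasher.Sum32() % uint32(max))
}

func main() {
    // One buffered channel per worker; the same node name always picks the same index.
    channels := make([]chan string, updateWorkerSize)
    for i := range channels {
        channels[i] = make(chan string, 10)
    }

    for _, node := range []string{"node-a", "node-b", "node-a"} {
        idx := hash(node, updateWorkerSize)
        channels[idx] <- node
        fmt.Printf("%s -> worker %d\n", node, idx)
    }
}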