Bumping k8s dependencies to 1.13

Cheng Xing
2018-11-16 14:08:25 -08:00
parent 305407125c
commit b4c0b68ec7
8002 changed files with 884099 additions and 276228 deletions

pkg/scheduler/cache/BUILD

@@ -6,6 +6,7 @@ go_library(
"cache.go",
"interface.go",
"node_info.go",
"node_tree.go",
"util.go",
],
importpath = "k8s.io/kubernetes/pkg/scheduler/cache",
@@ -15,13 +16,15 @@ go_library(
"//pkg/features:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//pkg/util/node:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
],
)
@@ -30,22 +33,25 @@ go_test(
srcs = [
"cache_test.go",
"node_info_test.go",
"node_tree_test.go",
"util_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//pkg/util/parsers:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
],
)

pkg/scheduler/cache/cache.go

@@ -23,6 +23,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
@@ -51,14 +52,17 @@ type schedulerCache struct {
period time.Duration
// This mutex guards all fields within this cache struct.
mu sync.Mutex
mu sync.RWMutex
// a set of assumed pod keys.
// The key could further be used to get an entry in podStates.
assumedPods map[string]bool
// a map from pod key to podState.
podStates map[string]*podState
nodes map[string]*NodeInfo
nodeTree *NodeTree
pdbs map[string]*policy.PodDisruptionBudget
// A map from image name to its imageState.
imageStates map[string]*imageState
}
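The lock changes from sync.Mutex to sync.RWMutex so read-only operations (Snapshot, FilteredList, IsAssumedPod, GetPod, ListPDBs, IsUpToDate) can take the shared lock and run concurrently, while mutating operations keep the exclusive lock. A minimal, self-contained sketch of the pattern, with illustrative names:

package sketch

import "sync"

// readMostlyCache mirrors the locking pattern adopted above: writers take
// the exclusive lock, read-only accessors take the shared lock, so many
// readers can proceed in parallel.
type readMostlyCache struct {
	mu    sync.RWMutex
	nodes map[string]string
}

func (c *readMostlyCache) add(name, zone string) {
	c.mu.Lock() // exclusive: mutates the map
	defer c.mu.Unlock()
	c.nodes[name] = zone
}

func (c *readMostlyCache) get(name string) (string, bool) {
	c.mu.RLock() // shared: concurrent readers are allowed
	defer c.mu.RUnlock()
	z, ok := c.nodes[name]
	return z, ok
}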
type podState struct {
@@ -69,6 +73,29 @@ type podState struct {
bindingFinished bool
}
type imageState struct {
// Size of the image
size int64
// A set of node names for nodes having this image present
nodes sets.String
}
// ImageStateSummary provides summarized information about the state of an image.
type ImageStateSummary struct {
// Size of the image
Size int64
// Used to track how many nodes have this image
NumNodes int
}
// createImageStateSummary returns a summarizing snapshot of the given image's state.
func (cache *schedulerCache) createImageStateSummary(state *imageState) *ImageStateSummary {
return &ImageStateSummary{
Size: state.size,
NumNodes: len(state.nodes),
}
}
func newSchedulerCache(ttl, period time.Duration, stop <-chan struct{}) *schedulerCache {
return &schedulerCache{
ttl: ttl,
@@ -76,17 +103,19 @@ func newSchedulerCache(ttl, period time.Duration, stop <-chan struct{}) *schedul
stop: stop,
nodes: make(map[string]*NodeInfo),
nodeTree: newNodeTree(nil),
assumedPods: make(map[string]bool),
podStates: make(map[string]*podState),
pdbs: make(map[string]*policy.PodDisruptionBudget),
imageStates: make(map[string]*imageState),
}
}
// Snapshot takes a snapshot of the current schedulerCache. The method has a performance cost
// and should only be used in non-critical paths.
func (cache *schedulerCache) Snapshot() *Snapshot {
cache.mu.Lock()
defer cache.mu.Unlock()
cache.mu.RLock()
defer cache.mu.RUnlock()
nodes := make(map[string]*NodeInfo)
for k, v := range cache.nodes {
@@ -113,6 +142,7 @@ func (cache *schedulerCache) Snapshot() *Snapshot {
func (cache *schedulerCache) UpdateNodeNameToInfoMap(nodeNameToInfo map[string]*NodeInfo) error {
cache.mu.Lock()
defer cache.mu.Unlock()
for name, info := range cache.nodes {
if utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && info.TransientInfo != nil {
// Transient scheduler info is reset here.
@@ -136,8 +166,8 @@ func (cache *schedulerCache) List(selector labels.Selector) ([]*v1.Pod, error) {
}
func (cache *schedulerCache) FilteredList(podFilter PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
cache.mu.Lock()
defer cache.mu.Unlock()
cache.mu.RLock()
defer cache.mu.RUnlock()
// podFilter is expected to return true for most or all of the pods. We
// can avoid expensive array growth without wasting too much memory by
// pre-allocating capacity.
@@ -188,8 +218,8 @@ func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error {
return err
}
cache.mu.Lock()
defer cache.mu.Unlock()
cache.mu.RLock()
defer cache.mu.RUnlock()
glog.V(5).Infof("Finished binding for pod %v. Can be expired.", key)
currState, ok := cache.podStates[key]
@@ -317,6 +347,7 @@ func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error {
if err := cache.updatePod(oldPod, newPod); err != nil {
return err
}
currState.pod = newPod
default:
return fmt.Errorf("pod %v is not added to scheduler cache, so cannot be updated", key)
}
@@ -358,8 +389,8 @@ func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) {
return false, err
}
cache.mu.Lock()
defer cache.mu.Unlock()
cache.mu.RLock()
defer cache.mu.RUnlock()
b, found := cache.assumedPods[key]
if !found {
@@ -374,8 +405,8 @@ func (cache *schedulerCache) GetPod(pod *v1.Pod) (*v1.Pod, error) {
return nil, err
}
cache.mu.Lock()
defer cache.mu.Unlock()
cache.mu.RLock()
defer cache.mu.RUnlock()
podState, ok := cache.podStates[key]
if !ok {
@@ -393,7 +424,12 @@ func (cache *schedulerCache) AddNode(node *v1.Node) error {
if !ok {
n = NewNodeInfo()
cache.nodes[node.Name] = n
} else {
cache.removeNodeImageStates(n.node)
}
cache.nodeTree.AddNode(node)
cache.addNodeImageStates(node, n)
return n.SetNode(node)
}
@@ -405,7 +441,12 @@ func (cache *schedulerCache) UpdateNode(oldNode, newNode *v1.Node) error {
if !ok {
n = NewNodeInfo()
cache.nodes[newNode.Name] = n
} else {
cache.removeNodeImageStates(n.node)
}
cache.nodeTree.UpdateNode(oldNode, newNode)
cache.addNodeImageStates(newNode, n)
return n.SetNode(newNode)
}
@@ -424,9 +465,63 @@ func (cache *schedulerCache) RemoveNode(node *v1.Node) error {
if len(n.pods) == 0 && n.node == nil {
delete(cache.nodes, node.Name)
}
cache.nodeTree.RemoveNode(node)
cache.removeNodeImageStates(node)
return nil
}
// addNodeImageStates adds the states of the images on the given node to the given nodeInfo and updates the
// imageStates in the scheduler cache. This function assumes the lock on the scheduler cache has been acquired.
func (cache *schedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *NodeInfo) {
newSum := make(map[string]*ImageStateSummary)
for _, image := range node.Status.Images {
for _, name := range image.Names {
// update the entry in imageStates
state, ok := cache.imageStates[name]
if !ok {
state = &imageState{
size: image.SizeBytes,
nodes: sets.NewString(node.Name),
}
cache.imageStates[name] = state
} else {
state.nodes.Insert(node.Name)
}
// create the imageStateSummary for this image
if _, ok := newSum[name]; !ok {
newSum[name] = cache.createImageStateSummary(state)
}
}
}
nodeInfo.imageStates = newSum
}
// removeNodeImageStates removes the given node record from image entries having the node
// in imageStates cache. After the removal, if any image becomes free, i.e., the image
// is no longer available on any node, the image entry will be removed from imageStates.
func (cache *schedulerCache) removeNodeImageStates(node *v1.Node) {
if node == nil {
return
}
for _, image := range node.Status.Images {
for _, name := range image.Names {
state, ok := cache.imageStates[name]
if ok {
state.nodes.Delete(node.Name)
if len(state.nodes) == 0 {
// Remove the unused image to make sure the length of
// imageStates represents the total number of different
// images on all nodes
delete(cache.imageStates, name)
}
}
}
}
}
func (cache *schedulerCache) AddPDB(pdb *policy.PodDisruptionBudget) error {
cache.mu.Lock()
defer cache.mu.Unlock()
@@ -449,8 +544,8 @@ func (cache *schedulerCache) RemovePDB(pdb *policy.PodDisruptionBudget) error {
}
func (cache *schedulerCache) ListPDBs(selector labels.Selector) ([]*policy.PodDisruptionBudget, error) {
cache.mu.Lock()
defer cache.mu.Unlock()
cache.mu.RLock()
defer cache.mu.RUnlock()
var pdbs []*policy.PodDisruptionBudget
for _, pdb := range cache.pdbs {
if selector.Matches(labels.Set(pdb.Labels)) {
@@ -461,8 +556,8 @@ func (cache *schedulerCache) ListPDBs(selector labels.Selector) ([]*policy.PodDi
}
func (cache *schedulerCache) IsUpToDate(n *NodeInfo) bool {
cache.mu.Lock()
defer cache.mu.Unlock()
cache.mu.RLock()
defer cache.mu.RUnlock()
node, ok := cache.nodes[n.Node().Name]
return ok && n.generation == node.generation
}
@@ -508,3 +603,7 @@ func (cache *schedulerCache) expirePod(key string, ps *podState) error {
delete(cache.podStates, key)
return nil
}
func (cache *schedulerCache) NodeTree() *NodeTree {
return cache.nodeTree
}
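ImageStateSummary deliberately carries both an image's size and the number of nodes that already hold it, which is the information an image-locality style scoring function needs. A hypothetical consumer, assuming a spread-weighted formula that is not part of this commit:

package sketch

// ImageStateSummary is redeclared here only to keep the sketch
// self-contained; the real type lives in pkg/scheduler/cache.
type ImageStateSummary struct {
	Size     int64
	NumNodes int
}

// scaledImageScore weighs a cached image's size by the fraction of nodes
// that already have it, so widely replicated images contribute less to a
// node's locality score. The formula is assumed for illustration.
func scaledImageScore(s ImageStateSummary, totalNumNodes int) int64 {
	spread := float64(s.NumNodes) / float64(totalNumNodes)
	return int64(float64(s.Size) * spread)
}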

pkg/scheduler/cache/cache_test.go

@@ -108,7 +108,7 @@ func TestAssumePodScheduled(t *testing.T) {
allocatableResource: &Resource{},
pods: []*v1.Pod{testPods[0]},
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
imageSizes: map[string]int64{},
imageStates: make(map[string]*ImageStateSummary),
},
}, {
pods: []*v1.Pod{testPods[1], testPods[2]},
@@ -125,7 +125,7 @@ func TestAssumePodScheduled(t *testing.T) {
allocatableResource: &Resource{},
pods: []*v1.Pod{testPods[1], testPods[2]},
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(),
imageSizes: map[string]int64{},
imageStates: make(map[string]*ImageStateSummary),
},
}, { // test non-zero request
pods: []*v1.Pod{testPods[3]},
@@ -142,7 +142,7 @@ func TestAssumePodScheduled(t *testing.T) {
allocatableResource: &Resource{},
pods: []*v1.Pod{testPods[3]},
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
imageSizes: map[string]int64{},
imageStates: make(map[string]*ImageStateSummary),
},
}, {
pods: []*v1.Pod{testPods[4]},
@@ -160,7 +160,7 @@ func TestAssumePodScheduled(t *testing.T) {
allocatableResource: &Resource{},
pods: []*v1.Pod{testPods[4]},
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
imageSizes: map[string]int64{},
imageStates: make(map[string]*ImageStateSummary),
},
}, {
pods: []*v1.Pod{testPods[4], testPods[5]},
@@ -178,7 +178,7 @@ func TestAssumePodScheduled(t *testing.T) {
allocatableResource: &Resource{},
pods: []*v1.Pod{testPods[4], testPods[5]},
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(),
imageSizes: map[string]int64{},
imageStates: make(map[string]*ImageStateSummary),
},
}, {
pods: []*v1.Pod{testPods[6]},
@@ -195,7 +195,7 @@ func TestAssumePodScheduled(t *testing.T) {
allocatableResource: &Resource{},
pods: []*v1.Pod{testPods[6]},
usedPorts: newHostPortInfoBuilder().build(),
imageSizes: map[string]int64{},
imageStates: make(map[string]*ImageStateSummary),
},
},
}
@@ -275,7 +275,7 @@ func TestExpirePod(t *testing.T) {
allocatableResource: &Resource{},
pods: []*v1.Pod{testPods[1]},
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
imageSizes: map[string]int64{},
imageStates: make(map[string]*ImageStateSummary),
},
}}
@@ -328,7 +328,7 @@ func TestAddPodWillConfirm(t *testing.T) {
allocatableResource: &Resource{},
pods: []*v1.Pod{testPods[0]},
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
imageSizes: map[string]int64{},
imageStates: make(map[string]*ImageStateSummary),
},
}}
@@ -425,7 +425,7 @@ func TestAddPodWillReplaceAssumed(t *testing.T) {
allocatableResource: &Resource{},
pods: []*v1.Pod{updatedPod.DeepCopy()},
usedPorts: newHostPortInfoBuilder().add("TCP", "0.0.0.0", 90).build(),
imageSizes: map[string]int64{},
imageStates: make(map[string]*ImageStateSummary),
},
},
}}
@@ -481,7 +481,7 @@ func TestAddPodAfterExpiration(t *testing.T) {
allocatableResource: &Resource{},
pods: []*v1.Pod{basePod},
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
imageSizes: map[string]int64{},
imageStates: make(map[string]*ImageStateSummary),
},
}}
@@ -537,7 +537,7 @@ func TestUpdatePod(t *testing.T) {
allocatableResource: &Resource{},
pods: []*v1.Pod{testPods[1]},
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
imageSizes: map[string]int64{},
imageStates: make(map[string]*ImageStateSummary),
}, {
requestedResource: &Resource{
MilliCPU: 100,
@@ -551,7 +551,7 @@ func TestUpdatePod(t *testing.T) {
allocatableResource: &Resource{},
pods: []*v1.Pod{testPods[0]},
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
imageSizes: map[string]int64{},
imageStates: make(map[string]*ImageStateSummary),
}},
}}
@@ -577,6 +577,65 @@ func TestUpdatePod(t *testing.T) {
}
}
// TestUpdatePodAndGet tests that GetPod always returns the latest pod state
func TestUpdatePodAndGet(t *testing.T) {
nodeName := "node"
ttl := 10 * time.Second
testPods := []*v1.Pod{
makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
makeBasePod(t, nodeName, "test", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}),
}
tests := []struct {
pod *v1.Pod
podToUpdate *v1.Pod
handler func(cache Cache, pod *v1.Pod) error
assumePod bool
}{
{
pod: testPods[0],
podToUpdate: testPods[0],
handler: func(cache Cache, pod *v1.Pod) error {
return cache.AssumePod(pod)
},
assumePod: true,
},
{
pod: testPods[0],
podToUpdate: testPods[1],
handler: func(cache Cache, pod *v1.Pod) error {
return cache.AddPod(pod)
},
assumePod: false,
},
}
for _, tt := range tests {
cache := newSchedulerCache(ttl, time.Second, nil)
if err := tt.handler(cache, tt.pod); err != nil {
t.Fatalf("unexpected err: %v", err)
}
if !tt.assumePod {
if err := cache.UpdatePod(tt.pod, tt.podToUpdate); err != nil {
t.Fatalf("UpdatePod failed: %v", err)
}
}
cachedPod, err := cache.GetPod(tt.pod)
if err != nil {
t.Fatalf("GetPod failed: %v", err)
}
if !reflect.DeepEqual(tt.podToUpdate, cachedPod) {
t.Fatalf("pod get=%s, want=%s", cachedPod, tt.podToUpdate)
}
}
}
// TestExpireAddUpdatePod tests the sequence in which a pod is expired, added, and then updated
func TestExpireAddUpdatePod(t *testing.T) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation
@@ -610,7 +669,7 @@ func TestExpireAddUpdatePod(t *testing.T) {
allocatableResource: &Resource{},
pods: []*v1.Pod{testPods[1]},
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
imageSizes: map[string]int64{},
imageStates: make(map[string]*ImageStateSummary),
}, {
requestedResource: &Resource{
MilliCPU: 100,
@@ -624,7 +683,7 @@ func TestExpireAddUpdatePod(t *testing.T) {
allocatableResource: &Resource{},
pods: []*v1.Pod{testPods[0]},
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
imageSizes: map[string]int64{},
imageStates: make(map[string]*ImageStateSummary),
}},
}}
@@ -702,7 +761,7 @@ func TestEphemeralStorageResource(t *testing.T) {
allocatableResource: &Resource{},
pods: []*v1.Pod{podE},
usedPorts: schedutil.HostPortInfo{},
imageSizes: map[string]int64{},
imageStates: make(map[string]*ImageStateSummary),
},
},
}
@@ -749,7 +808,7 @@ func TestRemovePod(t *testing.T) {
allocatableResource: &Resource{},
pods: []*v1.Pod{basePod},
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
imageSizes: map[string]int64{},
imageStates: make(map[string]*ImageStateSummary),
},
}}
@@ -1006,6 +1065,9 @@ func TestNodeOperators(t *testing.T) {
if !found {
t.Errorf("Failed to find node %v in schedulercache.", node.Name)
}
if cache.nodeTree.NumNodes != 1 || cache.nodeTree.Next() != node.Name {
t.Errorf("cache.nodeTree is not updated correctly after adding node: %v", node.Name)
}
// Generations are globally unique. We check in our unit tests that they are incremented correctly.
expected.generation = got.generation
@@ -1041,12 +1103,21 @@ func TestNodeOperators(t *testing.T) {
if !reflect.DeepEqual(got, expected) {
t.Errorf("Failed to update node in schedulercache:\n got: %+v \nexpected: %+v", got, expected)
}
// Check nodeTree after update
if cache.nodeTree.NumNodes != 1 || cache.nodeTree.Next() != node.Name {
t.Errorf("unexpected cache.nodeTree after updating node: %v", node.Name)
}
// Case 4: the node cannot be removed if its pod list is not empty.
cache.RemoveNode(node)
if _, found := cache.nodes[node.Name]; !found {
t.Errorf("The node %v should not be removed if pods is not empty.", node.Name)
}
// Check nodeTree after remove. The node should be removed from the nodeTree even if there are
// still pods on it.
if cache.nodeTree.NumNodes != 0 || cache.nodeTree.Next() != "" {
t.Errorf("unexpected cache.nodeTree after removing node: %v", node.Name)
}
}
}
@@ -1069,16 +1140,18 @@ func BenchmarkUpdate1kNodes30kPods(b *testing.B) {
}
}
func BenchmarkExpire100Pods(b *testing.B) {
benchmarkExpire(b, 100)
}
func BenchmarkExpire1kPods(b *testing.B) {
benchmarkExpire(b, 1000)
}
func BenchmarkExpire10kPods(b *testing.B) {
benchmarkExpire(b, 10000)
func BenchmarkExpirePods(b *testing.B) {
podNums := []int{
100,
1000,
10000,
}
for _, podNum := range podNums {
name := fmt.Sprintf("%dPods", podNum)
b.Run(name, func(b *testing.B) {
benchmarkExpire(b, podNum)
})
}
}
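Folding the three fixed-size benchmarks into b.Run sub-benchmarks also makes a single size addressable from the command line, e.g. (package path assumed):

go test ./pkg/scheduler/cache -run '^$' -bench 'BenchmarkExpirePods/1000Pods'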
func benchmarkExpire(b *testing.B, podNum int) {

pkg/scheduler/cache/interface.go

@@ -125,6 +125,9 @@ type Cache interface {
// IsUpToDate returns true if the given NodeInfo matches the current data in the cache.
IsUpToDate(n *NodeInfo) bool
// NodeTree returns a node tree structure
NodeTree() *NodeTree
}
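A sketch of how a holder of the Cache interface might consume the new accessor; nextN is a hypothetical helper, not part of this commit:

package sketch

import (
	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

// nextN pulls n node names from the cache's NodeTree; successive Next()
// calls walk the zones round-robin (assumes a populated cache).
func nextN(c schedulercache.Cache, n int) []string {
	out := make([]string, 0, n)
	for i := 0; i < n; i++ {
		out = append(out, c.NodeTree().Next())
	}
	return out
}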
// Snapshot is a snapshot of cache state

pkg/scheduler/cache/node_info.go

@@ -58,9 +58,10 @@ type NodeInfo struct {
taints []v1.Taint
taintsErr error
// This is a map from image name to image size, also for checking image existence on the node
// Cache it here to avoid rebuilding the map during scheduling, e.g., in image_locality.go
imageSizes map[string]int64
// imageStates holds the entry of an image if and only if this image is on the node. The entry can be used for
// checking an image's existence and advanced usage (e.g., image locality scheduling policy) based on the image
// state information.
imageStates map[string]*ImageStateSummary
// TransientInfo holds the information pertaining to a scheduling cycle. This will be destructed at the end of
// scheduling cycle.
@@ -261,7 +262,7 @@ func NewNodeInfo(pods ...*v1.Pod) *NodeInfo {
TransientInfo: newTransientSchedulerInfo(),
generation: nextGeneration(),
usedPorts: make(util.HostPortInfo),
imageSizes: make(map[string]int64),
imageStates: make(map[string]*ImageStateSummary),
}
for _, pod := range pods {
ni.AddPod(pod)
@@ -293,12 +294,12 @@ func (n *NodeInfo) UsedPorts() util.HostPortInfo {
return n.usedPorts
}
// ImageSizes returns the image size information on this node.
func (n *NodeInfo) ImageSizes() map[string]int64 {
// ImageStates returns the state information of all images.
func (n *NodeInfo) ImageStates() map[string]*ImageStateSummary {
if n == nil {
return nil
}
return n.imageSizes
return n.imageStates
}
// PodsWithAffinity return all pods with (anti)affinity constraints on this node.
@@ -392,15 +393,20 @@ func (n *NodeInfo) Clone() *NodeInfo {
diskPressureCondition: n.diskPressureCondition,
pidPressureCondition: n.pidPressureCondition,
usedPorts: make(util.HostPortInfo),
imageSizes: n.imageSizes,
imageStates: n.imageStates,
generation: n.generation,
}
if len(n.pods) > 0 {
clone.pods = append([]*v1.Pod(nil), n.pods...)
}
if len(n.usedPorts) > 0 {
for k, v := range n.usedPorts {
clone.usedPorts[k] = v
// util.HostPortInfo is a map-in-map struct
// make sure it's deep copied
for ip, portMap := range n.usedPorts {
clone.usedPorts[ip] = make(map[util.ProtocolPort]struct{})
for protocolPort, v := range portMap {
clone.usedPorts[ip][protocolPort] = v
}
}
}
if len(n.podsWithAffinity) > 0 {
@@ -547,17 +553,6 @@ func (n *NodeInfo) updateUsedPorts(pod *v1.Pod, add bool) {
}
}
func (n *NodeInfo) updateImageSizes() {
node := n.Node()
imageSizes := make(map[string]int64)
for _, image := range node.Status.Images {
for _, name := range image.Names {
imageSizes[name] = image.SizeBytes
}
}
n.imageSizes = imageSizes
}
// SetNode sets the overall node information.
func (n *NodeInfo) SetNode(node *v1.Node) error {
n.node = node
@@ -579,7 +574,6 @@ func (n *NodeInfo) SetNode(node *v1.Node) error {
}
}
n.TransientInfo = newTransientSchedulerInfo()
n.updateImageSizes()
n.generation = nextGeneration()
return nil
}
@@ -596,6 +590,7 @@ func (n *NodeInfo) RemoveNode(node *v1.Node) error {
n.memoryPressureCondition = v1.ConditionUnknown
n.diskPressureCondition = v1.ConditionUnknown
n.pidPressureCondition = v1.ConditionUnknown
n.imageStates = make(map[string]*ImageStateSummary)
n.generation = nextGeneration()
return nil
}

pkg/scheduler/cache/node_info_test.go

@@ -26,7 +26,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/scheduler/util"
"k8s.io/kubernetes/pkg/util/parsers"
)
func TestNewResource(t *testing.T) {
@@ -241,46 +240,6 @@ func TestSetMaxResource(t *testing.T) {
}
}
func TestImageSizes(t *testing.T) {
ni := fakeNodeInfo()
ni.node = &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "test-node",
},
Status: v1.NodeStatus{
Images: []v1.ContainerImage{
{
Names: []string{
"gcr.io/10:" + parsers.DefaultImageTag,
"gcr.io/10:v1",
},
SizeBytes: int64(10 * 1024 * 1024),
},
{
Names: []string{
"gcr.io/50:" + parsers.DefaultImageTag,
"gcr.io/50:v1",
},
SizeBytes: int64(50 * 1024 * 1024),
},
},
},
}
ni.updateImageSizes()
expected := map[string]int64{
"gcr.io/10:" + parsers.DefaultImageTag: 10 * 1024 * 1024,
"gcr.io/10:v1": 10 * 1024 * 1024,
"gcr.io/50:" + parsers.DefaultImageTag: 50 * 1024 * 1024,
"gcr.io/50:v1": 50 * 1024 * 1024,
}
imageSizes := ni.ImageSizes()
if !reflect.DeepEqual(expected, imageSizes) {
t.Errorf("expected: %#v, got: %#v", expected, imageSizes)
}
}
func TestNewNodeInfo(t *testing.T) {
nodeName := "test-node"
pods := []*v1.Pod{
@@ -312,7 +271,7 @@ func TestNewNodeInfo(t *testing.T) {
{Protocol: "TCP", Port: 8080}: {},
},
},
imageSizes: map[string]int64{},
imageStates: map[string]*ImageStateSummary{},
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
@@ -401,9 +360,7 @@ func TestNodeInfoClone(t *testing.T) {
{Protocol: "TCP", Port: 8080}: {},
},
},
imageSizes: map[string]int64{
"gcr.io/10": 10 * 1024 * 1024,
},
imageStates: map[string]*ImageStateSummary{},
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
@@ -473,9 +430,7 @@ func TestNodeInfoClone(t *testing.T) {
{Protocol: "TCP", Port: 8080}: {},
},
},
imageSizes: map[string]int64{
"gcr.io/10": 10 * 1024 * 1024,
},
imageStates: map[string]*ImageStateSummary{},
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
@@ -540,6 +495,7 @@ func TestNodeInfoClone(t *testing.T) {
ni := test.nodeInfo.Clone()
// Modify the field to check if the result is a clone of the origin one.
test.nodeInfo.generation += 10
test.nodeInfo.usedPorts.Remove("127.0.0.1", "TCP", 80)
if !reflect.DeepEqual(test.expected, ni) {
t.Errorf("expected: %#v, got: %#v", test.expected, ni)
}
@@ -633,7 +589,7 @@ func TestNodeInfoAddPod(t *testing.T) {
{Protocol: "TCP", Port: 8080}: {},
},
},
imageSizes: map[string]int64{},
imageStates: map[string]*ImageStateSummary{},
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
@@ -752,7 +708,7 @@ func TestNodeInfoRemovePod(t *testing.T) {
{Protocol: "TCP", Port: 8080}: {},
},
},
imageSizes: map[string]int64{},
imageStates: map[string]*ImageStateSummary{},
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
@@ -868,7 +824,7 @@ func TestNodeInfoRemovePod(t *testing.T) {
{Protocol: "TCP", Port: 8080}: {},
},
},
imageSizes: map[string]int64{},
imageStates: map[string]*ImageStateSummary{},
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{

pkg/scheduler/cache/node_tree.go

@@ -0,0 +1,185 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"fmt"
"sync"
"k8s.io/api/core/v1"
utilnode "k8s.io/kubernetes/pkg/util/node"
"github.com/golang/glog"
)
// NodeTree is a tree-like data structure that holds node names in each zone. Zone names are
// keys of "NodeTree.tree", and each value is the array of node names in that zone.
type NodeTree struct {
tree map[string]*nodeArray // a map from zone (region-zone) to an array of nodes in the zone.
zones []string // a list of all the zones in the tree (keys)
zoneIndex int
NumNodes int
mu sync.RWMutex
}
// nodeArray holds the nodes that are in a single zone.
// We use a slice (as opposed to a set/map) to store the nodes because iterating over the nodes is
// a lot more frequent than searching them by name.
type nodeArray struct {
nodes []string
lastIndex int
}
func (na *nodeArray) next() (nodeName string, exhausted bool) {
if len(na.nodes) == 0 {
glog.Error("The nodeArray is empty. It should have been deleted from NodeTree.")
return "", false
}
if na.lastIndex >= len(na.nodes) {
return "", true
}
nodeName = na.nodes[na.lastIndex]
na.lastIndex++
return nodeName, false
}
func newNodeTree(nodes []*v1.Node) *NodeTree {
nt := &NodeTree{
tree: make(map[string]*nodeArray),
}
for _, n := range nodes {
nt.AddNode(n)
}
return nt
}
// AddNode adds a node and its corresponding zone to the tree. If the zone already exists, the node
// is added to the array of nodes in that zone.
func (nt *NodeTree) AddNode(n *v1.Node) {
nt.mu.Lock()
defer nt.mu.Unlock()
nt.addNode(n)
}
func (nt *NodeTree) addNode(n *v1.Node) {
zone := utilnode.GetZoneKey(n)
if na, ok := nt.tree[zone]; ok {
for _, nodeName := range na.nodes {
if nodeName == n.Name {
glog.Warningf("node %v already exists in the NodeTree", n.Name)
return
}
}
na.nodes = append(na.nodes, n.Name)
} else {
nt.zones = append(nt.zones, zone)
nt.tree[zone] = &nodeArray{nodes: []string{n.Name}, lastIndex: 0}
}
glog.V(5).Infof("Added node %v in group %v to NodeTree", n.Name, zone)
nt.NumNodes++
}
// RemoveNode removes a node from the NodeTree.
func (nt *NodeTree) RemoveNode(n *v1.Node) error {
nt.mu.Lock()
defer nt.mu.Unlock()
return nt.removeNode(n)
}
func (nt *NodeTree) removeNode(n *v1.Node) error {
zone := utilnode.GetZoneKey(n)
if na, ok := nt.tree[zone]; ok {
for i, nodeName := range na.nodes {
if nodeName == n.Name {
na.nodes = append(na.nodes[:i], na.nodes[i+1:]...)
if len(na.nodes) == 0 {
nt.removeZone(zone)
}
glog.V(5).Infof("Removed node %v in group %v from NodeTree", n.Name, zone)
nt.NumNodes--
return nil
}
}
}
glog.Errorf("Node %v in group %v was not found", n.Name, zone)
return fmt.Errorf("node %v in group %v was not found", n.Name, zone)
}
// removeZone removes a zone from the tree.
// This function must be called while the writer lock is held.
func (nt *NodeTree) removeZone(zone string) {
delete(nt.tree, zone)
for i, z := range nt.zones {
if z == zone {
nt.zones = append(nt.zones[:i], nt.zones[i+1:]...)
}
}
}
// UpdateNode updates a node in the NodeTree.
func (nt *NodeTree) UpdateNode(old, new *v1.Node) {
var oldZone string
if old != nil {
oldZone = utilnode.GetZoneKey(old)
}
newZone := utilnode.GetZoneKey(new)
// If the zone ID of the node has not changed, we don't need to do anything. Name of the node
// cannot be changed in an update.
if oldZone == newZone {
return
}
nt.mu.Lock()
defer nt.mu.Unlock()
nt.removeNode(old) // No error checking. We ignore whether the old node exists or not.
nt.addNode(new)
}
func (nt *NodeTree) resetExhausted() {
for _, na := range nt.tree {
na.lastIndex = 0
}
nt.zoneIndex = 0
}
// Next returns the name of the next node. NodeTree iterates over zones and in each zone iterates
// over nodes in a round robin fashion.
func (nt *NodeTree) Next() string {
nt.mu.Lock()
defer nt.mu.Unlock()
if len(nt.zones) == 0 {
return ""
}
numExhaustedZones := 0
for {
if nt.zoneIndex >= len(nt.zones) {
nt.zoneIndex = 0
}
zone := nt.zones[nt.zoneIndex]
nt.zoneIndex++
// We do not check the exhausted zones before calling next() on the zone. This ensures
// that if more nodes are added to a zone after it is exhausted, we iterate over the new nodes.
nodeName, exhausted := nt.tree[zone].next()
if exhausted {
numExhaustedZones++
if numExhaustedZones >= len(nt.zones) { // all zones are exhausted. we should reset.
nt.resetExhausted()
}
} else {
return nodeName
}
}
}
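A toy illustration of the iteration order Next produces: one node per zone per pass, so consumers spread across zones before revisiting a zone. The zone keys and node names below are made up; the tests in the next file exercise the real implementation.

package main

import "fmt"

func main() {
	zones := []string{"zone-a", "zone-b"}
	tree := map[string][]string{
		"zone-a": {"n1", "n2", "n3"},
		"zone-b": {"n4"},
	}
	next := map[string]int{}
	var order []string
	for pass := 0; pass < 3; pass++ { // enough passes to drain both zones
		for _, z := range zones {
			if next[z] < len(tree[z]) {
				order = append(order, tree[z][next[z]])
				next[z]++
			}
		}
	}
	fmt.Println(order) // [n1 n4 n2 n3]
}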

pkg/scheduler/cache/node_tree_test.go

@@ -0,0 +1,448 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"reflect"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
)
var allNodes = []*v1.Node{
// Node 0: a node without any region-zone label
{
ObjectMeta: metav1.ObjectMeta{
Name: "node-0",
},
},
// Node 1: a node with region label only
{
ObjectMeta: metav1.ObjectMeta{
Name: "node-1",
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region-1",
},
},
},
// Node 2: a node with zone label only
{
ObjectMeta: metav1.ObjectMeta{
Name: "node-2",
Labels: map[string]string{
kubeletapis.LabelZoneFailureDomain: "zone-2",
},
},
},
// Node 3: a node with proper region and zone labels
{
ObjectMeta: metav1.ObjectMeta{
Name: "node-3",
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region-1",
kubeletapis.LabelZoneFailureDomain: "zone-2",
},
},
},
// Node 4: a node with proper region and zone labels
{
ObjectMeta: metav1.ObjectMeta{
Name: "node-4",
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region-1",
kubeletapis.LabelZoneFailureDomain: "zone-2",
},
},
},
// Node 5: a node with proper region and zone labels in a different zone, same region as above
{
ObjectMeta: metav1.ObjectMeta{
Name: "node-5",
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region-1",
kubeletapis.LabelZoneFailureDomain: "zone-3",
},
},
},
// Node 6: a node with proper region and zone labels in a new region and zone
{
ObjectMeta: metav1.ObjectMeta{
Name: "node-6",
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region-2",
kubeletapis.LabelZoneFailureDomain: "zone-2",
},
},
},
// Node 7: a node with proper region and zone labels in a region and zone as node-6
{
ObjectMeta: metav1.ObjectMeta{
Name: "node-7",
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region-2",
kubeletapis.LabelZoneFailureDomain: "zone-2",
},
},
},
// Node 8: a node with proper region and zone labels in a region and zone as node-6
{
ObjectMeta: metav1.ObjectMeta{
Name: "node-8",
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region-2",
kubeletapis.LabelZoneFailureDomain: "zone-2",
},
},
}}
func verifyNodeTree(t *testing.T, nt *NodeTree, expectedTree map[string]*nodeArray) {
expectedNumNodes := int(0)
for _, na := range expectedTree {
expectedNumNodes += len(na.nodes)
}
if nt.NumNodes != expectedNumNodes {
t.Errorf("unexpected NodeTree.numNodes. Expected: %v, Got: %v", expectedNumNodes, nt.NumNodes)
}
if !reflect.DeepEqual(nt.tree, expectedTree) {
t.Errorf("The node tree is not the same as expected. Expected: %v, Got: %v", expectedTree, nt.tree)
}
if len(nt.zones) != len(expectedTree) {
t.Errorf("Number of zones in NodeTree.zones is not expected. Expected: %v, Got: %v", len(expectedTree), len(nt.zones))
}
for _, z := range nt.zones {
if _, ok := expectedTree[z]; !ok {
t.Errorf("zone %v is not expected to exist in NodeTree.zones", z)
}
}
}
func TestNodeTree_AddNode(t *testing.T) {
tests := []struct {
name string
nodesToAdd []*v1.Node
expectedTree map[string]*nodeArray
}{
{
name: "single node no labels",
nodesToAdd: allNodes[:1],
expectedTree: map[string]*nodeArray{"": {[]string{"node-0"}, 0}},
},
{
name: "mix of nodes with and without proper labels",
nodesToAdd: allNodes[:4],
expectedTree: map[string]*nodeArray{
"": {[]string{"node-0"}, 0},
"region-1:\x00:": {[]string{"node-1"}, 0},
":\x00:zone-2": {[]string{"node-2"}, 0},
"region-1:\x00:zone-2": {[]string{"node-3"}, 0},
},
},
{
name: "mix of nodes with and without proper labels and some zones with multiple nodes",
nodesToAdd: allNodes[:7],
expectedTree: map[string]*nodeArray{
"": {[]string{"node-0"}, 0},
"region-1:\x00:": {[]string{"node-1"}, 0},
":\x00:zone-2": {[]string{"node-2"}, 0},
"region-1:\x00:zone-2": {[]string{"node-3", "node-4"}, 0},
"region-1:\x00:zone-3": {[]string{"node-5"}, 0},
"region-2:\x00:zone-2": {[]string{"node-6"}, 0},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
nt := newNodeTree(nil)
for _, n := range test.nodesToAdd {
nt.AddNode(n)
}
verifyNodeTree(t, nt, test.expectedTree)
})
}
}
func TestNodeTree_RemoveNode(t *testing.T) {
tests := []struct {
name string
existingNodes []*v1.Node
nodesToRemove []*v1.Node
expectedTree map[string]*nodeArray
expectError bool
}{
{
name: "remove a single node with no labels",
existingNodes: allNodes[:7],
nodesToRemove: allNodes[:1],
expectedTree: map[string]*nodeArray{
"region-1:\x00:": {[]string{"node-1"}, 0},
":\x00:zone-2": {[]string{"node-2"}, 0},
"region-1:\x00:zone-2": {[]string{"node-3", "node-4"}, 0},
"region-1:\x00:zone-3": {[]string{"node-5"}, 0},
"region-2:\x00:zone-2": {[]string{"node-6"}, 0},
},
},
{
name: "remove a few nodes including one from a zone with multiple nodes",
existingNodes: allNodes[:7],
nodesToRemove: allNodes[1:4],
expectedTree: map[string]*nodeArray{
"": {[]string{"node-0"}, 0},
"region-1:\x00:zone-2": {[]string{"node-4"}, 0},
"region-1:\x00:zone-3": {[]string{"node-5"}, 0},
"region-2:\x00:zone-2": {[]string{"node-6"}, 0},
},
},
{
name: "remove all nodes",
existingNodes: allNodes[:7],
nodesToRemove: allNodes[:7],
expectedTree: map[string]*nodeArray{},
},
{
name: "remove non-existing node",
existingNodes: nil,
nodesToRemove: allNodes[:5],
expectedTree: map[string]*nodeArray{},
expectError: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
nt := newNodeTree(test.existingNodes)
for _, n := range test.nodesToRemove {
err := nt.RemoveNode(n)
if test.expectError == (err == nil) {
t.Errorf("unexpected returned error value: %v", err)
}
}
verifyNodeTree(t, nt, test.expectedTree)
})
}
}
func TestNodeTree_UpdateNode(t *testing.T) {
tests := []struct {
name string
existingNodes []*v1.Node
nodeToUpdate *v1.Node
expectedTree map[string]*nodeArray
}{
{
name: "update a node without label",
existingNodes: allNodes[:7],
nodeToUpdate: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node-0",
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region-1",
kubeletapis.LabelZoneFailureDomain: "zone-2",
},
},
},
expectedTree: map[string]*nodeArray{
"region-1:\x00:": {[]string{"node-1"}, 0},
":\x00:zone-2": {[]string{"node-2"}, 0},
"region-1:\x00:zone-2": {[]string{"node-3", "node-4", "node-0"}, 0},
"region-1:\x00:zone-3": {[]string{"node-5"}, 0},
"region-2:\x00:zone-2": {[]string{"node-6"}, 0},
},
},
{
name: "update the only existing node",
existingNodes: allNodes[:1],
nodeToUpdate: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node-0",
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region-1",
kubeletapis.LabelZoneFailureDomain: "zone-2",
},
},
},
expectedTree: map[string]*nodeArray{
"region-1:\x00:zone-2": {[]string{"node-0"}, 0},
},
},
{
name: "update non-existing node",
existingNodes: allNodes[:1],
nodeToUpdate: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node-new",
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region-1",
kubeletapis.LabelZoneFailureDomain: "zone-2",
},
},
},
expectedTree: map[string]*nodeArray{
"": {[]string{"node-0"}, 0},
"region-1:\x00:zone-2": {[]string{"node-new"}, 0},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
nt := newNodeTree(test.existingNodes)
var oldNode *v1.Node
for _, n := range allNodes {
if n.Name == test.nodeToUpdate.Name {
oldNode = n
break
}
}
if oldNode == nil {
oldNode = &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "nonexisting-node"}}
}
nt.UpdateNode(oldNode, test.nodeToUpdate)
verifyNodeTree(t, nt, test.expectedTree)
})
}
}
func TestNodeTree_Next(t *testing.T) {
tests := []struct {
name string
nodesToAdd []*v1.Node
numRuns int // number of times to run Next()
expectedOutput []string
}{
{
name: "empty tree",
nodesToAdd: nil,
numRuns: 2,
expectedOutput: []string{"", ""},
},
{
name: "should go back to the first node after finishing a round",
nodesToAdd: allNodes[:1],
numRuns: 2,
expectedOutput: []string{"node-0", "node-0"},
},
{
name: "should go back to the first node after going over all nodes",
nodesToAdd: allNodes[:4],
numRuns: 5,
expectedOutput: []string{"node-0", "node-1", "node-2", "node-3", "node-0"},
},
{
name: "should go to all zones before going to the second nodes in the same zone",
nodesToAdd: allNodes[:9],
numRuns: 11,
expectedOutput: []string{"node-0", "node-1", "node-2", "node-3", "node-5", "node-6", "node-4", "node-7", "node-8", "node-0", "node-1"},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
nt := newNodeTree(test.nodesToAdd)
var output []string
for i := 0; i < test.numRuns; i++ {
output = append(output, nt.Next())
}
if !reflect.DeepEqual(output, test.expectedOutput) {
t.Errorf("unexpected output. Expected: %v, Got: %v", test.expectedOutput, output)
}
})
}
}
func TestNodeTreeMultiOperations(t *testing.T) {
tests := []struct {
name string
nodesToAdd []*v1.Node
nodesToRemove []*v1.Node
operations []string
expectedOutput []string
}{
{
name: "add and remove all nodes between two Next operations",
nodesToAdd: allNodes[2:9],
nodesToRemove: allNodes[2:9],
operations: []string{"add", "add", "next", "add", "remove", "remove", "remove", "next"},
expectedOutput: []string{"node-2", ""},
},
{
name: "add and remove some nodes between two Next operations",
nodesToAdd: allNodes[2:9],
nodesToRemove: allNodes[2:9],
operations: []string{"add", "add", "next", "add", "remove", "remove", "next"},
expectedOutput: []string{"node-2", "node-4"},
},
{
name: "remove nodes already iterated on and add new nodes",
nodesToAdd: allNodes[2:9],
nodesToRemove: allNodes[2:9],
operations: []string{"add", "add", "next", "next", "add", "remove", "remove", "next"},
expectedOutput: []string{"node-2", "node-3", "node-4"},
},
{
name: "add more nodes to an exhausted zone",
nodesToAdd: append(allNodes[4:9], allNodes[3]),
nodesToRemove: nil,
operations: []string{"add", "add", "add", "add", "add", "next", "next", "next", "next", "add", "next", "next", "next"},
expectedOutput: []string{"node-4", "node-5", "node-6", "node-7", "node-3", "node-8", "node-4"},
},
{
name: "remove zone and add new to ensure exhausted is reset correctly",
nodesToAdd: append(allNodes[3:5], allNodes[6:8]...),
nodesToRemove: allNodes[3:5],
operations: []string{"add", "add", "next", "next", "remove", "add", "add", "next", "next", "remove", "next", "next"},
expectedOutput: []string{"node-3", "node-4", "node-6", "node-7", "node-6", "node-7"},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
nt := newNodeTree(nil)
addIndex := 0
removeIndex := 0
var output []string
for _, op := range test.operations {
switch op {
case "add":
if addIndex >= len(test.nodesToAdd) {
t.Error("more add operations than nodesToAdd")
} else {
nt.AddNode(test.nodesToAdd[addIndex])
addIndex++
}
case "remove":
if removeIndex >= len(test.nodesToRemove) {
t.Error("more remove operations than nodesToRemove")
} else {
nt.RemoveNode(test.nodesToRemove[removeIndex])
removeIndex++
}
case "next":
output = append(output, nt.Next())
default:
t.Errorf("unknow operation: %v", op)
}
}
if !reflect.DeepEqual(output, test.expectedOutput) {
t.Errorf("unexpected output. Expected: %v, Got: %v", test.expectedOutput, output)
}
})
}
}

pkg/scheduler/cache/util.go

@@ -16,7 +16,10 @@ limitations under the License.
package cache
import "k8s.io/api/core/v1"
import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
)
// CreateNodeNameToInfoMap obtains a list of pods and pivots that list into a map where the keys are node names
// and the values are the aggregated information for that node.
@@ -29,11 +32,47 @@ func CreateNodeNameToInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*NodeI
}
nodeNameToInfo[nodeName].AddPod(pod)
}
imageExistenceMap := createImageExistenceMap(nodes)
for _, node := range nodes {
if _, ok := nodeNameToInfo[node.Name]; !ok {
nodeNameToInfo[node.Name] = NewNodeInfo()
}
nodeNameToInfo[node.Name].SetNode(node)
nodeInfo := nodeNameToInfo[node.Name]
nodeInfo.SetNode(node)
nodeInfo.imageStates = getNodeImageStates(node, imageExistenceMap)
}
return nodeNameToInfo
}
// getNodeImageStates returns the given node's image states based on the given imageExistence map.
func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.String) map[string]*ImageStateSummary {
imageStates := make(map[string]*ImageStateSummary)
for _, image := range node.Status.Images {
for _, name := range image.Names {
imageStates[name] = &ImageStateSummary{
Size: image.SizeBytes,
NumNodes: len(imageExistenceMap[name]),
}
}
}
return imageStates
}
// createImageExistenceMap returns a map recording on which nodes the images exist, keyed by the images' names.
func createImageExistenceMap(nodes []*v1.Node) map[string]sets.String {
imageExistenceMap := make(map[string]sets.String)
for _, node := range nodes {
for _, image := range node.Status.Images {
for _, name := range image.Names {
if _, ok := imageExistenceMap[name]; !ok {
imageExistenceMap[name] = sets.NewString(node.Name)
} else {
imageExistenceMap[name].Insert(node.Name)
}
}
}
}
return imageExistenceMap
}

pkg/scheduler/cache/util_test.go

@@ -0,0 +1,134 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"reflect"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
)
const mb int64 = 1024 * 1024
func TestGetNodeImageStates(t *testing.T) {
tests := []struct {
node *v1.Node
imageExistenceMap map[string]sets.String
expected map[string]*ImageStateSummary
}{
{
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "node-0"},
Status: v1.NodeStatus{
Images: []v1.ContainerImage{
{
Names: []string{
"gcr.io/10:v1",
},
SizeBytes: int64(10 * mb),
},
{
Names: []string{
"gcr.io/200:v1",
},
SizeBytes: int64(200 * mb),
},
},
},
},
imageExistenceMap: map[string]sets.String{
"gcr.io/10:v1": sets.NewString("node-0", "node-1"),
"gcr.io/200:v1": sets.NewString("node-0"),
},
expected: map[string]*ImageStateSummary{
"gcr.io/10:v1": {
Size: int64(10 * mb),
NumNodes: 2,
},
"gcr.io/200:v1": {
Size: int64(200 * mb),
NumNodes: 1,
},
},
},
}
for _, test := range tests {
imageStates := getNodeImageStates(test.node, test.imageExistenceMap)
if !reflect.DeepEqual(test.expected, imageStates) {
t.Errorf("expected: %#v, got: %#v", test.expected, imageStates)
}
}
}
func TestCreateImageExistenceMap(t *testing.T) {
tests := []struct {
nodes []*v1.Node
expected map[string]sets.String
}{
{
nodes: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{Name: "node-0"},
Status: v1.NodeStatus{
Images: []v1.ContainerImage{
{
Names: []string{
"gcr.io/10:v1",
},
SizeBytes: int64(10 * mb),
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "node-1"},
Status: v1.NodeStatus{
Images: []v1.ContainerImage{
{
Names: []string{
"gcr.io/10:v1",
},
SizeBytes: int64(10 * mb),
},
{
Names: []string{
"gcr.io/200:v1",
},
SizeBytes: int64(200 * mb),
},
},
},
},
},
expected: map[string]sets.String{
"gcr.io/10:v1": sets.NewString("node-0", "node-1"),
"gcr.io/200:v1": sets.NewString("node-1"),
},
},
}
for _, test := range tests {
imageMap := createImageExistenceMap(test.nodes)
if !reflect.DeepEqual(test.expected, imageMap) {
t.Errorf("expected: %#v, got: %#v", test.expected, imageMap)
}
}
}