Bumping k8s dependencies to 1.13

Cheng Xing
2018-11-16 14:08:25 -08:00
parent 305407125c
commit b4c0b68ec7
8002 changed files with 884099 additions and 276228 deletions

View File

@@ -12,42 +12,9 @@ go_library(
"cache_comparer.go",
"factory.go",
"plugins.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"signal.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"signal.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"signal.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"signal.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"signal.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"signal.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"signal.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"signal.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"signal.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"signal.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"signal_windows.go",
],
"//conditions:default": [],
}),
"signal.go",
"signal_windows.go",
],
importpath = "k8s.io/kubernetes/pkg/scheduler/factory",
deps = [
"//pkg/api/v1/pod:go_default_library",
@@ -62,32 +29,32 @@ go_library(
"//pkg/scheduler/api/validation:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/core:go_default_library",
"//pkg/scheduler/core/equivalence:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//pkg/scheduler/volumebinder:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/informers/apps/v1:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/informers/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/informers/storage/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/informers/apps/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/informers/policy/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/informers/storage/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/listers/apps/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/listers/policy/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/listers/storage/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
],
)
@@ -110,19 +77,19 @@ go_test(
"//pkg/scheduler/core:go_default_library",
"//pkg/scheduler/testing:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)
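Two mechanical changes run through this BUILD file (and thousands like it in the commit): the per-platform select() around signal.go collapses into a plain srcs list, because rules_go now evaluates Go build constraints in the files themselves; and dependency labels move from //vendor/k8s.io/... to //staging/src/k8s.io/..., the new canonical location for staged repos. A minimal sketch of the build-constraint pattern that makes the select() unnecessary (the SIGUSR2 choice is an assumption for illustration, not quoted from this commit):

	// signal.go — compiled everywhere except Windows thanks to the build
	// constraint below, so BUILD files can list it unconditionally.
	// +build !windows

	package factory

	import "syscall"

	// compareSignal triggers the scheduler's cache comparer when received
	// (SIGUSR2 here is an assumption for this sketch).
	var compareSignal = syscall.SIGUSR2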

View File

@@ -27,27 +27,29 @@ import (
)
func TestCompareNodes(t *testing.T) {
compare := compareStrategy{}
tests := []struct {
name string
actual []string
cached []string
missing []string
redundant []string
}{
{
name: "redundant cached value",
actual: []string{"foo", "bar"},
cached: []string{"bar", "foo", "foobar"},
missing: []string{},
redundant: []string{"foobar"},
},
{
name: "missing cached value",
actual: []string{"foo", "bar", "foobar"},
cached: []string{"bar", "foo"},
missing: []string{"foobar"},
redundant: []string{},
},
{
name: "proper cache set",
actual: []string{"foo", "bar", "foobar"},
cached: []string{"bar", "foobar", "foo"},
missing: []string{},
@@ -56,34 +58,40 @@ func TestCompareNodes(t *testing.T) {
}
for _, test := range tests {
nodes := []*v1.Node{}
for _, nodeName := range test.actual {
node := &v1.Node{}
node.Name = nodeName
nodes = append(nodes, node)
}
t.Run(test.name, func(t *testing.T) {
testCompareNodes(test.actual, test.cached, test.missing, test.redundant, t)
})
}
}
nodeInfo := make(map[string]*schedulercache.NodeInfo)
for _, nodeName := range test.cached {
nodeInfo[nodeName] = &schedulercache.NodeInfo{}
}
func testCompareNodes(actual, cached, missing, redundant []string, t *testing.T) {
compare := compareStrategy{}
nodes := []*v1.Node{}
for _, nodeName := range actual {
node := &v1.Node{}
node.Name = nodeName
nodes = append(nodes, node)
}
m, r := compare.CompareNodes(nodes, nodeInfo)
nodeInfo := make(map[string]*schedulercache.NodeInfo)
for _, nodeName := range cached {
nodeInfo[nodeName] = &schedulercache.NodeInfo{}
}
if !reflect.DeepEqual(m, test.missing) {
t.Errorf("missing expected to be %s; got %s", test.missing, m)
}
m, r := compare.CompareNodes(nodes, nodeInfo)
if !reflect.DeepEqual(r, test.redundant) {
t.Errorf("redundant expected to be %s; got %s", test.redundant, r)
}
if !reflect.DeepEqual(m, missing) {
t.Errorf("missing expected to be %s; got %s", missing, m)
}
if !reflect.DeepEqual(r, redundant) {
t.Errorf("redundant expected to be %s; got %s", redundant, r)
}
}
func TestComparePods(t *testing.T) {
compare := compareStrategy{}
tests := []struct {
name string
actual []string
cached []string
queued []string
@@ -91,6 +99,7 @@ func TestComparePods(t *testing.T) {
redundant []string
}{
{
name: "redundant cached value",
actual: []string{"foo", "bar"},
cached: []string{"bar", "foo", "foobar"},
queued: []string{},
@@ -98,6 +107,7 @@ func TestComparePods(t *testing.T) {
redundant: []string{"foobar"},
},
{
name: "redundant and queued values",
actual: []string{"foo", "bar"},
cached: []string{"foo", "foobar"},
queued: []string{"bar"},
@@ -105,6 +115,7 @@ func TestComparePods(t *testing.T) {
redundant: []string{"foobar"},
},
{
name: "missing cached value",
actual: []string{"foo", "bar", "foobar"},
cached: []string{"bar", "foo"},
queued: []string{},
@@ -112,6 +123,7 @@ func TestComparePods(t *testing.T) {
redundant: []string{},
},
{
name: "missing and queued values",
actual: []string{"foo", "bar", "foobar"},
cached: []string{"foo"},
queued: []string{"bar"},
@@ -119,6 +131,7 @@ func TestComparePods(t *testing.T) {
redundant: []string{},
},
{
name: "correct cache set",
actual: []string{"foo", "bar", "foobar"},
cached: []string{"bar", "foobar", "foo"},
queued: []string{},
@@ -126,6 +139,7 @@ func TestComparePods(t *testing.T) {
redundant: []string{},
},
{
name: "queued cache value",
actual: []string{"foo", "bar", "foobar"},
cached: []string{"foobar", "foo"},
queued: []string{"bar"},
@@ -135,64 +149,73 @@ func TestComparePods(t *testing.T) {
}
for _, test := range tests {
pods := []*v1.Pod{}
for _, uid := range test.actual {
pod := &v1.Pod{}
pod.UID = types.UID(uid)
pods = append(pods, pod)
}
t.Run(test.name, func(t *testing.T) {
testComparePods(test.actual, test.cached, test.queued, test.missing, test.redundant, t)
})
}
}
queuedPods := []*v1.Pod{}
for _, uid := range test.queued {
pod := &v1.Pod{}
pod.UID = types.UID(uid)
queuedPods = append(queuedPods, pod)
}
func testComparePods(actual, cached, queued, missing, redundant []string, t *testing.T) {
compare := compareStrategy{}
pods := []*v1.Pod{}
for _, uid := range actual {
pod := &v1.Pod{}
pod.UID = types.UID(uid)
pods = append(pods, pod)
}
nodeInfo := make(map[string]*schedulercache.NodeInfo)
for _, uid := range test.cached {
pod := &v1.Pod{}
pod.UID = types.UID(uid)
pod.Namespace = "ns"
pod.Name = uid
queuedPods := []*v1.Pod{}
for _, uid := range queued {
pod := &v1.Pod{}
pod.UID = types.UID(uid)
queuedPods = append(queuedPods, pod)
}
nodeInfo[uid] = schedulercache.NewNodeInfo(pod)
}
nodeInfo := make(map[string]*schedulercache.NodeInfo)
for _, uid := range cached {
pod := &v1.Pod{}
pod.UID = types.UID(uid)
pod.Namespace = "ns"
pod.Name = uid
m, r := compare.ComparePods(pods, queuedPods, nodeInfo)
nodeInfo[uid] = schedulercache.NewNodeInfo(pod)
}
if !reflect.DeepEqual(m, test.missing) {
t.Errorf("missing expected to be %s; got %s", test.missing, m)
}
m, r := compare.ComparePods(pods, queuedPods, nodeInfo)
if !reflect.DeepEqual(r, test.redundant) {
t.Errorf("redundant expected to be %s; got %s", test.redundant, r)
}
if !reflect.DeepEqual(m, missing) {
t.Errorf("missing expected to be %s; got %s", missing, m)
}
if !reflect.DeepEqual(r, redundant) {
t.Errorf("redundant expected to be %s; got %s", redundant, r)
}
}
func TestComparePdbs(t *testing.T) {
compare := compareStrategy{}
tests := []struct {
name string
actual []string
cached []string
missing []string
redundant []string
}{
{
name: "redundant cache value",
actual: []string{"foo", "bar"},
cached: []string{"bar", "foo", "foobar"},
missing: []string{},
redundant: []string{"foobar"},
},
{
name: "missing cache value",
actual: []string{"foo", "bar", "foobar"},
cached: []string{"bar", "foo"},
missing: []string{"foobar"},
redundant: []string{},
},
{
name: "correct cache",
actual: []string{"foo", "bar", "foobar"},
cached: []string{"bar", "foobar", "foo"},
missing: []string{},
@@ -201,28 +224,35 @@ func TestComparePdbs(t *testing.T) {
}
for _, test := range tests {
pdbs := []*policy.PodDisruptionBudget{}
for _, uid := range test.actual {
pdb := &policy.PodDisruptionBudget{}
pdb.UID = types.UID(uid)
pdbs = append(pdbs, pdb)
}
cache := make(map[string]*policy.PodDisruptionBudget)
for _, uid := range test.cached {
pdb := &policy.PodDisruptionBudget{}
pdb.UID = types.UID(uid)
cache[uid] = pdb
}
m, r := compare.ComparePdbs(pdbs, cache)
if !reflect.DeepEqual(m, test.missing) {
t.Errorf("missing expected to be %s; got %s", test.missing, m)
}
if !reflect.DeepEqual(r, test.redundant) {
t.Errorf("redundant expected to be %s; got %s", test.redundant, r)
}
t.Run(test.name, func(t *testing.T) {
testComparePdbs(test.actual, test.cached, test.missing, test.redundant, t)
})
}
}
func testComparePdbs(actual, cached, missing, redundant []string, t *testing.T) {
compare := compareStrategy{}
pdbs := []*policy.PodDisruptionBudget{}
for _, uid := range actual {
pdb := &policy.PodDisruptionBudget{}
pdb.UID = types.UID(uid)
pdbs = append(pdbs, pdb)
}
cache := make(map[string]*policy.PodDisruptionBudget)
for _, uid := range cached {
pdb := &policy.PodDisruptionBudget{}
pdb.UID = types.UID(uid)
cache[uid] = pdb
}
m, r := compare.ComparePdbs(pdbs, cache)
if !reflect.DeepEqual(m, missing) {
t.Errorf("missing expected to be %s; got %s", missing, m)
}
if !reflect.DeepEqual(r, redundant) {
t.Errorf("redundant expected to be %s; got %s", redundant, r)
}
}
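Every test file touched by this commit gets the same treatment seen above: the body of each table-driven loop moves into a named helper (testCompareNodes, testComparePods, testComparePdbs), the cases gain a name field, and the loop wraps the helper in t.Run so each case runs as an addressable subtest. The shape of the pattern, as a minimal sketch with illustrative names:

	func TestSomething(t *testing.T) {
		tests := []struct {
			name     string
			input    []string
			expected []string
		}{
			{name: "empty input", input: []string{}, expected: []string{}},
			{name: "one item", input: []string{"foo"}, expected: []string{"foo"}},
		}
		for _, test := range tests {
			t.Run(test.name, func(t *testing.T) {
				// testSomething is the extracted per-case helper (elided here).
				testSomething(test.input, test.expected, t)
			})
		}
	}

One payoff is selective execution: go test -run 'TestComparePods/missing_cached_value' now runs a single case (t.Run replaces spaces in subtest names with underscores).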

View File

@@ -29,6 +29,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/api/policy/v1beta1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
@@ -38,15 +39,13 @@ import (
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
appsinformers "k8s.io/client-go/informers/apps/v1beta1"
appsinformers "k8s.io/client-go/informers/apps/v1"
coreinformers "k8s.io/client-go/informers/core/v1"
extensionsinformers "k8s.io/client-go/informers/extensions/v1beta1"
policyinformers "k8s.io/client-go/informers/policy/v1beta1"
storageinformers "k8s.io/client-go/informers/storage/v1"
clientset "k8s.io/client-go/kubernetes"
appslisters "k8s.io/client-go/listers/apps/v1beta1"
appslisters "k8s.io/client-go/listers/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
extensionslisters "k8s.io/client-go/listers/extensions/v1beta1"
policylisters "k8s.io/client-go/listers/policy/v1beta1"
storagelisters "k8s.io/client-go/listers/storage/v1"
"k8s.io/client-go/tools/cache"
@@ -61,6 +60,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/api/validation"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
"k8s.io/kubernetes/pkg/scheduler/core"
"k8s.io/kubernetes/pkg/scheduler/core/equivalence"
"k8s.io/kubernetes/pkg/scheduler/util"
"k8s.io/kubernetes/pkg/scheduler/volumebinder"
)
@@ -98,7 +98,7 @@ type configFactory struct {
// a means to list all controllers
controllerLister corelisters.ReplicationControllerLister
// a means to list all replicasets
replicaSetLister extensionslisters.ReplicaSetLister
replicaSetLister appslisters.ReplicaSetLister
// a means to list all statefulsets
statefulSetLister appslisters.StatefulSetLister
// a means to list all PodDisruptionBudgets
@@ -123,7 +123,7 @@ type configFactory struct {
hardPodAffinitySymmetricWeight int32
// Equivalence class cache
equivalencePodCache *core.EquivalenceCache
equivalencePodCache *equivalence.Cache
// Enable equivalence class cache
enableEquivalenceClassCache bool
@@ -136,59 +136,67 @@ type configFactory struct {
// Disable pod preemption or not.
disablePreemption bool
// percentageOfNodesToScore specifies the percentage of all nodes to score in each scheduling cycle.
percentageOfNodesToScore int32
}
// ConfigFactoryArgs is a set of arguments passed to NewConfigFactory.
type ConfigFactoryArgs struct {
SchedulerName string
Client clientset.Interface
NodeInformer coreinformers.NodeInformer
PodInformer coreinformers.PodInformer
PvInformer coreinformers.PersistentVolumeInformer
PvcInformer coreinformers.PersistentVolumeClaimInformer
ReplicationControllerInformer coreinformers.ReplicationControllerInformer
ReplicaSetInformer appsinformers.ReplicaSetInformer
StatefulSetInformer appsinformers.StatefulSetInformer
ServiceInformer coreinformers.ServiceInformer
PdbInformer policyinformers.PodDisruptionBudgetInformer
StorageClassInformer storageinformers.StorageClassInformer
HardPodAffinitySymmetricWeight int32
EnableEquivalenceClassCache bool
DisablePreemption bool
PercentageOfNodesToScore int32
BindTimeoutSeconds int64
}
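For downstream callers, the migration from the long positional parameter list to the args struct looks roughly like this (a sketch with illustrative values; the commit's own call-site update appears in the factory test diff further down):

	c := factory.NewConfigFactory(&factory.ConfigFactoryArgs{
		SchedulerName:                  v1.DefaultSchedulerName,
		Client:                         client,
		NodeInformer:                   informerFactory.Core().V1().Nodes(),
		PodInformer:                    informerFactory.Core().V1().Pods(),
		PvInformer:                     informerFactory.Core().V1().PersistentVolumes(),
		PvcInformer:                    informerFactory.Core().V1().PersistentVolumeClaims(),
		ReplicationControllerInformer:  informerFactory.Core().V1().ReplicationControllers(),
		ReplicaSetInformer:             informerFactory.Apps().V1().ReplicaSets(), // apps/v1, no longer extensions/v1beta1
		StatefulSetInformer:            informerFactory.Apps().V1().StatefulSets(),
		ServiceInformer:                informerFactory.Core().V1().Services(),
		PdbInformer:                    informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
		StorageClassInformer:           informerFactory.Storage().V1().StorageClasses(),
		HardPodAffinitySymmetricWeight: 1,   // v1.DefaultHardPodAffinitySymmetricWeight
		EnableEquivalenceClassCache:    false,
		DisablePreemption:              false,
		PercentageOfNodesToScore:       50,  // schedulerapi.DefaultPercentageOfNodesToScore
		BindTimeoutSeconds:             600,
	})

Besides being easier to read, the struct form lets new knobs (PercentageOfNodesToScore, BindTimeoutSeconds) be added without breaking every caller's positional argument list.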
// NewConfigFactory initializes the default implementation of a Configurator.
// To encourage eventual privatization of the struct type, we only return the interface.
func NewConfigFactory(
schedulerName string,
client clientset.Interface,
nodeInformer coreinformers.NodeInformer,
podInformer coreinformers.PodInformer,
pvInformer coreinformers.PersistentVolumeInformer,
pvcInformer coreinformers.PersistentVolumeClaimInformer,
replicationControllerInformer coreinformers.ReplicationControllerInformer,
replicaSetInformer extensionsinformers.ReplicaSetInformer,
statefulSetInformer appsinformers.StatefulSetInformer,
serviceInformer coreinformers.ServiceInformer,
pdbInformer policyinformers.PodDisruptionBudgetInformer,
storageClassInformer storageinformers.StorageClassInformer,
hardPodAffinitySymmetricWeight int32,
enableEquivalenceClassCache bool,
disablePreemption bool,
) scheduler.Configurator {
func NewConfigFactory(args *ConfigFactoryArgs) scheduler.Configurator {
stopEverything := make(chan struct{})
schedulerCache := schedulercache.New(30*time.Second, stopEverything)
// storageClassInformer is only enabled through the VolumeScheduling feature gate
var storageClassLister storagelisters.StorageClassLister
if storageClassInformer != nil {
storageClassLister = storageClassInformer.Lister()
if args.StorageClassInformer != nil {
storageClassLister = args.StorageClassInformer.Lister()
}
c := &configFactory{
client: client,
client: args.Client,
podLister: schedulerCache,
podQueue: core.NewSchedulingQueue(),
pVLister: pvInformer.Lister(),
pVCLister: pvcInformer.Lister(),
serviceLister: serviceInformer.Lister(),
controllerLister: replicationControllerInformer.Lister(),
replicaSetLister: replicaSetInformer.Lister(),
statefulSetLister: statefulSetInformer.Lister(),
pdbLister: pdbInformer.Lister(),
pVLister: args.PvInformer.Lister(),
pVCLister: args.PvcInformer.Lister(),
serviceLister: args.ServiceInformer.Lister(),
controllerLister: args.ReplicationControllerInformer.Lister(),
replicaSetLister: args.ReplicaSetInformer.Lister(),
statefulSetLister: args.StatefulSetInformer.Lister(),
pdbLister: args.PdbInformer.Lister(),
storageClassLister: storageClassLister,
schedulerCache: schedulerCache,
StopEverything: stopEverything,
schedulerName: schedulerName,
hardPodAffinitySymmetricWeight: hardPodAffinitySymmetricWeight,
enableEquivalenceClassCache: enableEquivalenceClassCache,
disablePreemption: disablePreemption,
schedulerName: args.SchedulerName,
hardPodAffinitySymmetricWeight: args.HardPodAffinitySymmetricWeight,
enableEquivalenceClassCache: args.EnableEquivalenceClassCache,
disablePreemption: args.DisablePreemption,
percentageOfNodesToScore: args.PercentageOfNodesToScore,
}
c.scheduledPodsHasSynced = podInformer.Informer().HasSynced
c.scheduledPodsHasSynced = args.PodInformer.Informer().HasSynced
// scheduled pod cache
podInformer.Informer().AddEventHandler(
args.PodInformer.Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch t := obj.(type) {
@@ -213,15 +221,15 @@ func NewConfigFactory(
},
)
// unscheduled pod queue
podInformer.Informer().AddEventHandler(
args.PodInformer.Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch t := obj.(type) {
case *v1.Pod:
return unassignedNonTerminatedPod(t) && responsibleForPod(t, schedulerName)
return unassignedNonTerminatedPod(t) && responsibleForPod(t, args.SchedulerName)
case cache.DeletedFinalStateUnknown:
if pod, ok := t.Obj.(*v1.Pod); ok {
return unassignedNonTerminatedPod(pod) && responsibleForPod(pod, schedulerName)
return unassignedNonTerminatedPod(pod) && responsibleForPod(pod, args.SchedulerName)
}
runtime.HandleError(fmt.Errorf("unable to convert object %T to *v1.Pod in %T", obj, c))
return false
@@ -239,29 +247,29 @@ func NewConfigFactory(
)
// ScheduledPodLister is something we provide to plug-in functions that
// may need to call it.
c.scheduledPodLister = assignedPodLister{podInformer.Lister()}
c.scheduledPodLister = assignedPodLister{args.PodInformer.Lister()}
nodeInformer.Informer().AddEventHandler(
args.NodeInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: c.addNodeToCache,
UpdateFunc: c.updateNodeInCache,
DeleteFunc: c.deleteNodeFromCache,
},
)
c.nodeLister = nodeInformer.Lister()
c.nodeLister = args.NodeInformer.Lister()
pdbInformer.Informer().AddEventHandler(
args.PdbInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: c.addPDBToCache,
UpdateFunc: c.updatePDBInCache,
DeleteFunc: c.deletePDBFromCache,
},
)
c.pdbLister = pdbInformer.Lister()
c.pdbLister = args.PdbInformer.Lister()
// On adds and deletes of PVs, equivalence cache items related to
// persistent volumes are affected
pvInformer.Informer().AddEventHandler(
args.PvInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
// MaxPDVolumeCountPredicate: since it relies on the counts of PV.
AddFunc: c.onPvAdd,
@@ -269,43 +277,50 @@ func NewConfigFactory(
DeleteFunc: c.onPvDelete,
},
)
c.pVLister = pvInformer.Lister()
c.pVLister = args.PvInformer.Lister()
// This is for MaxPDVolumeCountPredicate: adding/deleting a PVC will affect the PV counts when it is bound.
pvcInformer.Informer().AddEventHandler(
args.PvcInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: c.onPvcAdd,
UpdateFunc: c.onPvcUpdate,
DeleteFunc: c.onPvcDelete,
},
)
c.pVCLister = pvcInformer.Lister()
c.pVCLister = args.PvcInformer.Lister()
// This is for ServiceAffinity: it is affected when the selector of a service is updated.
// Also, if a new service is added, the equivalence cache will become invalid, since
// existing pods may be "captured" by this service, changing the predicate result.
serviceInformer.Informer().AddEventHandler(
args.ServiceInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: c.onServiceAdd,
UpdateFunc: c.onServiceUpdate,
DeleteFunc: c.onServiceDelete,
},
)
c.serviceLister = serviceInformer.Lister()
c.serviceLister = args.ServiceInformer.Lister()
// The existing equivalence cache should not be affected by adding/deleting RCs, Deployments, etc.;
// it only makes sense when a pod is scheduled or deleted
if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
// Setup volume binder
c.volumeBinder = volumebinder.NewVolumeBinder(client, pvcInformer, pvInformer, storageClassInformer)
c.volumeBinder = volumebinder.NewVolumeBinder(args.Client, args.PvcInformer, args.PvInformer, args.StorageClassInformer, time.Duration(args.BindTimeoutSeconds)*time.Second)
args.StorageClassInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: c.onStorageClassAdd,
DeleteFunc: c.onStorageClassDelete,
},
)
}
// Setup cache comparer
comparer := &cacheComparer{
podLister: podInformer.Lister(),
nodeLister: nodeInformer.Lister(),
pdbLister: pdbInformer.Lister(),
podLister: args.PodInformer.Lister(),
nodeLister: args.NodeInformer.Lister(),
pdbLister: args.PdbInformer.Lister(),
cache: c.schedulerCache,
podQueue: c.podQueue,
}
@@ -383,6 +398,13 @@ func (c *configFactory) onPvAdd(obj interface{}) {
}
c.invalidatePredicatesForPv(pv)
}
// Pods created when there are no PVs available will be stuck in the
// unschedulable queue. But unbound PVs created for static provisioning,
// and PVs using a delay-binding storage class, are skipped by the PV
// controller's dynamic provisioning and binding process and will not
// trigger events to schedule the pod again. So we need to move pods to
// the active queue on PV add for this scenario.
c.podQueue.MoveAllToActiveQueue()
}
func (c *configFactory) onPvUpdate(old, new interface{}) {
@@ -399,10 +421,22 @@ func (c *configFactory) onPvUpdate(old, new interface{}) {
}
c.invalidatePredicatesForPvUpdate(oldPV, newPV)
}
// Scheduler.bindVolumesWorker may fail to update assumed pod volume
// bindings due to conflicts if PVs are updated by the PV controller or other
// parties, in which case the scheduler will add the pod back to the
// unschedulable queue. We need to move pods to the active queue on PV
// update for this scenario.
c.podQueue.MoveAllToActiveQueue()
}
func (c *configFactory) invalidatePredicatesForPvUpdate(oldPV, newPV *v1.PersistentVolume) {
invalidPredicates := sets.NewString()
// CheckVolumeBinding predicate calls SchedulerVolumeBinder.FindPodVolumes
// which will cache PVs in PodBindingCache. When a PV is updated, we should
// invalidate the cache; otherwise PVAssumeCache.Assume will fail with an
// out-of-sync error.
if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
invalidPredicates.Insert(predicates.CheckVolumeBindingPred)
}
for k, v := range newPV.Labels {
// If PV update modifies the zone/region labels.
if isZoneRegionLabel(k) && !reflect.DeepEqual(v, oldPV.Labels[k]) {
@@ -410,7 +444,7 @@ func (c *configFactory) invalidatePredicatesForPvUpdate(oldPV, newPV *v1.Persist
break
}
}
c.equivalencePodCache.InvalidateCachedPredicateItemOfAllNodes(invalidPredicates)
c.equivalencePodCache.InvalidatePredicates(invalidPredicates)
}
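This is the first of many call sites updated for the equivalence cache's move out of core into its own equivalence package, with shorter method names. The full mapping, reconstructed from the hunks in this file (signatures sketched from usage, not quoted):

	ecache := equivalence.NewCache()            // was core.NewEquivalenceCache()

	// Invalidate a set of predicates across all nodes:
	ecache.InvalidatePredicates(predicateKeys)  // was InvalidateCachedPredicateItemOfAllNodes

	// Invalidate a set of predicates on a single node:
	ecache.InvalidatePredicatesOnNode(nodeName, predicateKeys) // was InvalidateCachedPredicateItem

	// Drop everything cached for a node:
	ecache.InvalidateAllPredicatesOnNode(nodeName) // was InvalidateAllCachedPredicateItemOfNode

	// Lazily create the per-node cache when a node is added:
	ecache.GetNodeCache(nodeName)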
// isZoneRegionLabel checks if the given label key is a zone or region label.
@@ -455,6 +489,10 @@ func (c *configFactory) invalidatePredicatesForPv(pv *v1.PersistentVolume) {
invalidPredicates.Insert(predicates.MaxAzureDiskVolumeCountPred)
}
if pv.Spec.CSI != nil && utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
invalidPredicates.Insert(predicates.MaxCSIVolumeCountPred)
}
// If the PV contains a zone-related label, it may impact the cached NoVolumeZoneConflict
for k := range pv.Labels {
if isZoneRegionLabel(k) {
@@ -468,7 +506,7 @@ func (c *configFactory) invalidatePredicatesForPv(pv *v1.PersistentVolume) {
invalidPredicates.Insert(predicates.CheckVolumeBindingPred)
}
c.equivalencePodCache.InvalidateCachedPredicateItemOfAllNodes(invalidPredicates)
c.equivalencePodCache.InvalidatePredicates(invalidPredicates)
}
func (c *configFactory) onPvcAdd(obj interface{}) {
@@ -531,6 +569,10 @@ func (c *configFactory) invalidatePredicatesForPvc(pvc *v1.PersistentVolumeClaim
// The bound volume type may change
invalidPredicates := sets.NewString(maxPDVolumeCountPredicateKeys...)
if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
invalidPredicates.Insert(predicates.MaxCSIVolumeCountPred)
}
// The bound volume's label may change
invalidPredicates.Insert(predicates.NoVolumeZoneConflictPred)
@@ -538,7 +580,7 @@ func (c *configFactory) invalidatePredicatesForPvc(pvc *v1.PersistentVolumeClaim
// Add/delete impacts the available PVs to choose from
invalidPredicates.Insert(predicates.CheckVolumeBindingPred)
}
c.equivalencePodCache.InvalidateCachedPredicateItemOfAllNodes(invalidPredicates)
c.equivalencePodCache.InvalidatePredicates(invalidPredicates)
}
func (c *configFactory) invalidatePredicatesForPvcUpdate(old, new *v1.PersistentVolumeClaim) {
@@ -551,14 +593,71 @@ func (c *configFactory) invalidatePredicatesForPvcUpdate(old, new *v1.Persistent
}
// The bound volume type may change
invalidPredicates.Insert(maxPDVolumeCountPredicateKeys...)
if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
invalidPredicates.Insert(predicates.MaxCSIVolumeCountPred)
}
}
c.equivalencePodCache.InvalidateCachedPredicateItemOfAllNodes(invalidPredicates)
c.equivalencePodCache.InvalidatePredicates(invalidPredicates)
}
func (c *configFactory) onStorageClassAdd(obj interface{}) {
sc, ok := obj.(*storagev1.StorageClass)
if !ok {
glog.Errorf("cannot convert to *storagev1.StorageClass: %v", obj)
return
}
// CheckVolumeBindingPred fails if a pod has unbound immediate PVCs. If these
// PVCs specify a StorageClass name, creating StorageClass objects with late
// binding will cause the predicates to pass, so we need to move pods to the
// active queue.
// We don't need to invalidate cached results, because results are not cached
// for pods that have unbound immediate PVCs.
if sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer {
c.podQueue.MoveAllToActiveQueue()
}
}
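The nil check above matters because VolumeBindingMode is a pointer field on StorageClass. For reference, a late-binding class that would trigger this path looks roughly like this (the name and provisioner are illustrative):

	mode := storagev1.VolumeBindingWaitForFirstConsumer
	sc := &storagev1.StorageClass{
		ObjectMeta:        metav1.ObjectMeta{Name: "late-binding"},
		Provisioner:       "kubernetes.io/no-provisioner",
		VolumeBindingMode: &mode,
	}
	// Creating sc moves pods with unbound PVCs that reference it back into
	// the active queue for another scheduling attempt.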
func (c *configFactory) onStorageClassDelete(obj interface{}) {
if c.enableEquivalenceClassCache {
var sc *storagev1.StorageClass
switch t := obj.(type) {
case *storagev1.StorageClass:
sc = t
case cache.DeletedFinalStateUnknown:
var ok bool
sc, ok = t.Obj.(*storagev1.StorageClass)
if !ok {
glog.Errorf("cannot convert to *storagev1.StorageClass: %v", t.Obj)
return
}
default:
glog.Errorf("cannot convert to *storagev1.StorageClass: %v", t)
return
}
c.invalidatePredicatesForStorageClass(sc)
}
}
func (c *configFactory) invalidatePredicatesForStorageClass(sc *storagev1.StorageClass) {
invalidPredicates := sets.NewString()
if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
if sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer {
// Delete can cause predicates to fail
invalidPredicates.Insert(predicates.CheckVolumeBindingPred)
invalidPredicates.Insert(predicates.NoVolumeZoneConflictPred)
}
}
c.equivalencePodCache.InvalidatePredicates(invalidPredicates)
}
func (c *configFactory) onServiceAdd(obj interface{}) {
if c.enableEquivalenceClassCache {
c.equivalencePodCache.InvalidateCachedPredicateItemOfAllNodes(serviceAffinitySet)
c.equivalencePodCache.InvalidatePredicates(serviceAffinitySet)
}
c.podQueue.MoveAllToActiveQueue()
}
@@ -569,7 +668,7 @@ func (c *configFactory) onServiceUpdate(oldObj interface{}, newObj interface{})
oldService := oldObj.(*v1.Service)
newService := newObj.(*v1.Service)
if !reflect.DeepEqual(oldService.Spec.Selector, newService.Spec.Selector) {
c.equivalencePodCache.InvalidateCachedPredicateItemOfAllNodes(serviceAffinitySet)
c.equivalencePodCache.InvalidatePredicates(serviceAffinitySet)
}
}
c.podQueue.MoveAllToActiveQueue()
@@ -577,7 +676,7 @@ func (c *configFactory) onServiceUpdate(oldObj interface{}, newObj interface{})
func (c *configFactory) onServiceDelete(obj interface{}) {
if c.enableEquivalenceClassCache {
c.equivalencePodCache.InvalidateCachedPredicateItemOfAllNodes(serviceAffinitySet)
c.equivalencePodCache.InvalidatePredicates(serviceAffinitySet)
}
c.podQueue.MoveAllToActiveQueue()
}
@@ -595,7 +694,7 @@ func (c *configFactory) GetSchedulerName() string {
return c.schedulerName
}
// GetClient provides a kubernetes client, mostly internal use, but may also be called by mock-tests.
// GetClient provides a kubernetes Client, mostly internal use, but may also be called by mock-tests.
func (c *configFactory) GetClient() clientset.Interface {
return c.client
}
@@ -694,13 +793,13 @@ func (c *configFactory) invalidateCachedPredicatesOnUpdatePod(newPod *v1.Pod, ol
if !reflect.DeepEqual(oldPod.GetLabels(), newPod.GetLabels()) {
// MatchInterPodAffinity needs to be reconsidered for this node,
// as well as all nodes in the same failure domain.
c.equivalencePodCache.InvalidateCachedPredicateItemOfAllNodes(
c.equivalencePodCache.InvalidatePredicates(
matchInterPodAffinitySet)
}
// if requested container resource changed, invalidate GeneralPredicates of this node
if !reflect.DeepEqual(predicates.GetResourceRequest(newPod),
predicates.GetResourceRequest(oldPod)) {
c.equivalencePodCache.InvalidateCachedPredicateItem(
c.equivalencePodCache.InvalidatePredicatesOnNode(
newPod.Spec.NodeName, generalPredicatesSets)
}
}
@@ -741,14 +840,14 @@ func (c *configFactory) invalidateCachedPredicatesOnDeletePod(pod *v1.Pod) {
// MatchInterPodAffinity needs to be reconsidered for this node,
// as well as all nodes in the same failure domain.
// TODO(resouer) can we just do this for nodes in the same failure domain
c.equivalencePodCache.InvalidateCachedPredicateItemOfAllNodes(
c.equivalencePodCache.InvalidatePredicates(
matchInterPodAffinitySet)
// if this pod has these PVs, the cached disk conflict results become invalid.
for _, volume := range pod.Spec.Volumes {
if volume.GCEPersistentDisk != nil || volume.AWSElasticBlockStore != nil ||
volume.RBD != nil || volume.ISCSI != nil {
c.equivalencePodCache.InvalidateCachedPredicateItem(
c.equivalencePodCache.InvalidatePredicatesOnNode(
pod.Spec.NodeName, noDiskConflictSet)
}
}
@@ -766,6 +865,11 @@ func (c *configFactory) addNodeToCache(obj interface{}) {
glog.Errorf("scheduler cache AddNode failed: %v", err)
}
if c.enableEquivalenceClassCache {
// GetNodeCache() will lazily create a NodeCache for the given node if it does not exist.
c.equivalencePodCache.GetNodeCache(node.GetName())
}
c.podQueue.MoveAllToActiveQueue()
// NOTE: adding a new node does not affect existing predicates in the equivalence cache
}
@@ -858,7 +962,7 @@ func (c *configFactory) invalidateCachedPredicatesOnNodeUpdate(newNode *v1.Node,
if newNode.Spec.Unschedulable != oldNode.Spec.Unschedulable {
invalidPredicates.Insert(predicates.CheckNodeConditionPred)
}
c.equivalencePodCache.InvalidateCachedPredicateItem(newNode.GetName(), invalidPredicates)
c.equivalencePodCache.InvalidatePredicatesOnNode(newNode.GetName(), invalidPredicates)
}
}
@@ -885,7 +989,7 @@ func (c *configFactory) deleteNodeFromCache(obj interface{}) {
glog.Errorf("scheduler cache RemoveNode failed: %v", err)
}
if c.enableEquivalenceClassCache {
c.equivalencePodCache.InvalidateAllCachedPredicateItemOfNode(node.GetName())
c.equivalencePodCache.InvalidateAllPredicatesOnNode(node.GetName())
}
}
@@ -994,7 +1098,7 @@ func (c *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler
}
}
extenders := make([]algorithm.SchedulerExtender, 0)
var extenders []algorithm.SchedulerExtender
if len(policy.ExtenderConfigs) != 0 {
ignoredExtendedResources := sets.NewString()
for ii := range policy.ExtenderConfigs {
@@ -1074,7 +1178,7 @@ func (c *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String,
// Init equivalence class cache
if c.enableEquivalenceClassCache {
c.equivalencePodCache = core.NewEquivalenceCache()
c.equivalencePodCache = equivalence.NewCache()
glog.Info("Created equivalence class cache")
}
@@ -1091,6 +1195,7 @@ func (c *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String,
c.pVCLister,
c.alwaysCheckAllPredicates,
c.disablePreemption,
c.percentageOfNodesToScore,
)
podBackoff := util.CreateDefaultPodBackoff()
@@ -1178,7 +1283,7 @@ func (c *configFactory) getPluginArgs() (*PluginFactoryArgs, error) {
func (c *configFactory) getNextPod() *v1.Pod {
pod, err := c.podQueue.Pop()
if err == nil {
glog.V(4).Infof("About to try and schedule pod %v", pod.Name)
glog.V(4).Infof("About to try and schedule pod %v/%v", pod.Namespace, pod.Name)
return pod
}
glog.Errorf("Error while retrieving next pod from scheduling queue: %v", err)
@@ -1297,10 +1402,10 @@ func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) core
func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue core.SchedulingQueue) func(pod *v1.Pod, err error) {
return func(pod *v1.Pod, err error) {
if err == core.ErrNoNodesAvailable {
glog.V(4).Infof("Unable to schedule %v %v: no nodes are registered to the cluster; waiting", pod.Namespace, pod.Name)
glog.V(4).Infof("Unable to schedule %v/%v: no nodes are registered to the cluster; waiting", pod.Namespace, pod.Name)
} else {
if _, ok := err.(*core.FitError); ok {
glog.V(4).Infof("Unable to schedule %v %v: no fit: %v; waiting", pod.Namespace, pod.Name, err)
glog.V(4).Infof("Unable to schedule %v/%v: no fit: %v; waiting", pod.Namespace, pod.Name, err)
} else if errors.IsNotFound(err) {
if errStatus, ok := err.(errors.APIStatus); ok && errStatus.Status().Details.Kind == "node" {
nodeName := errStatus.Status().Details.Name
@@ -1315,12 +1420,12 @@ func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue
c.schedulerCache.RemoveNode(&node)
// invalidate cached predicate for the node
if c.enableEquivalenceClassCache {
c.equivalencePodCache.InvalidateAllCachedPredicateItemOfNode(nodeName)
c.equivalencePodCache.InvalidateAllPredicatesOnNode(nodeName)
}
}
}
} else {
glog.Errorf("Error scheduling %v %v: %v; retrying", pod.Namespace, pod.Name, err)
glog.Errorf("Error scheduling %v/%v: %v; retrying", pod.Namespace, pod.Name, err)
}
}
@@ -1413,7 +1518,7 @@ type podConditionUpdater struct {
}
func (p *podConditionUpdater) Update(pod *v1.Pod, condition *v1.PodCondition) error {
glog.V(2).Infof("Updating pod condition for %s/%s to (%s==%s)", pod.Namespace, pod.Name, condition.Type, condition.Status)
glog.V(3).Infof("Updating pod condition for %s/%s to (%s==%s)", pod.Namespace, pod.Name, condition.Type, condition.Status)
if podutil.UpdatePodCondition(&pod.Status, condition) {
_, err := p.Client.CoreV1().Pods(pod.Namespace).UpdateStatus(pod)
return err

View File

@@ -49,6 +49,7 @@ import (
const (
enableEquivalenceCache = true
disablePodPreemption = false
bindTimeoutSeconds = 600
)
func TestCreate(t *testing.T) {
@@ -331,53 +332,65 @@ func TestNodeEnumerator(t *testing.T) {
t.Fatalf("expected %v, got %v", e, a)
}
for i := range testList.Items {
gotObj := me.Get(i)
if e, a := testList.Items[i].Name, gotObj.(*v1.Node).Name; e != a {
t.Errorf("Expected %v, got %v", e, a)
}
if e, a := &testList.Items[i], gotObj; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %#v, got %v#", e, a)
}
t.Run(fmt.Sprintf("node enumerator/%v", i), func(t *testing.T) {
gotObj := me.Get(i)
if e, a := testList.Items[i].Name, gotObj.(*v1.Node).Name; e != a {
t.Errorf("Expected %v, got %v", e, a)
}
if e, a := &testList.Items[i], gotObj; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %#v, got %v#", e, a)
}
})
}
}
func TestBind(t *testing.T) {
table := []struct {
name string
binding *v1.Binding
}{
{binding: &v1.Binding{
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
Name: "foo",
{
name: "binding can bind and validate request",
binding: &v1.Binding{
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
Name: "foo",
},
Target: v1.ObjectReference{
Name: "foohost.kubernetes.mydomain.com",
},
},
Target: v1.ObjectReference{
Name: "foohost.kubernetes.mydomain.com",
},
}},
},
}
for _, item := range table {
handler := utiltesting.FakeHandler{
StatusCode: 200,
ResponseBody: "",
T: t,
}
server := httptest.NewServer(&handler)
defer server.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
b := binder{client}
if err := b.Bind(item.binding); err != nil {
t.Errorf("Unexpected error: %v", err)
continue
}
expectedBody := runtime.EncodeOrDie(schedulertesting.Test.Codec(), item.binding)
handler.ValidateRequest(t,
schedulertesting.Test.SubResourcePath(string(v1.ResourcePods), metav1.NamespaceDefault, "foo", "binding"),
"POST", &expectedBody)
for _, test := range table {
t.Run(test.name, func(t *testing.T) {
testBind(test.binding, t)
})
}
}
func testBind(binding *v1.Binding, t *testing.T) {
handler := utiltesting.FakeHandler{
StatusCode: 200,
ResponseBody: "",
T: t,
}
server := httptest.NewServer(&handler)
defer server.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
b := binder{client}
if err := b.Bind(binding); err != nil {
t.Errorf("Unexpected error: %v", err)
return
}
expectedBody := runtime.EncodeOrDie(schedulertesting.Test.Codec(), binding)
handler.ValidateRequest(t,
schedulertesting.Test.SubResourcePath(string(v1.ResourcePods), metav1.NamespaceDefault, "foo", "binding"),
"POST", &expectedBody)
}
func TestInvalidHardPodAffinitySymmetricWeight(t *testing.T) {
handler := utiltesting.FakeHandler{
StatusCode: 500,
@@ -406,38 +419,44 @@ func TestInvalidFactoryArgs(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
testCases := []struct {
name string
hardPodAffinitySymmetricWeight int32
expectErr string
}{
{
name: "symmetric weight below range",
hardPodAffinitySymmetricWeight: -1,
expectErr: "invalid hardPodAffinitySymmetricWeight: -1, must be in the range 0-100",
},
{
name: "symmetric weight above range",
hardPodAffinitySymmetricWeight: 101,
expectErr: "invalid hardPodAffinitySymmetricWeight: 101, must be in the range 0-100",
},
}
for _, test := range testCases {
factory := newConfigFactory(client, test.hardPodAffinitySymmetricWeight)
_, err := factory.Create()
if err == nil {
t.Errorf("expected err: %s, got nothing", test.expectErr)
}
t.Run(test.name, func(t *testing.T) {
factory := newConfigFactory(client, test.hardPodAffinitySymmetricWeight)
_, err := factory.Create()
if err == nil {
t.Errorf("expected err: %s, got nothing", test.expectErr)
}
})
}
}
func TestSkipPodUpdate(t *testing.T) {
for _, test := range []struct {
table := []struct {
pod *v1.Pod
isAssumedPodFunc func(*v1.Pod) bool
getPodFunc func(*v1.Pod) *v1.Pod
expected bool
name string
}{
// Non-assumed pod should not be skipped.
{
name: "Non-assumed pod",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-0",
@@ -453,9 +472,8 @@ func TestSkipPodUpdate(t *testing.T) {
},
expected: false,
},
// Pod update (with changes on ResourceVersion, Spec.NodeName and/or
// Annotations) for an already assumed pod should be skipped.
{
name: "with changes on ResourceVersion, Spec.NodeName and/or Annotations",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-0",
@@ -483,9 +501,8 @@ func TestSkipPodUpdate(t *testing.T) {
},
expected: true,
},
// Pod update (with changes on Labels) for an already assumed pod
// should not be skipped.
{
name: "with changes on Labels",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-0",
@@ -505,23 +522,26 @@ func TestSkipPodUpdate(t *testing.T) {
},
expected: false,
},
} {
c := &configFactory{
schedulerCache: &schedulertesting.FakeCache{
IsAssumedPodFunc: test.isAssumedPodFunc,
GetPodFunc: test.getPodFunc,
},
}
got := c.skipPodUpdate(test.pod)
if got != test.expected {
t.Errorf("skipPodUpdate() = %t, expected = %t", got, test.expected)
}
}
for _, test := range table {
t.Run(test.name, func(t *testing.T) {
c := &configFactory{
schedulerCache: &schedulertesting.FakeCache{
IsAssumedPodFunc: test.isAssumedPodFunc,
GetPodFunc: test.getPodFunc,
},
}
got := c.skipPodUpdate(test.pod)
if got != test.expected {
t.Errorf("skipPodUpdate() = %t, expected = %t", got, test.expected)
}
})
}
}
func newConfigFactory(client *clientset.Clientset, hardPodAffinitySymmetricWeight int32) scheduler.Configurator {
informerFactory := informers.NewSharedInformerFactory(client, 0)
return NewConfigFactory(
return NewConfigFactory(&ConfigFactoryArgs{
v1.DefaultSchedulerName,
client,
informerFactory.Core().V1().Nodes(),
@@ -529,15 +549,17 @@ func newConfigFactory(client *clientset.Clientset, hardPodAffinitySymmetricWeigh
informerFactory.Core().V1().PersistentVolumes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Extensions().V1beta1().ReplicaSets(),
informerFactory.Apps().V1beta1().StatefulSets(),
informerFactory.Apps().V1().ReplicaSets(),
informerFactory.Apps().V1().StatefulSets(),
informerFactory.Core().V1().Services(),
informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
informerFactory.Storage().V1().StorageClasses(),
hardPodAffinitySymmetricWeight,
enableEquivalenceCache,
disablePodPreemption,
)
schedulerapi.DefaultPercentageOfNodesToScore,
bindTimeoutSeconds,
})
}
type fakeExtender struct {
@@ -593,24 +615,22 @@ func (f *fakeExtender) IsInterested(pod *v1.Pod) bool {
}
func TestGetBinderFunc(t *testing.T) {
for _, test := range []struct {
podName string
extenders []algorithm.SchedulerExtender
table := []struct {
podName string
extenders []algorithm.SchedulerExtender
expectedBinderType string
name string
}{
// Expect to return the default binder because the extender is not a
// binder, even though it's interested in the pod.
{
name: "the extender is not a binder",
podName: "pod0",
extenders: []algorithm.SchedulerExtender{
&fakeExtender{isBinder: false, interestedPodName: "pod0"},
},
expectedBinderType: "*factory.binder",
},
// Expect to return the fake binder because one of the extenders is a
// binder and it's interested in the pod.
{
name: "one of the extenders is a binder and interested in pod",
podName: "pod0",
extenders: []algorithm.SchedulerExtender{
&fakeExtender{isBinder: false, interestedPodName: "pod0"},
@@ -618,9 +638,8 @@ func TestGetBinderFunc(t *testing.T) {
},
expectedBinderType: "*factory.fakeExtender",
},
// Expect to return the default binder because one of the extenders is
// a binder but the binder is not interested in the pod.
{
name: "one of the extenders is a binder, but not interested in pod",
podName: "pod1",
extenders: []algorithm.SchedulerExtender{
&fakeExtender{isBinder: false, interestedPodName: "pod1"},
@@ -628,20 +647,28 @@ func TestGetBinderFunc(t *testing.T) {
},
expectedBinderType: "*factory.binder",
},
} {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: test.podName,
},
}
}
f := &configFactory{}
binderFunc := f.getBinderFunc(test.extenders)
binder := binderFunc(pod)
binderType := fmt.Sprintf("%s", reflect.TypeOf(binder))
if binderType != test.expectedBinderType {
t.Errorf("Expected binder %q but got %q", test.expectedBinderType, binderType)
}
for _, test := range table {
t.Run(test.name, func(t *testing.T) {
testGetBinderFunc(test.expectedBinderType, test.podName, test.extenders, t)
})
}
}
func testGetBinderFunc(expectedBinderType, podName string, extenders []algorithm.SchedulerExtender, t *testing.T) {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
}
f := &configFactory{}
binderFunc := f.getBinderFunc(extenders)
binder := binderFunc(pod)
binderType := fmt.Sprintf("%s", reflect.TypeOf(binder))
if binderType != expectedBinderType {
t.Errorf("Expected binder %q but got %q", expectedBinderType, binderType)
}
}

View File

@@ -36,14 +36,18 @@ func TestAlgorithmNameValidation(t *testing.T) {
"Some,Alg:orithm",
}
for _, name := range algorithmNamesShouldValidate {
if !validName.MatchString(name) {
t.Errorf("%v should be a valid algorithm name but is not valid.", name)
}
t.Run(name, func(t *testing.T) {
if !validName.MatchString(name) {
t.Errorf("should be a valid algorithm name but is not valid.")
}
})
}
for _, name := range algorithmNamesShouldNotValidate {
if validName.MatchString(name) {
t.Errorf("%v should be an invalid algorithm name but is valid.", name)
}
t.Run(name, func(t *testing.T) {
if validName.MatchString(name) {
t.Errorf("should be an invalid algorithm name but is valid.")
}
})
}
}
@@ -70,16 +74,18 @@ func TestValidatePriorityConfigOverFlow(t *testing.T) {
},
}
for _, test := range tests {
err := validateSelectedConfigs(test.configs)
if test.expected {
if err == nil {
t.Errorf("Expected Overflow for %s", test.description)
t.Run(test.description, func(t *testing.T) {
err := validateSelectedConfigs(test.configs)
if test.expected {
if err == nil {
t.Errorf("Expected Overflow")
}
} else {
if err != nil {
t.Errorf("Did not expect an overflow")
}
}
} else {
if err != nil {
t.Errorf("Did not expect an overflow for %s", test.description)
}
}
})
}
}