Bumping k8s dependencies to 1.13
vendor/k8s.io/kubernetes/test/integration/scheduler/BUILD (generated, vendored) - 89 changed lines
@@ -25,47 +25,51 @@ go_test(
"//cmd/kube-scheduler/app:go_default_library",
"//cmd/kube-scheduler/app/config:go_default_library",
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/componentconfig:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/informers/informers_generated/internalversion:go_default_library",
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/controller/volume/persistentvolume:go_default_library",
"//pkg/controller/volume/persistentvolume/options:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubeapiserver/admission:go_default_library",
"//pkg/scheduler:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/algorithmprovider:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/factory:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//plugin/pkg/admission/podtolerationrestriction:go_default_library",
"//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//test/integration/framework:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)

@@ -96,26 +100,27 @@ go_library(
"//pkg/scheduler/algorithmprovider:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/factory:go_default_library",
"//pkg/util/taints:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/admission:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//test/integration/framework:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)
vendor/k8s.io/kubernetes/test/integration/scheduler/priorities_test.go (generated, vendored) - 62 changed lines
@@ -22,6 +22,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
testutils "k8s.io/kubernetes/test/utils"
"strings"
)

// This file tests the scheduler priority functions.
@@ -172,3 +173,64 @@ func TestPodAffinity(t *testing.T) {
}
t.Errorf("Pod %v got scheduled on an unexpected node: %v.", podName, pod.Spec.NodeName)
}

// TestImageLocality verifies that the scheduler's image locality priority function
// works correctly, i.e., the pod gets scheduled to the node where its container images are ready.
func TestImageLocality(t *testing.T) {
context := initTest(t, "image-locality")
defer cleanupTest(t, context)

// Add a few nodes.
_, err := createNodes(context.clientSet, "testnode", nil, 10)
if err != nil {
t.Fatalf("cannot create nodes: %v", err)
}

// We use a fake large image as the test image used by the pod, which has relatively large image size.
image := v1.ContainerImage{
Names: []string{
"fake-large-image:v1",
},
SizeBytes: 3000 * 1024 * 1024,
}

// Create a node with the large image
nodeWithLargeImage, err := createNodeWithImages(context.clientSet, "testnode-large-image", nil, []v1.ContainerImage{image})
if err != nil {
t.Fatalf("cannot create node with a large image: %v", err)
}

// Create a pod with containers each having the specified image.
podName := "pod-using-large-image"
pod, err := runPodWithContainers(context.clientSet, initPodWithContainers(context.clientSet, &podWithContainersConfig{
Name: podName,
Namespace: context.ns.Name,
Containers: makeContainersWithImages(image.Names),
}))
if err != nil {
t.Fatalf("error running pod with images: %v", err)
}
if pod.Spec.NodeName != nodeWithLargeImage.Name {
t.Errorf("pod %v got scheduled on an unexpected node: %v. Expected node: %v.", podName, pod.Spec.NodeName, nodeWithLargeImage.Name)
} else {
t.Logf("pod %v got successfully scheduled on node %v.", podName, pod.Spec.NodeName)
}
}

// makeContainerWithImage returns a list of v1.Container objects for each given image. Duplicates of an image are ignored,
// i.e., each image is used only once.
func makeContainersWithImages(images []string) []v1.Container {
var containers []v1.Container
usedImages := make(map[string]struct{})

for _, image := range images {
if _, ok := usedImages[image]; !ok {
containers = append(containers, v1.Container{
Name: strings.Replace(image, ":", "-", -1) + "-container",
Image: image,
})
usedImages[image] = struct{}{}
}
}
return containers
}
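The image-locality test above relies on the new makeContainersWithImages helper. As a quick reference, here is a minimal standalone sketch of that dedup logic; the helper body mirrors the diff, while the main function and sample image names are only illustrative, and the snippet assumes the k8s.io/api module is on the module path.

```go
package main

import (
	"fmt"
	"strings"

	v1 "k8s.io/api/core/v1"
)

// makeContainersWithImages builds one v1.Container per distinct image name and
// silently skips duplicates, as in the vendored test helper.
func makeContainersWithImages(images []string) []v1.Container {
	var containers []v1.Container
	usedImages := make(map[string]struct{})
	for _, image := range images {
		if _, ok := usedImages[image]; !ok {
			containers = append(containers, v1.Container{
				Name:  strings.Replace(image, ":", "-", -1) + "-container",
				Image: image,
			})
			usedImages[image] = struct{}{}
		}
	}
	return containers
}

func main() {
	// The image is listed twice but yields a single container.
	containers := makeContainersWithImages([]string{"fake-large-image:v1", "fake-large-image:v1"})
	fmt.Println(len(containers)) // 1
}
```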
vendor/k8s.io/kubernetes/test/integration/scheduler/scheduler_test.go (generated, vendored) - 26 changed lines
@@ -45,11 +45,11 @@ import (
schedulerapp "k8s.io/kubernetes/cmd/kube-scheduler/app"
schedulerappconfig "k8s.io/kubernetes/cmd/kube-scheduler/app/config"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
"k8s.io/kubernetes/pkg/scheduler/factory"
"k8s.io/kubernetes/test/integration/framework"
@@ -140,6 +140,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
"GeneralPredicates",
"MatchInterPodAffinity",
"MaxAzureDiskVolumeCount",
"MaxCSIVolumeCountPred",
"MaxEBSVolumeCount",
"MaxGCEPDVolumeCount",
"NoDiskConflict",
@@ -154,6 +155,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
"NodePreferAvoidPodsPriority",
"SelectorSpreadPriority",
"TaintTolerationPriority",
"ImageLocalityPriority",
),
},
{
@@ -173,7 +175,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
configPolicyName := fmt.Sprintf("scheduler-custom-policy-config-%d", i)
policyConfigMap := v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: configPolicyName},
Data: map[string]string{componentconfig.SchedulerPolicyConfigMapKey: test.policy},
Data: map[string]string{kubeschedulerconfig.SchedulerPolicyConfigMapKey: test.policy},
}

policyConfigMap.APIVersion = "v1"
@@ -182,18 +184,20 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet.CoreV1().Events("")})

defaultBindTimeout := int64(30)
ss := &schedulerappconfig.Config{
ComponentConfig: componentconfig.KubeSchedulerConfiguration{
ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
SchedulerName: v1.DefaultSchedulerName,
AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
Policy: &componentconfig.SchedulerPolicySource{
ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
AlgorithmSource: kubeschedulerconfig.SchedulerAlgorithmSource{
Policy: &kubeschedulerconfig.SchedulerPolicySource{
ConfigMap: &kubeschedulerconfig.SchedulerPolicyConfigMapSource{
Namespace: policyConfigMap.Namespace,
Name: policyConfigMap.Name,
},
},
},
BindTimeoutSeconds: &defaultBindTimeout,
},
Client: clientSet,
InformerFactory: informerFactory,
@@ -243,18 +247,20 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet.CoreV1().Events("")})

defaultBindTimeout := int64(30)
ss := &schedulerappconfig.Config{
ComponentConfig: componentconfig.KubeSchedulerConfiguration{
ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
SchedulerName: v1.DefaultSchedulerName,
AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
Policy: &componentconfig.SchedulerPolicySource{
ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
AlgorithmSource: kubeschedulerconfig.SchedulerAlgorithmSource{
Policy: &kubeschedulerconfig.SchedulerPolicySource{
ConfigMap: &kubeschedulerconfig.SchedulerPolicyConfigMapSource{
Namespace: "non-existent-config",
Name: "non-existent-config",
},
},
},
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
BindTimeoutSeconds: &defaultBindTimeout,
},
Client: clientSet,
InformerFactory: informerFactory,
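Because the old and new lines of these hunks sit side by side without +/- markers, it may help to see the migrated construction in one piece. The sketch below is assembled from the kubeschedulerconfig lines of the hunk; the wrapper function, its parameters, and the bind timeout value are illustrative rather than part of the vendored file, and it assumes the Kubernetes 1.13 module tree is available.

```go
package configsketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	schedulerappconfig "k8s.io/kubernetes/cmd/kube-scheduler/app/config"
	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
)

// newConfigFromPolicyConfigMap shows the 1.13-style shape: the old
// pkg/apis/componentconfig types are replaced by pkg/scheduler/apis/config,
// and BindTimeoutSeconds becomes part of the component configuration.
func newConfigFromPolicyConfigMap(cs clientset.Interface, inf informers.SharedInformerFactory, ns, name string) *schedulerappconfig.Config {
	bindTimeout := int64(30)
	return &schedulerappconfig.Config{
		ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
			SchedulerName:                  v1.DefaultSchedulerName,
			HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
			AlgorithmSource: kubeschedulerconfig.SchedulerAlgorithmSource{
				Policy: &kubeschedulerconfig.SchedulerPolicySource{
					ConfigMap: &kubeschedulerconfig.SchedulerPolicyConfigMapSource{
						Namespace: ns,
						Name:      name,
					},
				},
			},
			BindTimeoutSeconds: &bindTimeout,
		},
		Client:          cs,
		InformerFactory: inf,
	}
}
```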
vendor/k8s.io/kubernetes/test/integration/scheduler/taint_test.go (generated, vendored) - 708 changed lines
@@ -19,7 +19,7 @@ package scheduler
|
||||
// This file tests the Taint feature.
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -29,7 +29,6 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
internalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
|
||||
"k8s.io/kubernetes/pkg/controller/nodelifecycle"
|
||||
@@ -40,10 +39,28 @@ import (
|
||||
pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
|
||||
)
|
||||
|
||||
// TestTaintNodeByCondition verifies:
|
||||
// 1. MemoryPressure Toleration is added to non-BestEffort Pod by PodTolerationRestriction
|
||||
// 2. NodeController taints nodes by node condition
|
||||
// 3. Scheduler allows pod to tolerate node condition taints, e.g. network unavailable
|
||||
func newPod(nsName, name string, req, limit v1.ResourceList) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: nsName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "busybox",
|
||||
Image: "busybox",
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: req,
|
||||
Limits: limit,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// TestTaintNodeByCondition tests related cases for TaintNodeByCondition feature.
|
||||
func TestTaintNodeByCondition(t *testing.T) {
|
||||
enabled := utilfeature.DefaultFeatureGate.Enabled("TaintNodesByCondition")
|
||||
defer func() {
|
||||
@@ -76,7 +93,7 @@ func TestTaintNodeByCondition(t *testing.T) {
|
||||
algorithmprovider.ApplyFeatureGates()
|
||||
|
||||
context = initTestScheduler(t, context, controllerCh, false, nil)
|
||||
clientset := context.clientSet
|
||||
cs := context.clientSet
|
||||
informers := context.informerFactory
|
||||
nsName := context.ns.Name
|
||||
|
||||
@@ -86,8 +103,8 @@ func TestTaintNodeByCondition(t *testing.T) {
|
||||
informers.Core().V1().Nodes(),
|
||||
informers.Extensions().V1beta1().DaemonSets(),
|
||||
nil, // CloudProvider
|
||||
clientset,
|
||||
time.Second, // Node monitor grace period
|
||||
cs,
|
||||
time.Hour, // Node monitor grace period
|
||||
time.Second, // Node startup grace period
|
||||
time.Second, // Node monitor period
|
||||
time.Second, // Pod eviction timeout
|
||||
@@ -108,87 +125,375 @@ func TestTaintNodeByCondition(t *testing.T) {
|
||||
// Waiting for all controller sync.
|
||||
internalInformers.Start(controllerCh)
|
||||
internalInformers.WaitForCacheSync(controllerCh)
|
||||
informers.Start(controllerCh)
|
||||
informers.WaitForCacheSync(controllerCh)
|
||||
|
||||
// -------------------------------------------
|
||||
// Test TaintNodeByCondition feature.
|
||||
// -------------------------------------------
|
||||
nodeRes := v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("4000m"),
|
||||
v1.ResourceMemory: resource.MustParse("16Gi"),
|
||||
v1.ResourcePods: resource.MustParse("110"),
|
||||
}
|
||||
|
||||
podRes := v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
v1.ResourceMemory: resource.MustParse("100Mi"),
|
||||
}
|
||||
|
||||
notReadyToleration := v1.Toleration{
|
||||
Key: algorithm.TaintNodeNotReady,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
}
|
||||
|
||||
unreachableToleration := v1.Toleration{
|
||||
Key: algorithm.TaintNodeUnreachable,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
}
|
||||
|
||||
unschedulableToleration := v1.Toleration{
|
||||
Key: algorithm.TaintNodeUnschedulable,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
}
|
||||
|
||||
outOfDiskToleration := v1.Toleration{
|
||||
Key: algorithm.TaintNodeOutOfDisk,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
}
|
||||
|
||||
memoryPressureToleration := v1.Toleration{
|
||||
Key: algorithm.TaintNodeMemoryPressure,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
}
|
||||
|
||||
// Case 1: Add MemoryPressure Toleration for non-BestEffort pod.
|
||||
burstablePod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "burstable-pod",
|
||||
Namespace: nsName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
diskPressureToleration := v1.Toleration{
|
||||
Key: algorithm.TaintNodeDiskPressure,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
}
|
||||
|
||||
networkUnavailableToleration := v1.Toleration{
|
||||
Key: algorithm.TaintNodeNetworkUnavailable,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
}
|
||||
|
||||
pidPressureToleration := v1.Toleration{
|
||||
Key: algorithm.TaintNodePIDPressure,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
}
|
||||
|
||||
bestEffortPod := newPod(nsName, "besteffort-pod", nil, nil)
|
||||
burstablePod := newPod(nsName, "burstable-pod", podRes, nil)
|
||||
guaranteePod := newPod(nsName, "guarantee-pod", podRes, podRes)
|
||||
|
||||
type podCase struct {
|
||||
pod *v1.Pod
|
||||
tolerations []v1.Toleration
|
||||
fits bool
|
||||
}
|
||||
|
||||
// switch to table driven testings
|
||||
tests := []struct {
|
||||
name string
|
||||
existingTaints []v1.Taint
|
||||
nodeConditions []v1.NodeCondition
|
||||
unschedulable bool
|
||||
expectedTaints []v1.Taint
|
||||
pods []podCase
|
||||
}{
|
||||
{
|
||||
name: "not-ready node",
|
||||
nodeConditions: []v1.NodeCondition{
|
||||
{
|
||||
Name: "busybox",
|
||||
Image: "busybox",
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
},
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionFalse,
|
||||
},
|
||||
},
|
||||
expectedTaints: []v1.Taint{
|
||||
{
|
||||
Key: algorithm.TaintNodeNotReady,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
pods: []podCase{
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: guaranteePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
tolerations: []v1.Toleration{notReadyToleration},
|
||||
fits: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unreachable node",
|
||||
existingTaints: []v1.Taint{
|
||||
{
|
||||
Key: algorithm.TaintNodeUnreachable,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
nodeConditions: []v1.NodeCondition{
|
||||
{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionUnknown, // node status is "Unknown"
|
||||
},
|
||||
},
|
||||
expectedTaints: []v1.Taint{
|
||||
{
|
||||
Key: algorithm.TaintNodeUnreachable,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
pods: []podCase{
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: guaranteePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
tolerations: []v1.Toleration{unreachableToleration},
|
||||
fits: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unschedulable node",
|
||||
unschedulable: true, // node.spec.unschedulable = true
|
||||
nodeConditions: []v1.NodeCondition{
|
||||
{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
},
|
||||
expectedTaints: []v1.Taint{
|
||||
{
|
||||
Key: algorithm.TaintNodeUnschedulable,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
pods: []podCase{
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: guaranteePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
tolerations: []v1.Toleration{unschedulableToleration},
|
||||
fits: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "out of disk node",
|
||||
nodeConditions: []v1.NodeCondition{
|
||||
{
|
||||
Type: v1.NodeOutOfDisk,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
},
|
||||
expectedTaints: []v1.Taint{
|
||||
{
|
||||
Key: algorithm.TaintNodeOutOfDisk,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
// In OutOfDisk condition, only pods with toleration can be scheduled.
|
||||
pods: []podCase{
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: guaranteePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
tolerations: []v1.Toleration{outOfDiskToleration},
|
||||
fits: true,
|
||||
},
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
tolerations: []v1.Toleration{diskPressureToleration},
|
||||
fits: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "memory pressure node",
|
||||
nodeConditions: []v1.NodeCondition{
|
||||
{
|
||||
Type: v1.NodeMemoryPressure,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
},
|
||||
expectedTaints: []v1.Taint{
|
||||
{
|
||||
Key: algorithm.TaintNodeMemoryPressure,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
// In MemoryPressure condition, both Burstable and Guarantee pods are scheduled;
|
||||
// BestEffort pods with the toleration are also scheduled.
|
||||
pods: []podCase{
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
tolerations: []v1.Toleration{memoryPressureToleration},
|
||||
fits: true,
|
||||
},
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
tolerations: []v1.Toleration{diskPressureToleration},
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
fits: true,
|
||||
},
|
||||
{
|
||||
pod: guaranteePod,
|
||||
fits: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "disk pressure node",
|
||||
nodeConditions: []v1.NodeCondition{
|
||||
{
|
||||
Type: v1.NodeDiskPressure,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
},
|
||||
expectedTaints: []v1.Taint{
|
||||
{
|
||||
Key: algorithm.TaintNodeDiskPressure,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
// In DiskPressure condition, only pods with toleration can be scheduled.
|
||||
pods: []podCase{
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: guaranteePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
tolerations: []v1.Toleration{diskPressureToleration},
|
||||
fits: true,
|
||||
},
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
tolerations: []v1.Toleration{memoryPressureToleration},
|
||||
fits: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "network unavailable and node is ready",
|
||||
nodeConditions: []v1.NodeCondition{
|
||||
{
|
||||
Type: v1.NodeNetworkUnavailable,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
},
|
||||
expectedTaints: []v1.Taint{
|
||||
{
|
||||
Key: algorithm.TaintNodeNetworkUnavailable,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
pods: []podCase{
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: guaranteePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
tolerations: []v1.Toleration{
|
||||
networkUnavailableToleration,
|
||||
},
|
||||
fits: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
burstablePodInServ, err := clientset.CoreV1().Pods(nsName).Create(burstablePod)
|
||||
if err != nil {
|
||||
t.Errorf("Case 1: Failed to create pod: %v", err)
|
||||
} else if !reflect.DeepEqual(burstablePodInServ.Spec.Tolerations, []v1.Toleration{memoryPressureToleration}) {
|
||||
t.Errorf("Case 1: Unexpected toleration of non-BestEffort pod, expected: %+v, got: %v",
|
||||
[]v1.Toleration{memoryPressureToleration},
|
||||
burstablePodInServ.Spec.Tolerations)
|
||||
}
|
||||
|
||||
// Case 2: No MemoryPressure Toleration for BestEffort pod.
|
||||
besteffortPod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "best-effort-pod",
|
||||
Namespace: nsName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "busybox",
|
||||
Image: "busybox",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
besteffortPodInServ, err := clientset.CoreV1().Pods(nsName).Create(besteffortPod)
|
||||
if err != nil {
|
||||
t.Errorf("Case 2: Failed to create pod: %v", err)
|
||||
} else if len(besteffortPodInServ.Spec.Tolerations) != 0 {
|
||||
t.Errorf("Case 2: Unexpected toleration # of BestEffort pod, expected: 0, got: %v",
|
||||
len(besteffortPodInServ.Spec.Tolerations))
|
||||
}
|
||||
|
||||
// Case 3: Taint Node by NetworkUnavailable condition.
|
||||
networkUnavailableNode := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node-1",
|
||||
},
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("4000m"),
|
||||
v1.ResourceMemory: resource.MustParse("16Gi"),
|
||||
v1.ResourcePods: resource.MustParse("110"),
|
||||
},
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("4000m"),
|
||||
v1.ResourceMemory: resource.MustParse("16Gi"),
|
||||
v1.ResourcePods: resource.MustParse("110"),
|
||||
},
|
||||
Conditions: []v1.NodeCondition{
|
||||
{
|
||||
name: "network unavailable and node is not ready",
|
||||
nodeConditions: []v1.NodeCondition{
|
||||
{
|
||||
Type: v1.NodeNetworkUnavailable,
|
||||
Status: v1.ConditionTrue,
|
||||
@@ -198,116 +503,175 @@ func TestTaintNodeByCondition(t *testing.T) {
|
||||
Status: v1.ConditionFalse,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
nodeInformerCh := make(chan bool)
|
||||
nodeInformer := informers.Core().V1().Nodes().Informer()
|
||||
nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
UpdateFunc: func(old, cur interface{}) {
|
||||
curNode := cur.(*v1.Node)
|
||||
if curNode.Name != "node-1" {
|
||||
return
|
||||
}
|
||||
for _, taint := range curNode.Spec.Taints {
|
||||
if taint.Key == algorithm.TaintNodeNetworkUnavailable &&
|
||||
taint.Effect == v1.TaintEffectNoSchedule {
|
||||
nodeInformerCh <- true
|
||||
break
|
||||
}
|
||||
}
|
||||
},
|
||||
})
|
||||
|
||||
if _, err := clientset.CoreV1().Nodes().Create(networkUnavailableNode); err != nil {
|
||||
t.Errorf("Case 3: Failed to create node: %v", err)
|
||||
} else {
|
||||
select {
|
||||
case <-time.After(60 * time.Second):
|
||||
t.Errorf("Case 3: Failed to taint node after 60s.")
|
||||
case <-nodeInformerCh:
|
||||
}
|
||||
}
|
||||
|
||||
// Case 4: Schedule Pod with NetworkUnavailable toleration.
|
||||
networkDaemonPod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "network-daemon-pod",
|
||||
Namespace: nsName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
expectedTaints: []v1.Taint{
|
||||
{
|
||||
Name: "busybox",
|
||||
Image: "busybox",
|
||||
Key: algorithm.TaintNodeNetworkUnavailable,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: algorithm.TaintNodeNotReady,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
Tolerations: []v1.Toleration{
|
||||
pods: []podCase{
|
||||
{
|
||||
Key: algorithm.TaintNodeNetworkUnavailable,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
pod: bestEffortPod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: guaranteePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
tolerations: []v1.Toleration{
|
||||
networkUnavailableToleration,
|
||||
},
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
tolerations: []v1.Toleration{
|
||||
networkUnavailableToleration,
|
||||
notReadyToleration,
|
||||
},
|
||||
fits: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "pid pressure node",
|
||||
nodeConditions: []v1.NodeCondition{
|
||||
{
|
||||
Type: v1.NodePIDPressure,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
},
|
||||
expectedTaints: []v1.Taint{
|
||||
{
|
||||
Key: algorithm.TaintNodePIDPressure,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
pods: []podCase{
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: guaranteePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
tolerations: []v1.Toleration{pidPressureToleration},
|
||||
fits: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multi taints on node",
|
||||
nodeConditions: []v1.NodeCondition{
|
||||
{
|
||||
Type: v1.NodePIDPressure,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
{
|
||||
Type: v1.NodeMemoryPressure,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
{
|
||||
Type: v1.NodeDiskPressure,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
},
|
||||
expectedTaints: []v1.Taint{
|
||||
{
|
||||
Key: algorithm.TaintNodeDiskPressure,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: algorithm.TaintNodeMemoryPressure,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: algorithm.TaintNodePIDPressure,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if _, err := clientset.CoreV1().Pods(nsName).Create(networkDaemonPod); err != nil {
|
||||
t.Errorf("Case 4: Failed to create pod for network daemon: %v", err)
|
||||
} else {
|
||||
if err := waitForPodToScheduleWithTimeout(clientset, networkDaemonPod, time.Second*60); err != nil {
|
||||
t.Errorf("Case 4: Failed to schedule network daemon pod in 60s.")
|
||||
}
|
||||
}
|
||||
|
||||
// Case 5: Taint node by unschedulable condition
|
||||
unschedulableNode := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node-2",
|
||||
},
|
||||
Spec: v1.NodeSpec{
|
||||
Unschedulable: true,
|
||||
},
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("4000m"),
|
||||
v1.ResourceMemory: resource.MustParse("16Gi"),
|
||||
v1.ResourcePods: resource.MustParse("110"),
|
||||
},
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("4000m"),
|
||||
v1.ResourceMemory: resource.MustParse("16Gi"),
|
||||
v1.ResourcePods: resource.MustParse("110"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
nodeInformerCh2 := make(chan bool)
|
||||
nodeInformer2 := informers.Core().V1().Nodes().Informer()
|
||||
nodeInformer2.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
UpdateFunc: func(old, cur interface{}) {
|
||||
curNode := cur.(*v1.Node)
|
||||
if curNode.Name != "node-2" {
|
||||
return
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
node := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node-1",
|
||||
},
|
||||
Spec: v1.NodeSpec{
|
||||
Unschedulable: test.unschedulable,
|
||||
Taints: test.existingTaints,
|
||||
},
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: nodeRes,
|
||||
Allocatable: nodeRes,
|
||||
Conditions: test.nodeConditions,
|
||||
},
|
||||
}
|
||||
|
||||
for _, taint := range curNode.Spec.Taints {
|
||||
if taint.Key == algorithm.TaintNodeUnschedulable &&
|
||||
taint.Effect == v1.TaintEffectNoSchedule {
|
||||
nodeInformerCh2 <- true
|
||||
break
|
||||
if _, err := cs.CoreV1().Nodes().Create(node); err != nil {
|
||||
t.Errorf("Failed to create node, err: %v", err)
|
||||
}
|
||||
if err := waitForNodeTaints(cs, node, test.expectedTaints); err != nil {
|
||||
t.Errorf("Failed to taint node <%s>, err: %v", node.Name, err)
|
||||
}
|
||||
|
||||
var pods []*v1.Pod
|
||||
for i, p := range test.pods {
|
||||
pod := p.pod.DeepCopy()
|
||||
pod.Name = fmt.Sprintf("%s-%d", pod.Name, i)
|
||||
pod.Spec.Tolerations = p.tolerations
|
||||
|
||||
createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(pod)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create pod %s/%s, error: %v",
|
||||
pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
pods = append(pods, createdPod)
|
||||
|
||||
if p.fits {
|
||||
if err := waitForPodToSchedule(cs, createdPod); err != nil {
|
||||
t.Errorf("Failed to schedule pod %s/%s on the node, err: %v",
|
||||
pod.Namespace, pod.Name, err)
|
||||
}
|
||||
} else {
|
||||
if err := waitForPodUnschedulable(cs, createdPod); err != nil {
|
||||
t.Errorf("Unschedulable pod %s/%s gets scheduled on the node, err: %v",
|
||||
pod.Namespace, pod.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
})
|
||||
|
||||
if _, err := clientset.CoreV1().Nodes().Create(unschedulableNode); err != nil {
|
||||
t.Errorf("Case 5: Failed to create node: %v", err)
|
||||
} else {
|
||||
select {
|
||||
case <-time.After(60 * time.Second):
|
||||
t.Errorf("Case 5: Failed to taint node after 60s.")
|
||||
case <-nodeInformerCh2:
|
||||
}
|
||||
cleanupPods(cs, t, pods)
|
||||
cleanupNodes(cs, t)
|
||||
waitForSchedulerCacheCleanup(context.scheduler, t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
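The taint test above was rewritten into a table-driven form built around a small newPod helper whose request and limit lists determine the pod's QoS class, which in turn drives the expected scheduling outcome per taint. For reference, here is a self-contained sketch; the helper body mirrors the diff, while the namespace, pod names, and the printing main are illustrative and assume the k8s.io/api and k8s.io/apimachinery modules are available.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newPod mirrors the helper introduced in taint_test.go: the request/limit
// lists decide whether the pod is BestEffort, Burstable, or Guaranteed.
func newPod(nsName, name string, req, limit v1.ResourceList) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: nsName},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "busybox",
					Image: "busybox",
					Resources: v1.ResourceRequirements{
						Requests: req,
						Limits:   limit,
					},
				},
			},
		},
	}
}

func main() {
	podRes := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("100m"),
		v1.ResourceMemory: resource.MustParse("100Mi"),
	}
	// The three QoS variants used by the table-driven cases.
	pods := []*v1.Pod{
		newPod("default", "besteffort-pod", nil, nil),
		newPod("default", "burstable-pod", podRes, nil),
		newPod("default", "guarantee-pod", podRes, podRes),
	}
	for _, p := range pods {
		fmt.Println(p.Name)
	}
}
```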
vendor/k8s.io/kubernetes/test/integration/scheduler/util.go (generated, vendored) - 162 changed lines
@@ -51,6 +51,7 @@ import (
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/pkg/scheduler/factory"
taintutils "k8s.io/kubernetes/pkg/util/taints"
"k8s.io/kubernetes/test/integration/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -73,23 +74,25 @@ func createConfiguratorWithPodInformer(
podInformer coreinformers.PodInformer,
informerFactory informers.SharedInformerFactory,
) scheduler.Configurator {
return factory.NewConfigFactory(
schedulerName,
clientSet,
informerFactory.Core().V1().Nodes(),
podInformer,
informerFactory.Core().V1().PersistentVolumes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Extensions().V1beta1().ReplicaSets(),
informerFactory.Apps().V1beta1().StatefulSets(),
informerFactory.Core().V1().Services(),
informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
informerFactory.Storage().V1().StorageClasses(),
v1.DefaultHardPodAffinitySymmetricWeight,
utilfeature.DefaultFeatureGate.Enabled(features.EnableEquivalenceClassCache),
false,
)
return factory.NewConfigFactory(&factory.ConfigFactoryArgs{
SchedulerName: schedulerName,
Client: clientSet,
NodeInformer: informerFactory.Core().V1().Nodes(),
PodInformer: podInformer,
PvInformer: informerFactory.Core().V1().PersistentVolumes(),
PvcInformer: informerFactory.Core().V1().PersistentVolumeClaims(),
ReplicationControllerInformer: informerFactory.Core().V1().ReplicationControllers(),
ReplicaSetInformer: informerFactory.Apps().V1().ReplicaSets(),
StatefulSetInformer: informerFactory.Apps().V1().StatefulSets(),
ServiceInformer: informerFactory.Core().V1().Services(),
PdbInformer: informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
StorageClassInformer: informerFactory.Storage().V1().StorageClasses(),
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
EnableEquivalenceClassCache: utilfeature.DefaultFeatureGate.Enabled(features.EnableEquivalenceClassCache),
DisablePreemption: false,
PercentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
BindTimeoutSeconds: 600,
})
}

// initTestMasterAndScheduler initializes a test environment and creates a master with default
@@ -141,7 +144,7 @@ func initTestScheduler(
) *TestContext {
// Pod preemption is enabled by default scheduler configuration, but preemption only happens when PodPriority
// feature gate is enabled at the same time.
return initTestSchedulerWithOptions(t, context, controllerCh, setPodInformer, policy, false)
return initTestSchedulerWithOptions(t, context, controllerCh, setPodInformer, policy, false, false, time.Second)
}

// initTestSchedulerWithOptions initializes a test environment and creates a scheduler with default
@@ -153,15 +156,18 @@ func initTestSchedulerWithOptions(
setPodInformer bool,
policy *schedulerapi.Policy,
disablePreemption bool,
disableEquivalenceCache bool,
resyncPeriod time.Duration,
) *TestContext {
// Enable EnableEquivalenceClassCache for all integration tests.
defer utilfeaturetesting.SetFeatureGateDuringTest(
t,
utilfeature.DefaultFeatureGate,
features.EnableEquivalenceClassCache, true)()
if !disableEquivalenceCache {
defer utilfeaturetesting.SetFeatureGateDuringTest(
t,
utilfeature.DefaultFeatureGate,
features.EnableEquivalenceClassCache, true)()
}

// 1. Create scheduler
context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, time.Second)
context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, resyncPeriod)

var podInformer coreinformers.PodInformer

@@ -253,7 +259,7 @@ func initTest(t *testing.T, nsPrefix string) *TestContext {
// configuration but with pod preemption disabled.
func initTestDisablePreemption(t *testing.T, nsPrefix string) *TestContext {
return initTestSchedulerWithOptions(
t, initTestMaster(t, nsPrefix, nil), nil, true, nil, true)
t, initTestMaster(t, nsPrefix, nil), nil, true, nil, true, false, time.Second)
}

// cleanupTest deletes the scheduler and the test namespace. It should be called
@@ -322,24 +328,35 @@ func waitForNodeLabels(cs clientset.Interface, nodeName string, labels map[strin
return wait.Poll(time.Millisecond*100, wait.ForeverTestTimeout, nodeHasLabels(cs, nodeName, labels))
}

// createNode creates a node with the given resource list and
// returns a pointer and error status. If 'res' is nil, a predefined amount of
// initNode returns a node with the given resource list and images. If 'res' is nil, a predefined amount of
// resource will be used.
func createNode(cs clientset.Interface, name string, res *v1.ResourceList) (*v1.Node, error) {
func initNode(name string, res *v1.ResourceList, images []v1.ContainerImage) *v1.Node {
// if resource is nil, we use a default amount of resources for the node.
if res == nil {
res = &v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
}
}

n := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.NodeSpec{Unschedulable: false},
Status: v1.NodeStatus{
Capacity: *res,
Images: images,
},
}
return cs.CoreV1().Nodes().Create(n)
return n
}

// createNode creates a node with the given resource list.
func createNode(cs clientset.Interface, name string, res *v1.ResourceList) (*v1.Node, error) {
return cs.CoreV1().Nodes().Create(initNode(name, res, nil))
}

// createNodeWithImages creates a node with the given resource list and images.
func createNodeWithImages(cs clientset.Interface, name string, res *v1.ResourceList, images []v1.ContainerImage) (*v1.Node, error) {
return cs.CoreV1().Nodes().Create(initNode(name, res, images))
}

// updateNodeStatus updates the status of node.
@@ -363,6 +380,44 @@ func createNodes(cs clientset.Interface, prefix string, res *v1.ResourceList, nu
return nodes[:], nil
}

// nodeTainted return a condition function that returns true if the given node contains
// the taints.
func nodeTainted(cs clientset.Interface, nodeName string, taints []v1.Taint) wait.ConditionFunc {
return func() (bool, error) {
node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
return false, err
}

if len(taints) != len(node.Spec.Taints) {
return false, nil
}

for _, taint := range taints {
if !taintutils.TaintExists(node.Spec.Taints, &taint) {
return false, nil
}
}

return true, nil
}
}

// waitForNodeTaints waits for a node to have the target taints and returns
// an error if it does not have taints within the given timeout.
func waitForNodeTaints(cs clientset.Interface, node *v1.Node, taints []v1.Taint) error {
return wait.Poll(100*time.Millisecond, 30*time.Second, nodeTainted(cs, node.Name, taints))
}

// cleanupNodes deletes all nodes.
func cleanupNodes(cs clientset.Interface, t *testing.T) {
err := cs.CoreV1().Nodes().DeleteCollection(
metav1.NewDeleteOptions(0), metav1.ListOptions{})
if err != nil {
t.Errorf("error while deleting all nodes: %v", err)
}
}

type pausePodConfig struct {
Name string
Namespace string
@@ -451,6 +506,43 @@ func runPausePod(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error) {
return pod, nil
}

type podWithContainersConfig struct {
Name string
Namespace string
Containers []v1.Container
}

// initPodWithContainers initializes a pod API object from the given config. This is used primarily for generating
// pods with containers each having a specific image.
func initPodWithContainers(cs clientset.Interface, conf *podWithContainersConfig) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: conf.Name,
Namespace: conf.Namespace,
},
Spec: v1.PodSpec{
Containers: conf.Containers,
},
}
return pod
}

// runPodWithContainers creates a pod with given config and containers and waits
// until it is scheduled. It returns its pointer and error status.
func runPodWithContainers(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error) {
pod, err := cs.CoreV1().Pods(pod.Namespace).Create(pod)
if err != nil {
return nil, fmt.Errorf("Error creating pod-with-containers: %v", err)
}
if err = waitForPodToSchedule(cs, pod); err != nil {
return pod, fmt.Errorf("Pod %v didn't schedule successfully. Error: %v", pod.Name, err)
}
if pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err != nil {
return pod, fmt.Errorf("Error getting pod %v info: %v", pod.Name, err)
}
return pod, nil
}

// podDeleted returns true if a pod is not found in the given namespace.
func podDeleted(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
return func() (bool, error) {
@@ -638,3 +730,15 @@ func cleanupPodsInNamespace(cs clientset.Interface, t *testing.T, ns string) {
t.Errorf("error while waiting for pods in namespace %v: %v", ns, err)
}
}

func waitForSchedulerCacheCleanup(sched *scheduler.Scheduler, t *testing.T) {
schedulerCacheIsEmpty := func() (bool, error) {
snapshot := sched.Cache().Snapshot()

return len(snapshot.Nodes) == 0 && len(snapshot.AssumedPods) == 0, nil
}

if err := wait.Poll(time.Second, wait.ForeverTestTimeout, schedulerCacheIsEmpty); err != nil {
t.Errorf("Failed to wait for scheduler cache cleanup: %v", err)
}
}
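Putting the new helpers together, the intended usage in the taint test is roughly the flow below. This is a sketch only: it assumes it lives in the same package as the helpers above (initNode, waitForNodeTaints, initPodWithContainers, waitForPodUnschedulable, cleanupNodes) with the imports already used by taint_test.go, and the node and pod names are illustrative.

```go
// Sketch only: package-local helpers and imports as in the vendored tests.
func exerciseTaintHelpers(t *testing.T, cs clientset.Interface, ns string) {
	// In the real test the NodeLifecycleController derives taints from node
	// conditions; here the node is pre-tainted directly to keep the sketch small.
	node := initNode("node-sketch", nil, nil)
	node.Spec.Taints = []v1.Taint{
		{Key: algorithm.TaintNodeUnschedulable, Effect: v1.TaintEffectNoSchedule},
	}
	if _, err := cs.CoreV1().Nodes().Create(node); err != nil {
		t.Fatalf("failed to create node: %v", err)
	}
	// waitForNodeTaints polls nodeTainted until Spec.Taints matches exactly.
	if err := waitForNodeTaints(cs, node, node.Spec.Taints); err != nil {
		t.Errorf("node %s did not reach the expected taints: %v", node.Name, err)
	}

	// A pod without a matching toleration should stay unschedulable.
	pod := initPodWithContainers(cs, &podWithContainersConfig{
		Name:       "untolerated-pod",
		Namespace:  ns,
		Containers: []v1.Container{{Name: "busybox", Image: "busybox"}},
	})
	if created, err := cs.CoreV1().Pods(ns).Create(pod); err != nil {
		t.Fatalf("failed to create pod: %v", err)
	} else if err := waitForPodUnschedulable(cs, created); err != nil {
		t.Errorf("pod %s unexpectedly scheduled: %v", created.Name, err)
	}

	cleanupNodes(cs, t)
}
```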
vendor/k8s.io/kubernetes/test/integration/scheduler/volume_binding_test.go (generated, vendored) - 432 changed lines
@@ -20,6 +20,7 @@ package scheduler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
@@ -32,10 +33,17 @@ import (
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/rand"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/client-go/informers"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
|
||||
persistentvolumeoptions "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/options"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
type testConfig struct {
|
||||
@@ -55,14 +63,21 @@ var (
|
||||
|
||||
classWait = "wait"
|
||||
classImmediate = "immediate"
|
||||
classDynamic = "dynamic"
|
||||
|
||||
sharedClasses = map[storagev1.VolumeBindingMode]*storagev1.StorageClass{
|
||||
modeImmediate: makeStorageClass(classImmediate, &modeImmediate),
|
||||
modeWait: makeStorageClass(classWait, &modeWait),
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
node1 = "node-1"
|
||||
node2 = "node-2"
|
||||
podLimit = 100
|
||||
volsPerPod = 5
|
||||
nodeAffinityLabelKey = "kubernetes.io/hostname"
|
||||
node1 = "node-1"
|
||||
node2 = "node-2"
|
||||
podLimit = 100
|
||||
volsPerPod = 5
|
||||
nodeAffinityLabelKey = "kubernetes.io/hostname"
|
||||
provisionerPluginName = "kubernetes.io/mock-provisioner"
|
||||
)
|
||||
|
||||
type testPV struct {
|
||||
@@ -79,7 +94,11 @@ type testPVC struct {
|
||||
}
|
||||
|
||||
func TestVolumeBinding(t *testing.T) {
|
||||
config := setupCluster(t, "volume-scheduling", 2)
|
||||
features := map[string]bool{
|
||||
"VolumeScheduling": true,
|
||||
"PersistentLocalVolumes": true,
|
||||
}
|
||||
config := setupCluster(t, "volume-scheduling-", 2, features, 0, 0, false)
|
||||
defer config.teardown()
|
||||
|
||||
cases := map[string]struct {
|
||||
@@ -246,26 +265,194 @@ func TestVolumeBinding(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestVolumeBindingStress creates <podLimit> pods, each with <volsPerPod> unbound PVCs.
|
||||
func TestVolumeBindingStress(t *testing.T) {
|
||||
config := setupCluster(t, "volume-binding-stress", 1)
|
||||
// TestVolumeBindingRescheduling tests scheduler will retry scheduling when needed.
|
||||
func TestVolumeBindingRescheduling(t *testing.T) {
|
||||
features := map[string]bool{
|
||||
"VolumeScheduling": true,
|
||||
"PersistentLocalVolumes": true,
|
||||
}
|
||||
config := setupCluster(t, "volume-scheduling-", 2, features, 0, 0, false)
|
||||
defer config.teardown()
|
||||
|
||||
storageClassName := "local-storage"
|
||||
|
||||
cases := map[string]struct {
|
||||
pod *v1.Pod
|
||||
pvcs []*testPVC
|
||||
pvs []*testPV
|
||||
trigger func(config *testConfig)
|
||||
shouldFail bool
|
||||
}{
|
||||
"reschedule on WaitForFirstConsumer dynamic storage class add": {
|
||||
pod: makePod("pod-reschedule-onclassadd-dynamic", config.ns, []string{"pvc-reschedule-onclassadd-dynamic"}),
|
||||
pvcs: []*testPVC{
|
||||
{"pvc-reschedule-onclassadd-dynamic", "", ""},
|
||||
},
|
||||
trigger: func(config *testConfig) {
|
||||
sc := makeDynamicProvisionerStorageClass(storageClassName, &modeWait)
|
||||
if _, err := config.client.StorageV1().StorageClasses().Create(sc); err != nil {
|
||||
t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err)
|
||||
}
|
||||
},
|
||||
shouldFail: false,
|
||||
},
|
||||
"reschedule on WaitForFirstConsumer static storage class add": {
|
||||
pod: makePod("pod-reschedule-onclassadd-static", config.ns, []string{"pvc-reschedule-onclassadd-static"}),
|
||||
pvcs: []*testPVC{
|
||||
{"pvc-reschedule-onclassadd-static", "", ""},
|
||||
},
|
||||
trigger: func(config *testConfig) {
|
||||
sc := makeStorageClass(storageClassName, &modeWait)
|
||||
if _, err := config.client.StorageV1().StorageClasses().Create(sc); err != nil {
|
||||
t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err)
|
||||
}
|
||||
// Create pv for this class to mock static provisioner behavior.
|
||||
pv := makePV("pv-reschedule-onclassadd-static", storageClassName, "", "", node1)
|
||||
if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
|
||||
t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
|
||||
}
|
||||
},
|
||||
shouldFail: false,
|
||||
},
|
||||
"reschedule on delay binding PVC add": {
|
||||
pod: makePod("pod-reschedule-onpvcadd", config.ns, []string{"pvc-reschedule-onpvcadd"}),
|
||||
pvs: []*testPV{
|
||||
{
|
||||
name: "pv-reschedule-onpvcadd",
|
||||
scMode: modeWait,
|
||||
node: node1,
|
||||
},
|
||||
},
|
||||
trigger: func(config *testConfig) {
|
||||
pvc := makePVC("pvc-reschedule-onpvcadd", config.ns, &classWait, "")
|
||||
if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {
|
||||
t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err)
|
||||
}
|
||||
},
|
||||
shouldFail: false,
|
||||
},
|
||||
}
|
||||
|
||||
for name, test := range cases {
|
||||
glog.Infof("Running test %v", name)
|
||||
|
||||
if test.pod == nil {
|
||||
t.Fatal("pod is required for this test")
|
||||
}
|
||||
|
||||
// Create unbound pvc
|
||||
for _, pvcConfig := range test.pvcs {
|
||||
pvc := makePVC(pvcConfig.name, config.ns, &storageClassName, "")
|
||||
if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {
|
||||
t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create PVs
|
||||
for _, pvConfig := range test.pvs {
|
||||
pv := makePV(pvConfig.name, sharedClasses[pvConfig.scMode].Name, pvConfig.preboundPVC, config.ns, pvConfig.node)
|
||||
if _, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
|
||||
t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create pod
|
||||
if _, err := config.client.CoreV1().Pods(config.ns).Create(test.pod); err != nil {
|
||||
t.Fatalf("Failed to create Pod %q: %v", test.pod.Name, err)
|
||||
}
|
||||
|
||||
// Wait for the pod to be unschedulable.
|
||||
glog.Infof("Waiting for pod is unschedulable")
|
||||
if err := waitForPodUnschedulable(config.client, test.pod); err != nil {
|
||||
t.Errorf("Failed as Pod %s was not unschedulable: %v", test.pod.Name, err)
|
||||
}
|
||||
|
||||
// Trigger
|
||||
test.trigger(config)
|
||||
|
||||
// Wait for the pod to be scheduled or unschedulable.
|
||||
if !test.shouldFail {
|
||||
glog.Infof("Waiting for pod is scheduled")
|
||||
if err := waitForPodToSchedule(config.client, test.pod); err != nil {
|
||||
t.Errorf("Failed to schedule Pod %q: %v", test.pod.Name, err)
|
||||
}
|
||||
} else {
|
||||
glog.Infof("Waiting for pod is unschedulable")
|
||||
if err := waitForPodUnschedulable(config.client, test.pod); err != nil {
|
||||
t.Errorf("Failed as Pod %s was not unschedulable: %v", test.pod.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Force delete objects, but they still may not be immediately removed
|
||||
deleteTestObjects(config.client, config.ns, deleteOption)
|
||||
}
|
||||
}

// TestVolumeBindingStress creates <podLimit> pods, each with <volsPerPod> unbound PVCs.
// PVs are precreated.
func TestVolumeBindingStress(t *testing.T) {
    testVolumeBindingStress(t, 0, false, 0)
}

// Like TestVolumeBindingStress but with scheduler resync. In a real cluster,
// the scheduler retries failed pods frequently in response to various events,
// e.g. service/node update events.
// This is useful for detecting possible race conditions.
func TestVolumeBindingStressWithSchedulerResync(t *testing.T) {
    testVolumeBindingStress(t, time.Second, false, 0)
}

// Like TestVolumeBindingStress but with fast dynamic provisioning
func TestVolumeBindingDynamicStressFast(t *testing.T) {
    testVolumeBindingStress(t, 0, true, 0)
}

// Like TestVolumeBindingStress but with slow dynamic provisioning
func TestVolumeBindingDynamicStressSlow(t *testing.T) {
    testVolumeBindingStress(t, 0, true, 30)
}

func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration, dynamic bool, provisionDelaySeconds int) {
    features := map[string]bool{
        "VolumeScheduling":       true,
        "PersistentLocalVolumes": true,
    }
    config := setupCluster(t, "volume-binding-stress-", 1, features, schedulerResyncPeriod, provisionDelaySeconds, false)
    defer config.teardown()

    // Set the max volume limit to the number of PVCs the test will create.
    // TODO: remove when the max volume limit can be set through a StorageClass.
    if err := os.Setenv(predicates.KubeMaxPDVols, fmt.Sprintf("%v", podLimit*volsPerPod)); err != nil {
        t.Fatalf("failed to set max pd limit: %v", err)
    }
    defer os.Unsetenv(predicates.KubeMaxPDVols)

    scName := &classWait
    if dynamic {
        scName = &classDynamic
        sc := makeDynamicProvisionerStorageClass(*scName, &modeWait)
        if _, err := config.client.StorageV1().StorageClasses().Create(sc); err != nil {
            t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err)
        }
    }

    // Create enough PVs and PVCs for all the pods
    pvs := []*v1.PersistentVolume{}
    pvcs := []*v1.PersistentVolumeClaim{}
    for i := 0; i < podLimit*volsPerPod; i++ {
        pv := makePV(fmt.Sprintf("pv-stress-%v", i), classWait, "", "", node1)
        pvc := makePVC(fmt.Sprintf("pvc-stress-%v", i), config.ns, &classWait, "")

        if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
            t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
        // Don't create pvs for dynamic provisioning test
        if !dynamic {
            pv := makePV(fmt.Sprintf("pv-stress-%v", i), *scName, "", "", node1)
            if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
                t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
            }
            pvs = append(pvs, pv)
        }

        pvc := makePVC(fmt.Sprintf("pvc-stress-%v", i), config.ns, scName, "")
        if pvc, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {
            t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err)
        }

        pvs = append(pvs, pv)
        pvcs = append(pvcs, pvc)
    }

@@ -277,7 +464,7 @@ func TestVolumeBindingStress(t *testing.T) {
            podPvcs = append(podPvcs, pvcs[j].Name)
        }

        pod := makePod(fmt.Sprintf("pod%v", i), config.ns, podPvcs)
        pod := makePod(fmt.Sprintf("pod%03d", i), config.ns, podPvcs)
        if pod, err := config.client.CoreV1().Pods(config.ns).Create(pod); err != nil {
            t.Fatalf("Failed to create Pod %q: %v", pod.Name, err)
        }
@@ -288,7 +475,7 @@ func TestVolumeBindingStress(t *testing.T) {
    for _, pod := range pods {
        // Use an increased timeout for the stress test because there is a
        // higher chance of PV sync errors.
        if err := waitForPodToScheduleWithTimeout(config.client, pod, 60*time.Second); err != nil {
        if err := waitForPodToScheduleWithTimeout(config.client, pod, 2*time.Minute); err != nil {
            t.Errorf("Failed to schedule Pod %q: %v", pod.Name, err)
        }
    }
@@ -302,8 +489,142 @@ func TestVolumeBindingStress(t *testing.T) {
    }
}
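
// waitForPodToScheduleWithTimeout (used above with a 2-minute budget) is also
// defined outside this hunk; waitForPodToSchedule presumably wraps it with a
// default timeout. A minimal sketch under the same assumptions as the
// unschedulable-wait sketch earlier in this file; the poll interval and the
// condition check are guesses, not the upstream helper:
func waitForPodToScheduleWithTimeoutSketch(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
    return wait.Poll(100*time.Millisecond, timeout, func() (bool, error) {
        p, err := cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
        if err != nil {
            return false, nil
        }
        for _, cond := range p.Status.Conditions {
            // Done once the scheduler has bound the pod to a node.
            if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionTrue {
                return true, nil
            }
        }
        return false, nil
    })
}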

func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, numPVsFirstNode int) {
    features := map[string]bool{
        "VolumeScheduling":       true,
        "PersistentLocalVolumes": true,
    }
    // TODO: disable equivalence cache until kubernetes/kubernetes#67680 is fixed
    config := setupCluster(t, "volume-pod-affinity-", numNodes, features, 0, 0, true)
    defer config.teardown()

    pods := []*v1.Pod{}
    pvcs := []*v1.PersistentVolumeClaim{}
    pvs := []*v1.PersistentVolume{}

    // Create PVs for the first node
    for i := 0; i < numPVsFirstNode; i++ {
        pv := makePV(fmt.Sprintf("pv-node1-%v", i), classWait, "", "", node1)
        if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
            t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
        }
        pvs = append(pvs, pv)
    }

    // Create one PV per node for the remaining nodes
    for i := 2; i <= numNodes; i++ {
        pv := makePV(fmt.Sprintf("pv-node%v-0", i), classWait, "", "", fmt.Sprintf("node-%v", i))
        if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
            t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
        }
        pvs = append(pvs, pv)
    }

    // Create pods
    for i := 0; i < numPods; i++ {
        // Create one PVC per pod
        pvc := makePVC(fmt.Sprintf("pvc-%v", i), config.ns, &classWait, "")
        if pvc, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {
            t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err)
        }
        pvcs = append(pvcs, pvc)

        // Create a pod with pod (anti-)affinity
        pod := makePod(fmt.Sprintf("pod%03d", i), config.ns, []string{pvc.Name})
        pod.Spec.Affinity = &v1.Affinity{}
        affinityTerms := []v1.PodAffinityTerm{
            {
                LabelSelector: &metav1.LabelSelector{
                    MatchExpressions: []metav1.LabelSelectorRequirement{
                        {
                            Key:      "app",
                            Operator: metav1.LabelSelectorOpIn,
                            Values:   []string{"volume-binding-test"},
                        },
                    },
                },
                TopologyKey: nodeAffinityLabelKey,
            },
        }
        if anti {
            pod.Spec.Affinity.PodAntiAffinity = &v1.PodAntiAffinity{
                RequiredDuringSchedulingIgnoredDuringExecution: affinityTerms,
            }
        } else {
            pod.Spec.Affinity.PodAffinity = &v1.PodAffinity{
                RequiredDuringSchedulingIgnoredDuringExecution: affinityTerms,
            }
        }

        if pod, err := config.client.CoreV1().Pods(config.ns).Create(pod); err != nil {
            t.Fatalf("Failed to create Pod %q: %v", pod.Name, err)
        }
        pods = append(pods, pod)
    }

    // Validate that the pods were scheduled
    scheduledNodes := sets.NewString()
    for _, pod := range pods {
        if err := waitForPodToSchedule(config.client, pod); err != nil {
            t.Errorf("Failed to schedule Pod %q: %v", pod.Name, err)
        } else {
            // Keep track of all the nodes that the pods were scheduled on
            pod, err = config.client.CoreV1().Pods(config.ns).Get(pod.Name, metav1.GetOptions{})
            if err != nil {
                t.Fatalf("Failed to get Pod %q: %v", pod.Name, err)
            }
            if pod.Spec.NodeName == "" {
                t.Fatalf("Pod %q node name unset after scheduling", pod.Name)
            }
            scheduledNodes.Insert(pod.Spec.NodeName)
        }
    }

    // Validate the affinity policy
    if anti {
        // The pods should have been spread across different nodes
        if scheduledNodes.Len() != numPods {
            t.Errorf("Pods were scheduled across %v nodes instead of %v", scheduledNodes.Len(), numPods)
        }
    } else {
        // The pods should all have been scheduled on a single node
        if scheduledNodes.Len() != 1 {
            t.Errorf("Pods were scheduled across %v nodes instead of %v", scheduledNodes.Len(), 1)
        }
    }

    // Validate PVC binding
    for _, pvc := range pvcs {
        validatePVCPhase(t, config.client, pvc.Name, config.ns, v1.ClaimBound)
    }
}
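
// validatePVCPhase is another helper defined outside this hunk. A sketch of
// the check it presumably performs; the signature is inferred from the call
// site above, and the absence of polling is an assumption (the real helper
// may wait and retry before failing):
func validatePVCPhaseSketch(t *testing.T, client clientset.Interface, pvcName, ns string, phase v1.PersistentVolumeClaimPhase) {
    claim, err := client.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
    if err != nil {
        t.Errorf("Failed to get PersistentVolumeClaim %q: %v", pvcName, err)
        return
    }
    if claim.Status.Phase != phase {
        t.Errorf("PersistentVolumeClaim %q is in phase %q, expected %q", pvcName, claim.Status.Phase, phase)
    }
}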

func TestVolumeBindingWithAntiAffinity(t *testing.T) {
    numNodes := 10
    // Create as many pods as there are nodes
    numPods := numNodes
    // Create many more PVs on node1 to increase the chance of selecting node1
    numPVsFirstNode := 10 * numNodes

    testVolumeBindingWithAffinity(t, true, numNodes, numPods, numPVsFirstNode)
}

func TestVolumeBindingWithAffinity(t *testing.T) {
    numPods := 10
    // Create many more nodes to increase the chance of selecting a PV on a different node than node1
    numNodes := 10 * numPods
    // Create numPods PVs on the first node
    numPVsFirstNode := numPods

    testVolumeBindingWithAffinity(t, false, numNodes, numPods, numPVsFirstNode)
}
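
// Both tests above depend on makePV pinning each PV to one node (only the
// first lines of that helper appear further down in this hunk). A sketch of
// the node-affinity stanza such a local PV would carry, assuming the helper
// keys it on nodeAffinityLabelKey; the exact shape is an assumption, not the
// upstream makePV implementation:
func setPVNodeAffinitySketch(pv *v1.PersistentVolume, node string) {
    // With VolumeScheduling enabled, the scheduler only considers this PV
    // for pods that can land on a node carrying nodeAffinityLabelKey=node.
    pv.Spec.NodeAffinity = &v1.VolumeNodeAffinity{
        Required: &v1.NodeSelector{
            NodeSelectorTerms: []v1.NodeSelectorTerm{
                {
                    MatchExpressions: []v1.NodeSelectorRequirement{
                        {
                            Key:      nodeAffinityLabelKey,
                            Operator: v1.NodeSelectorOpIn,
                            Values:   []string{node},
                        },
                    },
                },
            },
        },
    }
}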

func TestPVAffinityConflict(t *testing.T) {
    config := setupCluster(t, "volume-scheduling", 3)
    features := map[string]bool{
        "VolumeScheduling":       true,
        "PersistentLocalVolumes": true,
    }
    config := setupCluster(t, "volume-scheduling-", 3, features, 0, 0, false)
    defer config.teardown()

    pv := makePV("local-pv", classImmediate, "", "", node1)
@@ -361,30 +682,52 @@ func TestPVAffinityConflict(t *testing.T) {
    }
}

func setupCluster(t *testing.T, nsName string, numberOfNodes int) *testConfig {
    // Enable feature gates
    utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true,PersistentLocalVolumes=true")
func setupCluster(t *testing.T, nsName string, numberOfNodes int, features map[string]bool, resyncPeriod time.Duration, provisionDelaySeconds int, disableEquivalenceCache bool) *testConfig {
    oldFeatures := make(map[string]bool, len(features))
    for feature := range features {
        oldFeatures[feature] = utilfeature.DefaultFeatureGate.Enabled(utilfeature.Feature(feature))
    }
    // Set feature gates
    utilfeature.DefaultFeatureGate.SetFromMap(features)

    controllerCh := make(chan struct{})

    context := initTestScheduler(t, initTestMaster(t, nsName, nil), controllerCh, false, nil)
    context := initTestSchedulerWithOptions(t, initTestMaster(t, nsName, nil), controllerCh, false, nil, false, disableEquivalenceCache, resyncPeriod)

    clientset := context.clientSet
    ns := context.ns.Name
    informers := context.informerFactory
    // Informer factory for the controllers; resync is disabled for testing.
    informerFactory := informers.NewSharedInformerFactory(context.clientSet, 0)

    // Start the PV controller for volume binding.
    host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)
    plugin := &volumetest.FakeVolumePlugin{
        PluginName:             provisionerPluginName,
        Host:                   host,
        Config:                 volume.VolumeConfig{},
        LastProvisionerOptions: volume.VolumeOptions{},
        ProvisionDelaySeconds:  provisionDelaySeconds,
        NewAttacherCallCount:   0,
        NewDetacherCallCount:   0,
        Mounters:               nil,
        Unmounters:             nil,
        Attachers:              nil,
        Detachers:              nil,
    }
    plugins := []volume.VolumePlugin{plugin}

    controllerOptions := persistentvolumeoptions.NewPersistentVolumeControllerOptions()
    params := persistentvolume.ControllerParameters{
        KubeClient: clientset,
        SyncPeriod: time.Hour, // test shouldn't need to resync
        VolumePlugins: nil, // TODO; need later for dynamic provisioning
        SyncPeriod: controllerOptions.PVClaimBinderSyncPeriod,
        VolumePlugins: plugins,
        Cloud: nil,
        ClusterName: "volume-test-cluster",
        VolumeInformer: informers.Core().V1().PersistentVolumes(),
        ClaimInformer: informers.Core().V1().PersistentVolumeClaims(),
        ClassInformer: informers.Storage().V1().StorageClasses(),
        PodInformer: informers.Core().V1().Pods(),
        NodeInformer: informers.Core().V1().Nodes(),
        VolumeInformer: informerFactory.Core().V1().PersistentVolumes(),
        ClaimInformer: informerFactory.Core().V1().PersistentVolumeClaims(),
        ClassInformer: informerFactory.Storage().V1().StorageClasses(),
        PodInformer: informerFactory.Core().V1().Pods(),
        NodeInformer: informerFactory.Core().V1().Nodes(),
        EnableDynamicProvisioning: true,
    }
    ctrl, err := persistentvolume.NewController(params)
@@ -392,6 +735,9 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int) *testConfig {
        t.Fatalf("Failed to create PV controller: %v", err)
    }
    go ctrl.Run(controllerCh)
    // Start informer factory after all controllers are configured and running.
    informerFactory.Start(controllerCh)
    informerFactory.WaitForCacheSync(controllerCh)

    // Create shared objects
    // Create nodes
@@ -422,11 +768,7 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int) *testConfig {
    }

    // Create SCs
    scs := []*storagev1.StorageClass{
        makeStorageClass(classImmediate, &modeImmediate),
        makeStorageClass(classWait, &modeWait),
    }
    for _, sc := range scs {
    for _, sc := range sharedClasses {
        if _, err := clientset.StorageV1().StorageClasses().Create(sc); err != nil {
            t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err)
        }
@@ -439,7 +781,8 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int) *testConfig {
        teardown: func() {
            deleteTestObjects(clientset, ns, nil)
            cleanupTest(t, context)
            utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false,LocalPersistentVolumes=false")
            // Restore feature gates
            utilfeature.DefaultFeatureGate.SetFromMap(oldFeatures)
        },
    }
}
@@ -461,6 +804,16 @@ func makeStorageClass(name string, mode *storagev1.VolumeBindingMode) *storagev1
    }
}

func makeDynamicProvisionerStorageClass(name string, mode *storagev1.VolumeBindingMode) *storagev1.StorageClass {
    return &storagev1.StorageClass{
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
        },
        Provisioner:       provisionerPluginName,
        VolumeBindingMode: mode,
    }
}

func makePV(name, scName, pvcName, ns, node string) *v1.PersistentVolume {
    pv := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{
@@ -543,12 +896,15 @@ func makePod(name, ns string, pvcs []string) *v1.Pod {
        ObjectMeta: metav1.ObjectMeta{
            Name:      name,
            Namespace: ns,
            Labels: map[string]string{
                "app": "volume-binding-test",
            },
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name:    "write-pod",
                    Image:   "k8s.gcr.io/busybox:1.24",
                    Image:   imageutils.GetE2EImage(imageutils.BusyBox),
                    Command: []string{"/bin/sh"},
                    Args:    []string{"-c", "while true; do sleep 1; done"},
                },