Bumping k8s dependencies to 1.13
64 vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption_test.go (generated) (vendored)
@@ -23,13 +23,17 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/tools/record"
 	kubeapi "k8s.io/kubernetes/pkg/apis/core"
+	"k8s.io/kubernetes/pkg/apis/scheduling"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 )
 
 const (
 	critical = "critical"
+	clusterCritical = "cluster-critical"
+	nodeCritical = "node-critical"
 	bestEffort = "bestEffort"
 	burstable = "burstable"
 	highRequestBurstable = "high-request-burstable"
@@ -85,6 +89,9 @@ func getTestCriticalPodAdmissionHandler(podProvider *fakePodProvider, podKiller
 }
 
 func TestEvictPodsToFreeRequests(t *testing.T) {
+	if err := utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=true"); err != nil {
+		t.Errorf("failed to set ExperimentalCriticalPodAnnotation to true: %v", err)
+	}
 	type testRun struct {
 		testName string
 		inputPods []*v1.Pod
@@ -123,7 +130,7 @@ func TestEvictPodsToFreeRequests(t *testing.T) {
 	}
 	for _, r := range runs {
 		podProvider.setPods(r.inputPods)
-		outErr := criticalPodAdmissionHandler.evictPodsToFreeRequests(r.insufficientResources)
+		outErr := criticalPodAdmissionHandler.evictPodsToFreeRequests(allPods[critical], r.insufficientResources)
 		outputPods := podKiller.getKilledPods()
 		if !r.expectErr && outErr != nil {
 			t.Errorf("evictPodsToFreeRequests returned an unexpected error during the %s test. Err: %v", r.testName, outErr)
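The hunk above changes the signature of evictPodsToFreeRequests: the pod being admitted is now passed explicitly so its priority can be weighed against candidate victims (the new test cases further down exercise exactly that). A minimal sketch of the new call shape, reusing helpers that already exist in this test file; the function name admitCriticalPodSketch is hypothetical and not part of the vendored code.

// Sketch only, assuming the file-local helpers allPods and
// getAdmissionRequirementList from this test file.
func admitCriticalPodSketch(t *testing.T, handler *CriticalPodAdmissionHandler, allPods map[string]*v1.Pod) {
	// The preemptor is now the first argument to evictPodsToFreeRequests (new in 1.13).
	if err := handler.evictPodsToFreeRequests(allPods[critical], getAdmissionRequirementList(0, 0, 1)); err != nil {
		t.Errorf("unexpected eviction error: %v", err)
	}
}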
@@ -143,7 +150,7 @@ func BenchmarkGetPodsToPreempt(t *testing.B) {
 		inputPods = append(inputPods, allPods[tinyBurstable])
 	}
 	for n := 0; n < t.N; n++ {
-		getPodsToPreempt(inputPods, admissionRequirementList([]*admissionRequirement{
+		getPodsToPreempt(nil, inputPods, admissionRequirementList([]*admissionRequirement{
 			{
 				resourceName: v1.ResourceCPU,
 				quantity: parseCPUToInt64("110m"),
@@ -154,6 +161,7 @@ func BenchmarkGetPodsToPreempt(t *testing.B) {
 func TestGetPodsToPreempt(t *testing.T) {
 	type testRun struct {
 		testName string
+		preemptor *v1.Pod
 		inputPods []*v1.Pod
 		insufficientResources admissionRequirementList
 		expectErr bool
@@ -163,6 +171,7 @@ func TestGetPodsToPreempt(t *testing.T) {
 	runs := []testRun{
 		{
 			testName: "no requirements",
+			preemptor: allPods[critical],
 			inputPods: []*v1.Pod{},
 			insufficientResources: getAdmissionRequirementList(0, 0, 0),
 			expectErr: false,
@@ -170,6 +179,7 @@ func TestGetPodsToPreempt(t *testing.T) {
 		},
 		{
 			testName: "no pods",
+			preemptor: allPods[critical],
 			inputPods: []*v1.Pod{},
 			insufficientResources: getAdmissionRequirementList(0, 0, 1),
 			expectErr: true,
@@ -177,6 +187,7 @@ func TestGetPodsToPreempt(t *testing.T) {
 		},
 		{
 			testName: "equal pods and resources requirements",
+			preemptor: allPods[critical],
 			inputPods: []*v1.Pod{allPods[burstable]},
 			insufficientResources: getAdmissionRequirementList(100, 100, 1),
 			expectErr: false,
@@ -184,6 +195,7 @@ func TestGetPodsToPreempt(t *testing.T) {
 		},
 		{
 			testName: "higher requirements than pod requests",
+			preemptor: allPods[critical],
 			inputPods: []*v1.Pod{allPods[burstable]},
 			insufficientResources: getAdmissionRequirementList(200, 200, 2),
 			expectErr: true,
@@ -191,6 +203,7 @@ func TestGetPodsToPreempt(t *testing.T) {
 		},
 		{
 			testName: "choose between bestEffort and burstable",
+			preemptor: allPods[critical],
 			inputPods: []*v1.Pod{allPods[burstable], allPods[bestEffort]},
 			insufficientResources: getAdmissionRequirementList(0, 0, 1),
 			expectErr: false,
@@ -198,6 +211,7 @@ func TestGetPodsToPreempt(t *testing.T) {
 		},
 		{
 			testName: "choose between burstable and guaranteed",
+			preemptor: allPods[critical],
 			inputPods: []*v1.Pod{allPods[burstable], allPods[guaranteed]},
 			insufficientResources: getAdmissionRequirementList(0, 0, 1),
 			expectErr: false,
@@ -205,6 +219,7 @@ func TestGetPodsToPreempt(t *testing.T) {
 		},
 		{
 			testName: "choose lower request burstable if it meets requirements",
+			preemptor: allPods[critical],
 			inputPods: []*v1.Pod{allPods[bestEffort], allPods[highRequestBurstable], allPods[burstable]},
 			insufficientResources: getAdmissionRequirementList(100, 100, 0),
 			expectErr: false,
@@ -212,6 +227,7 @@ func TestGetPodsToPreempt(t *testing.T) {
 		},
 		{
 			testName: "choose higher request burstable if lower does not meet requirements",
+			preemptor: allPods[critical],
 			inputPods: []*v1.Pod{allPods[bestEffort], allPods[burstable], allPods[highRequestBurstable]},
 			insufficientResources: getAdmissionRequirementList(150, 150, 0),
 			expectErr: false,
@@ -219,6 +235,7 @@ func TestGetPodsToPreempt(t *testing.T) {
 		},
 		{
 			testName: "multiple pods required",
+			preemptor: allPods[critical],
 			inputPods: []*v1.Pod{allPods[bestEffort], allPods[burstable], allPods[highRequestBurstable], allPods[guaranteed], allPods[highRequestGuaranteed]},
 			insufficientResources: getAdmissionRequirementList(350, 350, 0),
 			expectErr: false,
@@ -226,14 +243,32 @@ func TestGetPodsToPreempt(t *testing.T) {
 		},
 		{
 			testName: "evict guaranteed when we have to, and dont evict the extra burstable",
+			preemptor: allPods[critical],
 			inputPods: []*v1.Pod{allPods[bestEffort], allPods[burstable], allPods[highRequestBurstable], allPods[guaranteed], allPods[highRequestGuaranteed]},
 			insufficientResources: getAdmissionRequirementList(0, 550, 0),
 			expectErr: false,
 			expectedOutput: []*v1.Pod{allPods[highRequestBurstable], allPods[highRequestGuaranteed]},
 		},
+		{
+			testName: "evict cluster critical pod for node critical pod",
+			preemptor: allPods[nodeCritical],
+			inputPods: []*v1.Pod{allPods[clusterCritical]},
+			insufficientResources: getAdmissionRequirementList(100, 0, 0),
+			expectErr: false,
+			expectedOutput: []*v1.Pod{allPods[clusterCritical]},
+		},
+		{
+			testName: "can not evict node critical pod for cluster critical pod",
+			preemptor: allPods[clusterCritical],
+			inputPods: []*v1.Pod{allPods[nodeCritical]},
+			insufficientResources: getAdmissionRequirementList(100, 0, 0),
+			expectErr: true,
+			expectedOutput: nil,
+		},
 	}
 
 	for _, r := range runs {
-		outputPods, outErr := getPodsToPreempt(r.inputPods, r.insufficientResources)
+		outputPods, outErr := getPodsToPreempt(r.preemptor, r.inputPods, r.insufficientResources)
 		if !r.expectErr && outErr != nil {
 			t.Errorf("getPodsToPreempt returned an unexpected error during the %s test. Err: %v", r.testName, outErr)
 		} else if r.expectErr && outErr == nil {
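The two new cases above pin down the ordering the preemptor argument is meant to enforce: a node-critical pod may displace a cluster-critical one, but not the reverse. A hypothetical helper, not part of the vendored file, that captures that rule under the priorities assigned in getTestPods below:

// Illustrative sketch, assuming preemption is allowed only when the preemptor's
// pod priority is strictly higher than the victim's. With the priorities set in
// getTestPods (node-critical = SystemCriticalPriority+100, cluster-critical =
// SystemCriticalPriority), only the node-critical pod can evict the other.
func canPreempt(preemptor, victim *v1.Pod) bool {
	if preemptor == nil || preemptor.Spec.Priority == nil || victim.Spec.Priority == nil {
		return true // sketch: no priority information available, stay permissive
	}
	return *preemptor.Spec.Priority > *victim.Spec.Priority
}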
@@ -349,6 +384,18 @@ func getTestPods() map[string]*v1.Pod {
 				v1.ResourceMemory: resource.MustParse("100Mi"),
 			},
 		}),
+		clusterCritical: getPodWithResources(clusterCritical, v1.ResourceRequirements{
+			Requests: v1.ResourceList{
+				v1.ResourceCPU: resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100Mi"),
+			},
+		}),
+		nodeCritical: getPodWithResources(nodeCritical, v1.ResourceRequirements{
+			Requests: v1.ResourceList{
+				v1.ResourceCPU: resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100Mi"),
+			},
+		}),
 		burstable: getPodWithResources(burstable, v1.ResourceRequirements{
 			Requests: v1.ResourceList{
 				v1.ResourceCPU: resource.MustParse("100m"),
@@ -384,6 +431,17 @@ func getTestPods() map[string]*v1.Pod {
 	}
 	allPods[critical].Namespace = kubeapi.NamespaceSystem
 	allPods[critical].Annotations[kubetypes.CriticalPodAnnotationKey] = ""
+
+	allPods[clusterCritical].Namespace = kubeapi.NamespaceSystem
+	allPods[clusterCritical].Spec.PriorityClassName = scheduling.SystemClusterCritical
+	clusterPriority := scheduling.SystemCriticalPriority
+	allPods[clusterCritical].Spec.Priority = &clusterPriority
+
+	allPods[nodeCritical].Namespace = kubeapi.NamespaceSystem
+	allPods[nodeCritical].Spec.PriorityClassName = scheduling.SystemNodeCritical
+	nodePriority := scheduling.SystemCriticalPriority + 100
+	allPods[nodeCritical].Spec.Priority = &nodePriority
+
 	return allPods
 }
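The setup in the last hunk is the 1.13 way of marking a pod critical: priority is carried on the pod spec (PriorityClassName plus a resolved Priority value) rather than only via the kube-system namespace and the critical-pod annotation. A minimal sketch of the same pattern; makeNodeCriticalPod is a hypothetical helper and not part of the vendored file.

// Hypothetical helper mirroring the test setup above: a pod in kube-system
// whose spec carries the system-node-critical class and its resolved priority.
func makeNodeCriticalPod(name string) *v1.Pod {
	priority := scheduling.SystemCriticalPriority + 100 // same value the test assigns to nodeCritical
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: kubeapi.NamespaceSystem,
		},
		Spec: v1.PodSpec{
			PriorityClassName: scheduling.SystemNodeCritical,
			Priority:          &priority,
		},
	}
}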