Bumping k8s dependencies to 1.13
vendor/k8s.io/kubernetes/test/e2e/scheduling/predicates.go (generated, vendored): 58 lines changed
@@ -47,6 +47,7 @@ var masterNodes sets.String

type pausePodConfig struct {
	Name string
	Namespace string
	Affinity *v1.Affinity
	Annotations, Labels, NodeSelector map[string]string
	Resources *v1.ResourceRequirements
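The first hunk adds a Namespace field to pausePodConfig, so the helpers later in this diff can place pause pods outside the framework's per-test namespace. A minimal sketch of how a caller might use it, assuming the createPausePod helper shown further down; the function name, pod name, and "sched-pred-extra" namespace are illustrative, not part of the change:

```go
// Sketch only: assumes the pausePodConfig and createPausePod helpers from this
// file; "sched-pred-extra" is a hypothetical, pre-created namespace.
func createProbePodInExtraNamespace(f *framework.Framework) *v1.Pod {
	conf := pausePodConfig{
		Name:      "probe-pod",
		Namespace: "sched-pred-extra", // new field; empty string falls back to f.Namespace.Name
		Labels:    map[string]string{"purpose": "scheduling-probe"},
	}
	return createPausePod(f, conf)
}
```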
@@ -60,12 +61,10 @@ type pausePodConfig struct {
var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
	var cs clientset.Interface
	var nodeList *v1.NodeList
	var systemPodsNo int
	var totalPodCapacity int64
	var RCName string
	var ns string
	f := framework.NewDefaultFramework("sched-pred")
	ignoreLabels := framework.ImagePullerLabels

	AfterEach(func() {
		rc, err := cs.CoreV1().ReplicationControllers(ns).Get(RCName, metav1.GetOptions{})
@@ -81,30 +80,12 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
		ns = f.Namespace.Name
		nodeList = &v1.NodeList{}

		framework.WaitForAllNodesHealthy(cs, time.Minute)
		framework.AllNodesReady(cs, time.Minute)
		masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)

		err := framework.CheckTestingNSDeletedExcept(cs, ns)
		framework.ExpectNoError(err)

		// Every test case in this suite assumes that cluster add-on pods stay stable and
		// cannot be run in parallel with any other test that touches Nodes or Pods.
		// It is so because we need to have precise control on what's running in the cluster.
		systemPods, err := framework.GetPodsInNamespace(cs, ns, ignoreLabels)
		Expect(err).NotTo(HaveOccurred())
		systemPodsNo = 0
		for _, pod := range systemPods {
			if !masterNodes.Has(pod.Spec.NodeName) && pod.DeletionTimestamp == nil {
				systemPodsNo++
			}
		}

		err = framework.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, ignoreLabels)
		Expect(err).NotTo(HaveOccurred())

		err = framework.WaitForPodsSuccess(cs, metav1.NamespaceSystem, framework.ImagePullerLabels, framework.ImagePrePullingTimeout)
		Expect(err).NotTo(HaveOccurred())

		for _, node := range nodeList.Items {
			framework.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
			framework.PrintAllKubeletPods(cs, node.Name)
@@ -238,9 +219,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
	// 4. Create another pod with no affinity to any node that need 50% of the largest node CPU.
	// 5. Make sure this additional pod is not scheduled.
	/*
		Testname: scheduler-resource-limits
		Description: Ensure that scheduler accounts node resources correctly
		and respects pods' resource requirements during scheduling.
		Release : v1.9
		Testname: Scheduler, resource limits
		Description: Scheduling Pods MUST fail if the resource limits exceed Machine capacity.
	*/
	framework.ConformanceIt("validates resource limits of pods that are allowed to run ", func() {
		framework.WaitForStableCluster(cs, masterNodes)
@@ -344,9 +325,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
	// Test Nodes does not have any label, hence it should be impossible to schedule Pod with
	// nonempty Selector set.
	/*
		Testname: scheduler-node-selector-not-matching
		Description: Ensure that scheduler respects the NodeSelector field of
		PodSpec during scheduling (when it does not match any node).
		Release : v1.9
		Testname: Scheduler, node selector not matching
		Description: Create a Pod with a NodeSelector set to a value that does not match a node in the cluster. Since there are no nodes matching the criteria the Pod MUST not be scheduled.
	*/
	framework.ConformanceIt("validates that NodeSelector is respected if not matching ", func() {
		By("Trying to schedule Pod with nonempty NodeSelector.")
@@ -367,9 +348,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
	})

	/*
		Testname: scheduler-node-selector-matching
		Description: Ensure that scheduler respects the NodeSelector field
		of PodSpec during scheduling (when it matches).
		Release : v1.9
		Testname: Scheduler, node selector matching
		Description: Create a label on the node {k: v}. Then create a Pod with a NodeSelector set to {k: v}. Check to see if the Pod is scheduled. When the NodeSelector matches then Pod MUST be scheduled on that node.
	*/
	framework.ConformanceIt("validates that NodeSelector is respected if matching ", func() {
		nodeName := GetNodeThatCanRunPod(f)
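The rewritten conformance description spells the flow out: label a node with {k: v}, then create a Pod whose NodeSelector is {k: v} and verify it lands on that node. Roughly, the pod side of that maps onto the config struct from this file as sketched below; the label key, value, and pod name are illustrative, the real test generates them:

```go
// Sketch only: shows how the {k: v} NodeSelector from the conformance text maps
// onto pausePodConfig; the key and value here are made up.
func nodeSelectorProbePod() pausePodConfig {
	return pausePodConfig{
		Name: "node-selector-probe",
		// The scheduler will only place this pod on nodes carrying this exact label.
		NodeSelector: map[string]string{"kubernetes.io/e2e-label": "42"},
	}
}
```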
@@ -622,9 +603,11 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
})

func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
	var gracePeriod = int64(1)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: conf.Name,
			Namespace: conf.Namespace,
			Labels: conf.Labels,
			Annotations: conf.Annotations,
			OwnerReferences: conf.OwnerReferences,
@@ -639,9 +622,10 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
					Ports: conf.Ports,
				},
			},
			Tolerations: conf.Tolerations,
			NodeName: conf.NodeName,
			PriorityClassName: conf.PriorityClassName,
			Tolerations: conf.Tolerations,
			NodeName: conf.NodeName,
			PriorityClassName: conf.PriorityClassName,
			TerminationGracePeriodSeconds: &gracePeriod,
		},
	}
	if conf.Resources != nil {
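initPausePod now sets a one-second termination grace period on every pause pod, which keeps cleanup between these [Serial] cases fast. TerminationGracePeriodSeconds is a *int64 in the core/v1 API, hence the local gracePeriod variable taken by address. A stand-alone sketch of the same pattern outside the e2e framework; the pod name and pause image tag are illustrative:

```go
// Sketch only: the pointer-to-int64 pattern used by initPausePod above,
// shown with a self-contained pause pod. Image tag and names are illustrative.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	gracePeriod := int64(1) // pause containers hold no state, so 1s is enough
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "pause", Namespace: "default"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{Name: "pause", Image: "k8s.gcr.io/pause:3.1"},
			},
			TerminationGracePeriodSeconds: &gracePeriod, // API field is *int64
		},
	}
	fmt.Printf("%s terminates within %ds\n", pod.Name, *pod.Spec.TerminationGracePeriodSeconds)
}
```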
@@ -651,7 +635,11 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
}

func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(initPausePod(f, conf))
	namespace := conf.Namespace
	if len(namespace) == 0 {
		namespace = f.Namespace.Name
	}
	pod, err := f.ClientSet.CoreV1().Pods(namespace).Create(initPausePod(f, conf))
	framework.ExpectNoError(err)
	return pod
}
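createPausePod now resolves its target namespace itself: conf.Namespace when set, otherwise the framework's per-test namespace. The same empty-string fallback, factored out as a tiny helper for clarity; resolveNamespace is our name, not something in the file:

```go
// Sketch only: the fallback rule used by createPausePod above.
// resolveNamespace("", "e2e-sched-pred-1234") == "e2e-sched-pred-1234"
// resolveNamespace("sched-pred-extra", "e2e-sched-pred-1234") == "sched-pred-extra"
func resolveNamespace(requested, defaultNS string) string {
	if len(requested) == 0 {
		return defaultNS
	}
	return requested
}
```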
@@ -659,7 +647,7 @@ func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
func runPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
	pod := createPausePod(f, conf)
	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(conf.Name, metav1.GetOptions{})
	pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(conf.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	return pod
}
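With pods no longer confined to f.Namespace.Name, runPausePod re-reads the pod from pod.Namespace, so the Get still finds pods created in other namespaces. A hedged sketch of the equivalent read-back against a plain client-go clientset (pre-context-argument signatures, as in this vendored release); the helper name is ours and the imports are assumed:

```go
// Sketch only: re-fetch a pod using the namespace recorded on the object itself,
// mirroring the runPausePod change above. Assumes client-go's kubernetes and
// metav1 packages are imported; refreshPod is not part of the e2e file.
func refreshPod(cs kubernetes.Interface, pod *v1.Pod) (*v1.Pod, error) {
	// Use the namespace the pod was actually created in, not an assumed default.
	return cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
}
```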