Bumping k8s dependencies to 1.13

Cheng Xing
2018-11-16 14:08:25 -08:00
parent 305407125c
commit b4c0b68ec7
8002 changed files with 884099 additions and 276228 deletions


@@ -11,7 +11,6 @@ go_library(
"predicates.go",
"preemption.go",
"priorities.go",
"rescheduler.go",
"resource_quota.go",
"ubernetes_lite.go",
"ubernetes_lite_volumes.go",
@@ -19,7 +18,6 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/scheduling",
visibility = ["//visibility:public"],
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/scheduling:go_default_library",
@@ -27,6 +25,19 @@ go_library(
"//pkg/quota/evaluator/core:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
"//pkg/util/version:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/scheduling/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
@@ -35,19 +46,6 @@ go_library(
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/scheduling/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
@@ -56,17 +54,17 @@ go_test(
srcs = ["taints_test.go"],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
],
)


@@ -35,6 +35,10 @@ import (
_ "github.com/stretchr/testify/assert"
)
const (
defaultTimeout = 3 * time.Minute
)
var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
var cs clientset.Interface
var nodeList *v1.NodeList
@@ -42,7 +46,6 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
var systemPodsNo int
var ns string
f := framework.NewDefaultFramework("equivalence-cache")
ignoreLabels := framework.ImagePullerLabels
BeforeEach(func() {
cs = f.ClientSet
@@ -56,7 +59,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
// Every test case in this suite assumes that cluster add-on pods stay stable and
// cannot be run in parallel with any other test that touches Nodes or Pods.
// It is so because we need to have precise control on what's running in the cluster.
systemPods, err := framework.GetPodsInNamespace(cs, ns, ignoreLabels)
systemPods, err := framework.GetPodsInNamespace(cs, ns, map[string]string{})
Expect(err).NotTo(HaveOccurred())
systemPodsNo = 0
for _, pod := range systemPods {
@@ -65,7 +68,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
}
}
err = framework.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), int32(systemPodsNo), framework.PodReadyBeforeTimeout, ignoreLabels)
err = framework.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), int32(systemPodsNo), framework.PodReadyBeforeTimeout, map[string]string{})
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeList.Items {


@@ -18,6 +18,7 @@ package scheduling
import (
"fmt"
"reflect"
"time"
"k8s.io/api/core/v1"
@@ -81,6 +82,7 @@ var _ = SIGDescribe("LimitRange", func() {
By("Fetching the LimitRange to ensure it has proper values")
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
expected := v1.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit}
actual := v1.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default}
err = equalResourceRequirement(expected, actual)
@@ -140,6 +142,13 @@ var _ = SIGDescribe("LimitRange", func() {
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Update(limitRange)
Expect(err).NotTo(HaveOccurred())
By("Verifying LimitRange updating is effective")
Expect(wait.Poll(time.Second*2, time.Second*20, func() (bool, error) {
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
return reflect.DeepEqual(limitRange.Spec.Limits[0].Min, newMin), nil
})).NotTo(HaveOccurred())
By("Creating a Pod with less than former min resources")
pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
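The update check added above relies on wait.Poll from apimachinery. As a point of reference, here is a minimal standalone sketch of that polling pattern; the timings and the condition are made up for illustration and are not taken from this diff:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	start := time.Now()
	// Poll every 2 seconds, for up to 20 seconds, until the condition
	// function returns true (or reports an error).
	err := wait.Poll(2*time.Second, 20*time.Second, func() (bool, error) {
		return time.Since(start) > 5*time.Second, nil
	})
	fmt.Println("poll finished, err:", err)
}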


@@ -18,7 +18,6 @@ package scheduling
import (
"os"
"strings"
"time"
"k8s.io/api/core/v1"
@@ -35,7 +34,6 @@ import (
const (
testPodNamePrefix = "nvidia-gpu-"
cosOSImage = "Container-Optimized OS from Google"
// Nvidia driver installation can take upwards of 5 minutes.
driverInstallTimeout = 10 * time.Minute
)
@@ -69,15 +67,12 @@ func makeCudaAdditionDevicePluginTestPod() *v1.Pod {
return testPod
}
func isClusterRunningCOS(f *framework.Framework) bool {
func logOSImages(f *framework.Framework) {
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err, "getting node list")
for _, node := range nodeList.Items {
if !strings.Contains(node.Status.NodeInfo.OSImage, cosOSImage) {
return false
}
framework.Logf("Nodename: %v, OS Image: %v", node.Name, node.Status.NodeInfo.OSImage)
}
return true
}
func areGPUsAvailableOnAllSchedulableNodes(f *framework.Framework) bool {
@@ -111,14 +106,7 @@ func getGPUsAvailable(f *framework.Framework) int64 {
}
func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *framework.ContainerResourceGatherer {
// Skip the test if the base image is not COS.
// TODO: Add support for other base images.
// CUDA apps require host mounts which is not portable across base images (yet).
framework.Logf("Checking base image")
if !isClusterRunningCOS(f) {
Skip("Nvidia GPU tests are supproted only on Container Optimized OS image currently")
}
framework.Logf("Cluster is running on COS. Proceeding with test")
logOSImages(f)
dsYamlUrlFromEnv := os.Getenv("NVIDIA_DRIVER_INSTALLER_DAEMONSET")
if dsYamlUrlFromEnv != "" {
@@ -163,7 +151,7 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
return rsgather
}
func testNvidiaGPUsOnCOS(f *framework.Framework) {
func testNvidiaGPUs(f *framework.Framework) {
rsgather := SetupNVIDIAGPUNode(f, true)
framework.Logf("Creating as many pods as there are Nvidia GPUs and have the pods run a CUDA app")
podList := []*v1.Pod{}
@@ -186,7 +174,7 @@ func testNvidiaGPUsOnCOS(f *framework.Framework) {
var _ = SIGDescribe("[Feature:GPUDevicePlugin]", func() {
f := framework.NewDefaultFramework("device-plugin-gpus")
It("run Nvidia GPU Device Plugin tests on Container Optimized OS only", func() {
testNvidiaGPUsOnCOS(f)
It("run Nvidia GPU Device Plugin tests", func() {
testNvidiaGPUs(f)
})
})
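The helpers referenced above, areGPUsAvailableOnAllSchedulableNodes and getGPUsAvailable, are only named in this hunk, not shown in full. A hedged sketch of the general pattern, summing an extended resource over node allocatable capacity using the identifiers already imported in this file, might look like the following; the helper name and the "nvidia.com/gpu" resource name are assumptions, not taken from this diff:

// totalGPUs is a hypothetical helper (not from this commit): it sums an
// extended resource, assumed here to be "nvidia.com/gpu", over the
// allocatable capacity reported by every node in the cluster.
func totalGPUs(f *framework.Framework) int64 {
	nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
	framework.ExpectNoError(err, "getting node list")
	var total int64
	for _, node := range nodeList.Items {
		if q, ok := node.Status.Allocatable[v1.ResourceName("nvidia.com/gpu")]; ok {
			total += q.Value()
		}
	}
	return total
}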


@@ -47,6 +47,7 @@ var masterNodes sets.String
type pausePodConfig struct {
Name string
Namespace string
Affinity *v1.Affinity
Annotations, Labels, NodeSelector map[string]string
Resources *v1.ResourceRequirements
@@ -60,12 +61,10 @@ type pausePodConfig struct {
var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
var cs clientset.Interface
var nodeList *v1.NodeList
var systemPodsNo int
var totalPodCapacity int64
var RCName string
var ns string
f := framework.NewDefaultFramework("sched-pred")
ignoreLabels := framework.ImagePullerLabels
AfterEach(func() {
rc, err := cs.CoreV1().ReplicationControllers(ns).Get(RCName, metav1.GetOptions{})
@@ -81,30 +80,12 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
ns = f.Namespace.Name
nodeList = &v1.NodeList{}
framework.WaitForAllNodesHealthy(cs, time.Minute)
framework.AllNodesReady(cs, time.Minute)
masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
err := framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
// Every test case in this suite assumes that cluster add-on pods stay stable and
// cannot be run in parallel with any other test that touches Nodes or Pods.
// It is so because we need to have precise control on what's running in the cluster.
systemPods, err := framework.GetPodsInNamespace(cs, ns, ignoreLabels)
Expect(err).NotTo(HaveOccurred())
systemPodsNo = 0
for _, pod := range systemPods {
if !masterNodes.Has(pod.Spec.NodeName) && pod.DeletionTimestamp == nil {
systemPodsNo++
}
}
err = framework.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, ignoreLabels)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPodsSuccess(cs, metav1.NamespaceSystem, framework.ImagePullerLabels, framework.ImagePrePullingTimeout)
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeList.Items {
framework.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
framework.PrintAllKubeletPods(cs, node.Name)
@@ -238,9 +219,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
// 4. Create another pod with no affinity to any node that need 50% of the largest node CPU.
// 5. Make sure this additional pod is not scheduled.
/*
Testname: scheduler-resource-limits
Description: Ensure that scheduler accounts node resources correctly
and respects pods' resource requirements during scheduling.
Release : v1.9
Testname: Scheduler, resource limits
Description: Scheduling Pods MUST fail if the resource limits exceed Machine capacity.
*/
framework.ConformanceIt("validates resource limits of pods that are allowed to run ", func() {
framework.WaitForStableCluster(cs, masterNodes)
@@ -344,9 +325,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
// Test Nodes does not have any label, hence it should be impossible to schedule Pod with
// nonempty Selector set.
/*
Testname: scheduler-node-selector-not-matching
Description: Ensure that scheduler respects the NodeSelector field of
PodSpec during scheduling (when it does not match any node).
Release : v1.9
Testname: Scheduler, node selector not matching
Description: Create a Pod with a NodeSelector set to a value that does not match a node in the cluster. Since there are no nodes matching the criteria the Pod MUST not be scheduled.
*/
framework.ConformanceIt("validates that NodeSelector is respected if not matching ", func() {
By("Trying to schedule Pod with nonempty NodeSelector.")
@@ -367,9 +348,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
})
/*
Testname: scheduler-node-selector-matching
Description: Ensure that scheduler respects the NodeSelector field
of PodSpec during scheduling (when it matches).
Release : v1.9
Testname: Scheduler, node selector matching
Description: Create a label on the node {k: v}. Then create a Pod with a NodeSelector set to {k: v}. Check to see if the Pod is scheduled. When the NodeSelector matches then Pod MUST be scheduled on that node.
*/
framework.ConformanceIt("validates that NodeSelector is respected if matching ", func() {
nodeName := GetNodeThatCanRunPod(f)
@@ -622,9 +603,11 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
})
func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
var gracePeriod = int64(1)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: conf.Name,
Namespace: conf.Namespace,
Labels: conf.Labels,
Annotations: conf.Annotations,
OwnerReferences: conf.OwnerReferences,
@@ -639,9 +622,10 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
Ports: conf.Ports,
},
},
Tolerations: conf.Tolerations,
NodeName: conf.NodeName,
PriorityClassName: conf.PriorityClassName,
Tolerations: conf.Tolerations,
NodeName: conf.NodeName,
PriorityClassName: conf.PriorityClassName,
TerminationGracePeriodSeconds: &gracePeriod,
},
}
if conf.Resources != nil {
@@ -651,7 +635,11 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
}
func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(initPausePod(f, conf))
namespace := conf.Namespace
if len(namespace) == 0 {
namespace = f.Namespace.Name
}
pod, err := f.ClientSet.CoreV1().Pods(namespace).Create(initPausePod(f, conf))
framework.ExpectNoError(err)
return pod
}
@@ -659,7 +647,7 @@ func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
func runPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
pod := createPausePod(f, conf)
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(conf.Name, metav1.GetOptions{})
pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(conf.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
return pod
}


@@ -34,7 +34,7 @@ import (
_ "github.com/stretchr/testify/assert"
)
var _ = SIGDescribe("SchedulerPreemption [Serial] [Feature:PodPreemption]", func() {
var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
var cs clientset.Interface
var nodeList *v1.NodeList
var ns string
@@ -167,6 +167,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial] [Feature:PodPreemption]", func
// Create a critical pod and make sure it is scheduled.
runPausePod(f, pausePodConfig{
Name: "critical-pod",
Namespace: metav1.NamespaceSystem,
PriorityClassName: scheduling.SystemClusterCritical,
Resources: &v1.ResourceRequirements{
Requests: podRes,
@@ -183,6 +184,9 @@ var _ = SIGDescribe("SchedulerPreemption [Serial] [Feature:PodPreemption]", func
framework.ExpectNoError(err)
Expect(livePod.DeletionTimestamp).To(BeNil())
}
// Clean-up the critical pod
err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete("critical-pod", metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
})
// This test verifies that when a high priority pod is pending and its
@@ -311,7 +315,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial] [Feature:PodPreemption]", func
})
})
var _ = SIGDescribe("PodPriorityResolution [Serial] [Feature:PodPreemption]", func() {
var _ = SIGDescribe("PodPriorityResolution [Serial]", func() {
var cs clientset.Interface
var ns string
f := framework.NewDefaultFramework("sched-pod-priority")
@@ -334,10 +338,14 @@ var _ = SIGDescribe("PodPriorityResolution [Serial] [Feature:PodPreemption]", fu
for i, spc := range systemPriorityClasses {
pod := createPausePod(f, pausePodConfig{
Name: fmt.Sprintf("pod%d-%v", i, spc),
Namespace: metav1.NamespaceSystem,
PriorityClassName: spc,
})
Expect(pod.Spec.Priority).NotTo(BeNil())
framework.Logf("Created pod: %v", pod.Name)
// Clean-up the pod.
err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
}
})
})


@@ -63,7 +63,6 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
var systemPodsNo int
var ns string
f := framework.NewDefaultFramework("sched-priority")
ignoreLabels := framework.ImagePullerLabels
AfterEach(func() {
})
@@ -78,7 +77,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
err := framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
err = framework.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, ignoreLabels)
err = framework.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, map[string]string{})
Expect(err).NotTo(HaveOccurred())
})


@@ -1,133 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"fmt"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
defaultTimeout = 3 * time.Minute
)
// This test requires Rescheduler to be enabled.
var _ = SIGDescribe("Rescheduler [Serial]", func() {
f := framework.NewDefaultFramework("rescheduler")
var ns string
var totalMillicores int
BeforeEach(func() {
framework.Skipf("Rescheduler is being deprecated soon in Kubernetes 1.10")
ns = f.Namespace.Name
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeCount := len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
totalMillicores = int((&cpu).MilliValue()) * nodeCount
})
It("should ensure that critical pod is scheduled in case there is no resources available", func() {
By("reserving all available cpu")
err := reserveAllCpu(f, "reserve-all-cpu", totalMillicores)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, "reserve-all-cpu")
framework.ExpectNoError(err)
By("creating a new instance of Dashboard and waiting for Dashboard to be scheduled")
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kubernetes-dashboard"}))
listOpts := metav1.ListOptions{LabelSelector: label.String()}
deployments, err := f.ClientSet.ExtensionsV1beta1().Deployments(metav1.NamespaceSystem).List(listOpts)
framework.ExpectNoError(err)
Expect(len(deployments.Items)).Should(Equal(1))
deployment := deployments.Items[0]
replicas := uint(*(deployment.Spec.Replicas))
err = framework.ScaleDeployment(f.ClientSet, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas+1, true)
defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas, true))
framework.ExpectNoError(err)
})
})
func reserveAllCpu(f *framework.Framework, id string, millicores int) error {
timeout := 5 * time.Minute
replicas := millicores / 100
reserveCpu(f, id, 1, 100)
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, id, uint(replicas), false))
for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
pods, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, framework.ImagePullerLabels)
if err != nil {
return err
}
if len(pods) != replicas {
continue
}
allRunningOrUnschedulable := true
for _, pod := range pods {
if !podRunningOrUnschedulable(pod) {
allRunningOrUnschedulable = false
break
}
}
if allRunningOrUnschedulable {
return nil
}
}
return fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", id, timeout, replicas)
}
func podRunningOrUnschedulable(pod *v1.Pod) bool {
_, cond := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
if cond != nil && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" {
return true
}
running, _ := testutils.PodRunningReady(pod)
return running
}
func reserveCpu(f *framework.Framework, id string, replicas, millicores int) {
By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
request := int64(millicores / replicas)
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: f.Namespace.Name,
Timeout: defaultTimeout,
Image: imageutils.GetPauseImageName(),
Replicas: replicas,
CpuRequest: request,
}
framework.ExpectNoError(framework.RunRC(*config))
}


@@ -22,6 +22,7 @@ import (
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/api/scheduling/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -731,8 +732,539 @@ var _ = SIGDescribe("ResourceQuota", func() {
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
})
})
var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
f := framework.NewDefaultFramework("scope-selectors")
It("should verify ResourceQuota with best effort scope using scope-selectors.", func() {
By("Creating a ResourceQuota with best effort scope")
resourceQuotaBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector("quota-besteffort", v1.ResourceQuotaScopeBestEffort))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a ResourceQuota with not best effort scope")
resourceQuotaNotBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector("quota-not-besteffort", v1.ResourceQuotaScopeNotBestEffort))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a best-effort pod")
pod := newTestPodForQuota(f, podName, v1.ResourceList{}, v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with best effort scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not best effort ignored the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a not best-effort pod")
requests := v1.ResourceList{}
requests[v1.ResourceCPU] = resource.MustParse("500m")
requests[v1.ResourceMemory] = resource.MustParse("200Mi")
limits := v1.ResourceList{}
limits[v1.ResourceCPU] = resource.MustParse("1")
limits[v1.ResourceMemory] = resource.MustParse("400Mi")
pod = newTestPodForQuota(f, "burstable-pod", requests, limits)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not best effort scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with best effort scope ignored the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should verify ResourceQuota with terminating scopes through scope selectors.", func() {
By("Creating a ResourceQuota with terminating scope")
quotaTerminatingName := "quota-terminating"
resourceQuotaTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector(quotaTerminatingName, v1.ResourceQuotaScopeTerminating))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a ResourceQuota with not terminating scope")
quotaNotTerminatingName := "quota-not-terminating"
resourceQuotaNotTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector(quotaNotTerminatingName, v1.ResourceQuotaScopeNotTerminating))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a long running pod")
podName := "test-pod"
requests := v1.ResourceList{}
requests[v1.ResourceCPU] = resource.MustParse("500m")
requests[v1.ResourceMemory] = resource.MustParse("200Mi")
limits := v1.ResourceList{}
limits[v1.ResourceCPU] = resource.MustParse("1")
limits[v1.ResourceMemory] = resource.MustParse("400Mi")
pod := newTestPodForQuota(f, podName, requests, limits)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not terminating scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
usedResources[v1.ResourceRequestsCPU] = requests[v1.ResourceCPU]
usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory]
usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU]
usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory]
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with terminating scope ignored the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a terminating pod")
podName = "terminating-pod"
pod = newTestPodForQuota(f, podName, requests, limits)
activeDeadlineSeconds := int64(3600)
pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with terminating scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
usedResources[v1.ResourceRequestsCPU] = requests[v1.ResourceCPU]
usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory]
usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU]
usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory]
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not terminating scope ignored the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
})
})
var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
f := framework.NewDefaultFramework("resourcequota-priorityclass")
It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class.", func() {
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&v1beta1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass1"}, Value: int32(1000)})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
By("Creating a ResourceQuota with priority class scope")
resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass1"}))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a pod with priority class")
podName := "testpod-pclass1"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass1")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with priority class scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class.", func() {
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&v1beta1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass2"}, Value: int32(1000)})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
By("Creating a ResourceQuota with priority class scope")
resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass2"}))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating first pod with priority class should pass")
podName := "testpod-pclass2-1"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass2")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with priority class scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating 2nd pod with priority class should fail")
podName2 := "testpod-pclass2-2"
pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass2")
pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod2)
Expect(err).To(HaveOccurred())
By("Deleting first pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class.", func() {
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&v1beta1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass3"}, Value: int32(1000)})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
By("Creating a ResourceQuota with priority class scope")
resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass4"}))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a pod with priority class with pclass3")
podName := "testpod-pclass3-1"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass3")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with priority class scope remains same")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a 2nd pod with priority class pclass3")
podName2 := "testpod-pclass2-2"
pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass3")
pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod2)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with priority class scope remains same")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting both pods")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod2.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
})
It("should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes.", func() {
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&v1beta1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass5"}, Value: int32(1000)})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
_, err = f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&v1beta1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass6"}, Value: int32(1000)})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("2")
By("Creating a ResourceQuota with priority class scope")
resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass5", "pclass6"}))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a pod with priority class pclass5")
podName := "testpod-pclass5"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass5")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with priority class is updated with the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating 2nd pod with priority class pclass6")
podName2 := "testpod-pclass6"
pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass6")
pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod2)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with priority class scope is updated with the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("2")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting both pods")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod2.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn).", func() {
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&v1beta1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass7"}, Value: int32(1000)})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
By("Creating a ResourceQuota with priority class scope")
resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpNotIn, []string{"pclass7"}))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a pod with priority class pclass7")
podName := "testpod-pclass7"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass7")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with priority class is not used")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
})
It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists).", func() {
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&v1beta1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass8"}, Value: int32(1000)})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
By("Creating a ResourceQuota with priority class scope")
resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpExists, []string{}))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a pod with priority class pclass8")
podName := "testpod-pclass8"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass8")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with priority class is updated with the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class.", func() {
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&v1beta1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass9"}, Value: int32(1000)})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
hard[v1.ResourceRequestsCPU] = resource.MustParse("1")
hard[v1.ResourceRequestsMemory] = resource.MustParse("1Gi")
hard[v1.ResourceLimitsCPU] = resource.MustParse("3")
hard[v1.ResourceLimitsMemory] = resource.MustParse("3Gi")
By("Creating a ResourceQuota with priority class scope")
resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass9"}))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0Gi")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0Gi")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a pod with priority class")
podName := "testpod-pclass9"
request := v1.ResourceList{}
request[v1.ResourceCPU] = resource.MustParse("1")
request[v1.ResourceMemory] = resource.MustParse("1Gi")
limit := v1.ResourceList{}
limit[v1.ResourceCPU] = resource.MustParse("2")
limit[v1.ResourceMemory] = resource.MustParse("2Gi")
pod := newTestPodForQuotaWithPriority(f, podName, request, limit, "pclass9")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with priority class scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("1")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("1Gi")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("2")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("2Gi")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0Gi")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0Gi")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
})
})
// newTestResourceQuotaWithScopeSelector returns a quota that enforces default constraints for testing with scopeSelectors
func newTestResourceQuotaWithScopeSelector(name string, scope v1.ResourceQuotaScope) *v1.ResourceQuota {
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("5")
switch scope {
case v1.ResourceQuotaScopeTerminating, v1.ResourceQuotaScopeNotTerminating:
hard[v1.ResourceRequestsCPU] = resource.MustParse("1")
hard[v1.ResourceRequestsMemory] = resource.MustParse("500Mi")
hard[v1.ResourceLimitsCPU] = resource.MustParse("2")
hard[v1.ResourceLimitsMemory] = resource.MustParse("1Gi")
}
return &v1.ResourceQuota{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.ResourceQuotaSpec{Hard: hard,
ScopeSelector: &v1.ScopeSelector{
MatchExpressions: []v1.ScopedResourceSelectorRequirement{
{
ScopeName: scope,
Operator: v1.ScopeSelectorOpExists},
},
},
},
}
}
// newTestResourceQuotaWithScope returns a quota that enforces default constraints for testing with scopes
func newTestResourceQuotaWithScope(name string, scope v1.ResourceQuotaScope) *v1.ResourceQuota {
hard := v1.ResourceList{}
@@ -750,6 +1282,25 @@ func newTestResourceQuotaWithScope(name string, scope v1.ResourceQuotaScope) *v1
}
}
// newTestResourceQuotaWithScopeForPriorityClass returns a quota
// that enforces default constraints for testing with ResourceQuotaScopePriorityClass scope
func newTestResourceQuotaWithScopeForPriorityClass(name string, hard v1.ResourceList, op v1.ScopeSelectorOperator, values []string) *v1.ResourceQuota {
return &v1.ResourceQuota{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.ResourceQuotaSpec{Hard: hard,
ScopeSelector: &v1.ScopeSelector{
MatchExpressions: []v1.ScopedResourceSelectorRequirement{
{
ScopeName: v1.ResourceQuotaScopePriorityClass,
Operator: op,
Values: values,
},
},
},
},
}
}
// newTestResourceQuotaForEphemeralStorage returns a quota that enforces default constraints for testing feature LocalStorageCapacityIsolation
func newTestResourceQuotaForEphemeralStorage(name string) *v1.ResourceQuota {
hard := v1.ResourceList{}
@@ -810,6 +1361,28 @@ func newTestPodForQuota(f *framework.Framework, name string, requests v1.Resourc
}
}
// newTestPodForQuotaWithPriority returns a pod that has the specified requests, limits and priority class
func newTestPodForQuotaWithPriority(f *framework.Framework, name string, requests v1.ResourceList, limits v1.ResourceList, pclass string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pause",
Image: imageutils.GetPauseImageName(),
Resources: v1.ResourceRequirements{
Requests: requests,
Limits: limits,
},
},
},
PriorityClassName: pclass,
},
}
}
// newTestPersistentVolumeClaimForQuota returns a simple persistent volume claim
func newTestPersistentVolumeClaimForQuota(name string) *v1.PersistentVolumeClaim {
return &v1.PersistentVolumeClaim{


@@ -26,7 +26,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
@@ -110,9 +109,9 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
Expect(err).NotTo(HaveOccurred())
// Now make sure they're spread across zones
zoneNames, err := getZoneNames(f.ClientSet)
zoneNames, err := framework.GetClusterZones(f.ClientSet)
Expect(err).NotTo(HaveOccurred())
Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames)).To(Equal(true))
Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(Equal(true))
}
// Find the name of the zone in which a Node is running
@@ -126,25 +125,9 @@ func getZoneNameForNode(node v1.Node) (string, error) {
node.Name, kubeletapis.LabelZoneFailureDomain)
}
// TODO (verult) Merge with framework.GetClusterZones()
// Find the names of all zones in which we have nodes in this cluster.
func getZoneNames(c clientset.Interface) ([]string, error) {
zoneNames := sets.NewString()
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return nil, err
}
for _, node := range nodes.Items {
zoneName, err := getZoneNameForNode(node)
Expect(err).NotTo(HaveOccurred())
zoneNames.Insert(zoneName)
}
return zoneNames.List(), nil
}
// Return the number of zones in which we have nodes in this cluster.
func getZoneCount(c clientset.Interface) (int, error) {
zoneNames, err := getZoneNames(c)
zoneNames, err := framework.GetClusterZones(c)
if err != nil {
return -1, err
}
@@ -239,7 +222,7 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
Expect(err).NotTo(HaveOccurred())
// Now make sure they're spread across zones
zoneNames, err := getZoneNames(f.ClientSet)
zoneNames, err := framework.GetClusterZones(f.ClientSet)
Expect(err).NotTo(HaveOccurred())
Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames)).To(Equal(true))
Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(Equal(true))
}
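The call sites above switch from the local getZoneNames helper to framework.GetClusterZones, which returns a sets.String rather than a []string. A rough sketch of an equivalent helper, reconstructed from the node-label lookup the removed code performed (the actual implementation of framework.GetClusterZones is an assumption here, and this sketch reuses the sets and kubeletapis identifiers even though the commit drops the sets import from this file), could be:

// clusterZones is a hypothetical stand-in for framework.GetClusterZones:
// list all nodes and collect each node's failure-domain zone label into
// a string set.
func clusterZones(c clientset.Interface) (sets.String, error) {
	zones := sets.NewString()
	nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	for _, node := range nodes.Items {
		if zone, ok := node.Labels[kubeletapis.LabelZoneFailureDomain]; ok {
			zones.Insert(zone)
		}
	}
	return zones, nil
}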


@@ -185,9 +185,9 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
c := f.ClientSet
ns := f.Namespace.Name
zones, err := getZoneNames(c)
zones, err := framework.GetClusterZones(c)
Expect(err).NotTo(HaveOccurred())
zonelist := zones.List()
By("Creating static PVs across zones")
configs := make([]*staticPVTestConfig, podCount)
for i := range configs {
@@ -208,7 +208,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
}()
for i, config := range configs {
zone := zones[i%len(zones)]
zone := zonelist[i%len(zones)]
config.pvSource, err = framework.CreatePVSource(zone)
Expect(err).NotTo(HaveOccurred())