Add generated files

This PR adds generated files under pkg/client and the vendor folder.
Author: xing-yang
Date: 2018-07-12 10:55:15 -07:00
parent 36b1de0341
commit e213d1890d
17729 changed files with 5090889 additions and 0 deletions

vendor/k8s.io/kubernetes/test/e2e/scheduling/BUILD generated vendored Normal file

@@ -0,0 +1,85 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"equivalence_cache_predicates.go",
"events.go",
"framework.go",
"limit_range.go",
"nvidia-gpus.go",
"predicates.go",
"preemption.go",
"priorities.go",
"rescheduler.go",
"resource_quota.go",
"ubernetes_lite.go",
"ubernetes_lite_volumes.go",
],
importpath = "k8s.io/kubernetes/test/e2e/scheduling",
visibility = ["//visibility:public"],
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/scheduling:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/quota/evaluator/core:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
"//pkg/util/version:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/scheduling/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["taints_test.go"],
embed = [":go_default_library"],
deps = [
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

vendor/k8s.io/kubernetes/test/e2e/scheduling/OWNERS generated vendored Normal file

@@ -0,0 +1,4 @@
approvers:
- sig-scheduling-maintainers
reviewers:
- sig-scheduling

vendor/k8s.io/kubernetes/test/e2e/scheduling/equivalence_cache_predicates.go generated vendored Normal file

@@ -0,0 +1,287 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"fmt"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
_ "github.com/stretchr/testify/assert"
)
var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
var cs clientset.Interface
var nodeList *v1.NodeList
var masterNodes sets.String
var systemPodsNo int
var ns string
f := framework.NewDefaultFramework("equivalence-cache")
ignoreLabels := framework.ImagePullerLabels
BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
framework.WaitForAllNodesHealthy(cs, time.Minute)
masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
framework.ExpectNoError(framework.CheckTestingNSDeletedExcept(cs, ns))
// Every test case in this suite assumes that cluster add-on pods stay stable and
// cannot be run in parallel with any other test that touches Nodes or Pods.
// This is because we need precise control over what's running in the cluster.
systemPods, err := framework.GetPodsInNamespace(cs, ns, ignoreLabels)
Expect(err).NotTo(HaveOccurred())
systemPodsNo = 0
for _, pod := range systemPods {
if !masterNodes.Has(pod.Spec.NodeName) && pod.DeletionTimestamp == nil {
systemPodsNo++
}
}
err = framework.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), int32(systemPodsNo), framework.PodReadyBeforeTimeout, ignoreLabels)
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeList.Items {
framework.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
framework.PrintAllKubeletPods(cs, node.Name)
}
})
// This test verifies that GeneralPredicates works as expected:
// When a replica pod (with HostPorts) is scheduled to a node, it invalidates the GeneralPredicates cache on that node,
// so that subsequent replica pods with the same host port claim will be rejected.
// We force all replica pods to bind to the same node so there will always be conflicts.
It("validates GeneralPredicates is properly invalidated when a pod is scheduled [Slow]", func() {
By("Launching a RC with two replica pods with HostPorts")
nodeName := getNodeThatCanRunPodWithoutToleration(f)
rcName := "host-port"
// bind all replicas to same node
nodeSelector := map[string]string{"kubernetes.io/hostname": nodeName}
By("One pod should be scheduled, the other should be rejected")
// CreateNodeSelectorPods creates an RC with host port 4321
WaitForSchedulerAfterAction(f, func() error {
err := CreateNodeSelectorPods(f, rcName, 2, nodeSelector, false)
return err
}, ns, rcName, false)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, rcName)
// the first replica pod is scheduled, and the second pod will be rejected.
verifyResult(cs, 1, 1, ns)
})
// This test verifies that MatchInterPodAffinity works as expected.
// The equivalence cache does not treat inter-pod affinity (or anti-affinity) specially (unless a node label changes),
// because the current predicates algorithm ensures that a newly scheduled pod does not break existing affinity in the cluster.
It("validates pod affinity works properly when new replica pod is scheduled", func() {
// create a pod running with label {security: S1}, and choose this node
nodeName, _ := runAndKeepPodWithLabelAndGetNodeName(f)
By("Trying to apply a random label on the found node.")
// we need to use real failure domains, since the scheduler only knows about those
k := "failure-domain.beta.kubernetes.io/zone"
v := "equivalence-e2e-test"
oldValue := framework.AddOrUpdateLabelOnNodeAndReturnOldValue(cs, nodeName, k, v)
framework.ExpectNodeHasLabel(cs, nodeName, k, v)
// restore the node label
defer framework.AddOrUpdateLabelOnNode(cs, nodeName, k, oldValue)
By("Trying to schedule RC with Pod Affinity should success.")
framework.WaitForStableCluster(cs, masterNodes)
affinityRCName := "with-pod-affinity-" + string(uuid.NewUUID())
replica := 2
labelsMap := map[string]string{
"name": affinityRCName,
}
affinity := &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "security",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"S1"},
},
},
},
TopologyKey: k,
Namespaces: []string{ns},
},
},
},
}
rc := getRCWithInterPodAffinity(affinityRCName, labelsMap, replica, affinity, imageutils.GetPauseImageName())
defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, affinityRCName)
// RC should be running successfully
// TODO: WaitForSchedulerAfterAction() can only be used to wait for a failure event,
// not for a successful RC, since no specific pod name can be provided.
_, err := cs.CoreV1().ReplicationControllers(ns).Create(rc)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForControlledPodsRunning(cs, ns, affinityRCName, api.Kind("ReplicationController")))
By("Remove node failure domain label")
framework.RemoveLabelOffNode(cs, nodeName, k)
By("Trying to schedule another equivalent Pod should fail due to node label has been removed.")
// use scale to create another equivalent pod and wait for failure event
WaitForSchedulerAfterAction(f, func() error {
err := framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, affinityRCName, uint(replica+1), false)
return err
}, ns, affinityRCName, false)
// and this new pod should be rejected since the node label has been updated
verifyReplicasResult(cs, replica, 1, ns, affinityRCName)
})
// This test verifies that MatchInterPodAffinity (anti-affinity) is respected as expected.
It("validates pod anti-affinity works properly when new replica pod is scheduled", func() {
By("Launching two pods on two distinct nodes to get two node names")
CreateHostPortPods(f, "host-port", 2, true)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, "host-port")
podList, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{})
framework.ExpectNoError(err)
Expect(len(podList.Items)).To(Equal(2))
nodeNames := []string{podList.Items[0].Spec.NodeName, podList.Items[1].Spec.NodeName}
Expect(nodeNames[0]).ToNot(Equal(nodeNames[1]))
By("Applying a random label to both nodes.")
k := "e2e.inter-pod-affinity.kubernetes.io/zone"
v := "equivalence-e2etest"
for _, nodeName := range nodeNames {
framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
framework.ExpectNodeHasLabel(cs, nodeName, k, v)
defer framework.RemoveLabelOffNode(cs, nodeName, k)
}
By("Trying to launch a pod with the service label on the selected nodes.")
// run a pod with label {"service": "S1"} and expect it to be running
runPausePod(f, pausePodConfig{
Name: "with-label-" + string(uuid.NewUUID()),
Labels: map[string]string{"service": "S1"},
NodeSelector: map[string]string{k: v}, // only launch on our two nodes
})
By("Trying to launch RC with podAntiAffinity on these two nodes should be rejected.")
labelRCName := "with-podantiaffinity-" + string(uuid.NewUUID())
replica := 2
labelsMap := map[string]string{
"name": labelRCName,
}
affinity := &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "service",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"S1"},
},
},
},
TopologyKey: k,
Namespaces: []string{ns},
},
},
},
}
rc := getRCWithInterPodAffinityNodeSelector(labelRCName, labelsMap, replica, affinity,
imageutils.GetPauseImageName(), map[string]string{k: v})
defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, labelRCName)
WaitForSchedulerAfterAction(f, func() error {
_, err := cs.CoreV1().ReplicationControllers(ns).Create(rc)
return err
}, ns, labelRCName, false)
// both replicas should be rejected, since podAntiAffinity declares anti-affinity with the pod labeled {"service": "S1"}
verifyReplicasResult(cs, 0, replica, ns, labelRCName)
})
})
// getRCWithInterPodAffinity returns RC with given affinity rules.
func getRCWithInterPodAffinity(name string, labelsMap map[string]string, replica int, affinity *v1.Affinity, image string) *v1.ReplicationController {
return getRCWithInterPodAffinityNodeSelector(name, labelsMap, replica, affinity, image, map[string]string{})
}
// getRCWithInterPodAffinityNodeSelector returns RC with given affinity rules and node selector.
func getRCWithInterPodAffinityNodeSelector(name string, labelsMap map[string]string, replica int, affinity *v1.Affinity, image string, nodeSelector map[string]string) *v1.ReplicationController {
replicaInt32 := int32(replica)
return &v1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.ReplicationControllerSpec{
Replicas: &replicaInt32,
Selector: labelsMap,
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labelsMap,
},
Spec: v1.PodSpec{
Affinity: affinity,
Containers: []v1.Container{
{
Name: name,
Image: image,
},
},
DNSPolicy: v1.DNSDefault,
NodeSelector: nodeSelector,
},
},
},
}
}
func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nodeSelector map[string]string, expectRunning bool) error {
By(fmt.Sprintf("Running RC which reserves host port and defines node selector"))
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: f.Namespace.Name,
Timeout: defaultTimeout,
Image: imageutils.GetPauseImageName(),
Replicas: replicas,
HostPorts: map[string]int{"port1": 4321},
NodeSelector: nodeSelector,
}
err := framework.RunRC(*config)
if expectRunning {
return err
}
return nil
}

vendor/k8s.io/kubernetes/test/e2e/scheduling/events.go generated vendored Normal file

@@ -0,0 +1,41 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"fmt"
"strings"
"k8s.io/api/core/v1"
)
func scheduleSuccessEvent(ns, podName, nodeName string) func(*v1.Event) bool {
return func(e *v1.Event) bool {
return e.Type == v1.EventTypeNormal &&
e.Reason == "Scheduled" &&
strings.HasPrefix(e.Name, podName) &&
strings.Contains(e.Message, fmt.Sprintf("Successfully assigned %v/%v to %v", ns, podName, nodeName))
}
}
func scheduleFailureEvent(podName string) func(*v1.Event) bool {
return func(e *v1.Event) bool {
return strings.HasPrefix(e.Name, podName) &&
e.Type == "Warning" &&
e.Reason == "FailedScheduling"
}
}
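
For reference, a minimal sketch of how these predicates behave (illustrative only: the event values below are made up, metav1 is assumed imported, and in the suite the predicates are handed to common.ObserveEventAfterAction rather than called directly):

e := &v1.Event{
	ObjectMeta: metav1.ObjectMeta{Name: "mypod.15f2b9a6"},
	Type:       v1.EventTypeNormal,
	Reason:     "Scheduled",
	Message:    "Successfully assigned ns1/mypod to node-1",
}
scheduleSuccessEvent("ns1", "mypod", "node-1")(e) // true
scheduleFailureEvent("mypod")(e)                  // false: not a FailedScheduling warning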

vendor/k8s.io/kubernetes/test/e2e/scheduling/framework.go generated vendored Normal file

@@ -0,0 +1,23 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import "github.com/onsi/ginkgo"
func SIGDescribe(text string, body func()) bool {
return ginkgo.Describe("[sig-scheduling] "+text, body)
}
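
Every spec file in this package wraps its top-level Describe with this helper so the suite is tagged consistently, e.g. (as limit_range.go does below):

var _ = SIGDescribe("LimitRange", func() { /* specs */ })
// reported as: [sig-scheduling] LimitRange ...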

vendor/k8s.io/kubernetes/test/e2e/scheduling/limit_range.go generated vendored Normal file

@@ -0,0 +1,254 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"fmt"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
podName = "pfpod"
)
var _ = SIGDescribe("LimitRange", func() {
f := framework.NewDefaultFramework("limitrange")
It("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() {
By("Creating a LimitRange")
min := getResourceList("50m", "100Mi", "100Gi")
max := getResourceList("500m", "500Mi", "500Gi")
defaultLimit := getResourceList("500m", "500Mi", "500Gi")
defaultRequest := getResourceList("100m", "200Mi", "200Gi")
maxLimitRequestRatio := v1.ResourceList{}
limitRange := newLimitRange("limit-range", v1.LimitTypeContainer,
min, max,
defaultLimit, defaultRequest,
maxLimitRequestRatio)
By("Setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name}))
options := metav1.ListOptions{LabelSelector: selector.String()}
limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for limitRanges")
Expect(len(limitRanges.Items)).To(Equal(0))
options = metav1.ListOptions{
LabelSelector: selector.String(),
ResourceVersion: limitRanges.ListMeta.ResourceVersion,
}
w, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Watch(options)
Expect(err).NotTo(HaveOccurred(), "failed to set up watch")
By("Submitting a LimitRange")
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(limitRange)
Expect(err).NotTo(HaveOccurred())
By("Verifying LimitRange creation was observed")
select {
case event := <-w.ResultChan():
if event.Type != watch.Added {
framework.Failf("Failed to observe pod creation: %v", event)
}
case <-time.After(framework.ServiceRespondingTimeout):
framework.Failf("Timeout while waiting for LimitRange creation")
}
By("Fetching the LimitRange to ensure it has proper values")
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
expected := v1.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit}
actual := v1.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default}
err = equalResourceRequirement(expected, actual)
Expect(err).NotTo(HaveOccurred())
By("Creating a Pod with no resource requirements")
pod := f.NewTestPod("pod-no-resources", v1.ResourceList{}, v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring Pod has resource requirements applied from LimitRange")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
for i := range pod.Spec.Containers {
err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
if err != nil {
// Print the pod to help in debugging.
framework.Logf("Pod %+v does not have the expected requirements", pod)
Expect(err).NotTo(HaveOccurred())
}
}
By("Creating a Pod with partial resource requirements")
pod = f.NewTestPod("pod-partial-resources", getResourceList("", "150Mi", "150Gi"), getResourceList("300m", "", ""))
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring Pod has merged resource requirements applied from LimitRange")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// This is an interesting case, so it's worth a comment
// If you specify a Limit and no Request, the Request will default to the Limit
// This means that LimitRange.DefaultRequest will ONLY take effect if a container.resources.limit is not supplied
expected = v1.ResourceRequirements{Requests: getResourceList("300m", "150Mi", "150Gi"), Limits: getResourceList("300m", "500Mi", "500Gi")}
for i := range pod.Spec.Containers {
err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
if err != nil {
// Print the pod to help in debugging.
framework.Logf("Pod %+v does not have the expected requirements", pod)
Expect(err).NotTo(HaveOccurred())
}
}
By("Failing to create a Pod with less than min resources")
pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred())
By("Failing to create a Pod with more than max resources")
pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred())
By("Updating a LimitRange")
newMin := getResourceList("9m", "49Mi", "49Gi")
limitRange.Spec.Limits[0].Min = newMin
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Update(limitRange)
Expect(err).NotTo(HaveOccurred())
By("Creating a Pod with less than former min resources")
pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Failing to create a Pod with more than max resources")
pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred())
By("Deleting a LimitRange")
err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Delete(limitRange.Name, metav1.NewDeleteOptions(30))
Expect(err).NotTo(HaveOccurred())
By("Verifying the LimitRange was deleted")
Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name}))
options := metav1.ListOptions{LabelSelector: selector.String()}
limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
if err != nil {
framework.Logf("Unable to retrieve LimitRanges: %v", err)
return false, nil
}
if len(limitRanges.Items) == 0 {
framework.Logf("limitRange is already deleted")
return true, nil
}
if len(limitRanges.Items) > 0 {
if limitRanges.Items[0].ObjectMeta.DeletionTimestamp == nil {
framework.Logf("deletion has not yet been observed")
return false, nil
}
return true, nil
}
return false, nil
})).NotTo(HaveOccurred(), "kubelet never observed the termination notice")
By("Creating a Pod with more than former max resources")
pod = f.NewTestPod(podName+"2", getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
})
})
func equalResourceRequirement(expected v1.ResourceRequirements, actual v1.ResourceRequirements) error {
framework.Logf("Verifying requests: expected %v with actual %v", expected.Requests, actual.Requests)
err := equalResourceList(expected.Requests, actual.Requests)
if err != nil {
return err
}
framework.Logf("Verifying limits: expected %v with actual %v", expected.Limits, actual.Limits)
err = equalResourceList(expected.Limits, actual.Limits)
return err
}
func equalResourceList(expected v1.ResourceList, actual v1.ResourceList) error {
for k, v := range expected {
if actualValue, found := actual[k]; !found || (v.Cmp(actualValue) != 0) {
return fmt.Errorf("resource %v expected %v actual %v", k, v.String(), actualValue.String())
}
}
for k, v := range actual {
if expectedValue, found := expected[k]; !found || (v.Cmp(expectedValue) != 0) {
return fmt.Errorf("resource %v expected %v actual %v", k, expectedValue.String(), v.String())
}
}
return nil
}
func getResourceList(cpu, memory string, ephemeralStorage string) v1.ResourceList {
res := v1.ResourceList{}
if cpu != "" {
res[v1.ResourceCPU] = resource.MustParse(cpu)
}
if memory != "" {
res[v1.ResourceMemory] = resource.MustParse(memory)
}
if ephemeralStorage != "" {
res[v1.ResourceEphemeralStorage] = resource.MustParse(ephemeralStorage)
}
return res
}
// newLimitRange returns a limit range with specified data
func newLimitRange(name string, limitType v1.LimitType,
min, max,
defaultLimit, defaultRequest,
maxLimitRequestRatio v1.ResourceList) *v1.LimitRange {
return &v1.LimitRange{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.LimitRangeSpec{
Limits: []v1.LimitRangeItem{
{
Type: limitType,
Min: min,
Max: max,
Default: defaultLimit,
DefaultRequest: defaultRequest,
MaxLimitRequestRatio: maxLimitRequestRatio,
},
},
},
}
}
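
A minimal usage sketch (illustrative; these are the same values the test above passes):

limitRange := newLimitRange("limit-range", v1.LimitTypeContainer,
	getResourceList("50m", "100Mi", "100Gi"),  // min
	getResourceList("500m", "500Mi", "500Gi"), // max
	getResourceList("500m", "500Mi", "500Gi"), // default limit
	getResourceList("100m", "200Mi", "200Gi"), // default request
	v1.ResourceList{},                         // maxLimitRequestRatio (unset)
)
// The object is then submitted with f.ClientSet.CoreV1().LimitRanges(ns).Create(limitRange).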

vendor/k8s.io/kubernetes/test/e2e/scheduling/nvidia-gpus.go generated vendored Normal file

@@ -0,0 +1,192 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"os"
"strings"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
testPodNamePrefix = "nvidia-gpu-"
cosOSImage = "Container-Optimized OS from Google"
// Nvidia driver installation can take upwards of 5 minutes.
driverInstallTimeout = 10 * time.Minute
)
var (
gpuResourceName v1.ResourceName
dsYamlUrl string
)
func makeCudaAdditionDevicePluginTestPod() *v1.Pod {
podName := testPodNamePrefix + string(uuid.NewUUID())
testPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Name: "vector-addition",
Image: imageutils.GetE2EImage(imageutils.CudaVectorAdd),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
gpuResourceName: *resource.NewQuantity(1, resource.DecimalSI),
},
},
},
},
},
}
return testPod
}
func isClusterRunningCOS(f *framework.Framework) bool {
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err, "getting node list")
for _, node := range nodeList.Items {
if !strings.Contains(node.Status.NodeInfo.OSImage, cosOSImage) {
return false
}
}
return true
}
func areGPUsAvailableOnAllSchedulableNodes(f *framework.Framework) bool {
framework.Logf("Getting list of Nodes from API server")
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err, "getting node list")
for _, node := range nodeList.Items {
if node.Spec.Unschedulable {
continue
}
framework.Logf("gpuResourceName %s", gpuResourceName)
if val, ok := node.Status.Capacity[gpuResourceName]; !ok || val.Value() == 0 {
framework.Logf("Nvidia GPUs not available on Node: %q", node.Name)
return false
}
}
framework.Logf("Nvidia GPUs exist on all schedulable nodes")
return true
}
func getGPUsAvailable(f *framework.Framework) int64 {
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err, "getting node list")
var gpusAvailable int64
for _, node := range nodeList.Items {
if val, ok := node.Status.Capacity[gpuResourceName]; ok {
gpusAvailable += (&val).Value()
}
}
return gpusAvailable
}
func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *framework.ContainerResourceGatherer {
// Skip the test if the base image is not COS.
// TODO: Add support for other base images.
// CUDA apps require host mounts which is not portable across base images (yet).
framework.Logf("Checking base image")
if !isClusterRunningCOS(f) {
Skip("Nvidia GPU tests are supproted only on Container Optimized OS image currently")
}
framework.Logf("Cluster is running on COS. Proceeding with test")
dsYamlUrlFromEnv := os.Getenv("NVIDIA_DRIVER_INSTALLER_DAEMONSET")
if dsYamlUrlFromEnv != "" {
dsYamlUrl = dsYamlUrlFromEnv
} else {
dsYamlUrl = "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml"
}
gpuResourceName = framework.NVIDIAGPUResourceName
framework.Logf("Using %v", dsYamlUrl)
// Creates the DaemonSet that installs Nvidia Drivers.
ds, err := framework.DsFromManifest(dsYamlUrl)
Expect(err).NotTo(HaveOccurred())
ds.Namespace = f.Namespace.Name
_, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ds)
framework.ExpectNoError(err, "failed to create nvidia-driver-installer daemonset")
framework.Logf("Successfully created daemonset to install Nvidia drivers.")
pods, err := framework.WaitForControlledPods(f.ClientSet, ds.Namespace, ds.Name, extensionsinternal.Kind("DaemonSet"))
framework.ExpectNoError(err, "failed to get pods controlled by the nvidia-driver-installer daemonset")
devicepluginPods, err := framework.WaitForControlledPods(f.ClientSet, "kube-system", "nvidia-gpu-device-plugin", extensionsinternal.Kind("DaemonSet"))
if err == nil {
framework.Logf("Adding deviceplugin addon pod.")
pods.Items = append(pods.Items, devicepluginPods.Items...)
}
var rsgather *framework.ContainerResourceGatherer
if setupResourceGatherer {
framework.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{InKubemark: false, MasterOnly: false, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods)
framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods")
go rsgather.StartGatheringData()
}
// Wait for Nvidia GPUs to be available on nodes
framework.Logf("Waiting for drivers to be installed and GPUs to be available in Node Capacity...")
Eventually(func() bool {
return areGPUsAvailableOnAllSchedulableNodes(f)
}, driverInstallTimeout, time.Second).Should(BeTrue())
return rsgather
}
func testNvidiaGPUsOnCOS(f *framework.Framework) {
rsgather := SetupNVIDIAGPUNode(f, true)
framework.Logf("Creating as many pods as there are Nvidia GPUs and have the pods run a CUDA app")
podList := []*v1.Pod{}
for i := int64(0); i < getGPUsAvailable(f); i++ {
podList = append(podList, f.PodClient().Create(makeCudaAdditionDevicePluginTestPod()))
}
framework.Logf("Wait for all test pods to succeed")
// Wait for all pods to succeed
for _, po := range podList {
f.PodClient().WaitForSuccess(po.Name, 5*time.Minute)
}
framework.Logf("Stopping ResourceUsageGather")
constraints := make(map[string]framework.ResourceConstraint)
// For now, just gets summary. Can pass valid constraints in the future.
summary, err := rsgather.StopAndSummarize([]int{50, 90, 100}, constraints)
f.TestSummaries = append(f.TestSummaries, summary)
framework.ExpectNoError(err, "getting resource usage summary")
}
var _ = SIGDescribe("[Feature:GPUDevicePlugin]", func() {
f := framework.NewDefaultFramework("device-plugin-gpus")
It("run Nvidia GPU Device Plugin tests on Container Optimized OS only", func() {
testNvidiaGPUsOnCOS(f)
})
})
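
One knob worth noting from SetupNVIDIAGPUNode above: the driver-installer DaemonSet manifest can be swapped out through an environment variable before the suite runs. A hedged sketch (the URL is a placeholder, not a real manifest):

// If unset, the default GoogleCloudPlatform/container-engine-accelerators
// daemonset.yaml shown above is used.
os.Setenv("NVIDIA_DRIVER_INSTALLER_DAEMONSET",
	"https://example.com/my-nvidia-driver-installer.yaml")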

vendor/k8s.io/kubernetes/test/e2e/scheduling/predicates.go generated vendored Normal file

@@ -0,0 +1,832 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"fmt"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
_ "github.com/stretchr/testify/assert"
)
const maxNumberOfPods int64 = 10
var localStorageVersion = utilversion.MustParseSemantic("v1.8.0-beta.0")
// variable set in BeforeEach, never modified afterwards
var masterNodes sets.String
type pausePodConfig struct {
Name string
Affinity *v1.Affinity
Annotations, Labels, NodeSelector map[string]string
Resources *v1.ResourceRequirements
Tolerations []v1.Toleration
NodeName string
Ports []v1.ContainerPort
OwnerReferences []metav1.OwnerReference
PriorityClassName string
}
var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
var cs clientset.Interface
var nodeList *v1.NodeList
var systemPodsNo int
var totalPodCapacity int64
var RCName string
var ns string
f := framework.NewDefaultFramework("sched-pred")
ignoreLabels := framework.ImagePullerLabels
AfterEach(func() {
rc, err := cs.CoreV1().ReplicationControllers(ns).Get(RCName, metav1.GetOptions{})
if err == nil && *(rc.Spec.Replicas) != 0 {
By("Cleaning up the replication controller")
err := framework.DeleteRCAndWaitForGC(f.ClientSet, ns, RCName)
framework.ExpectNoError(err)
}
})
BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
nodeList = &v1.NodeList{}
framework.WaitForAllNodesHealthy(cs, time.Minute)
masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
err := framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
// Every test case in this suite assumes that cluster add-on pods stay stable and
// cannot be run in parallel with any other test that touches Nodes or Pods.
// This is because we need precise control over what's running in the cluster.
systemPods, err := framework.GetPodsInNamespace(cs, ns, ignoreLabels)
Expect(err).NotTo(HaveOccurred())
systemPodsNo = 0
for _, pod := range systemPods {
if !masterNodes.Has(pod.Spec.NodeName) && pod.DeletionTimestamp == nil {
systemPodsNo++
}
}
err = framework.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, ignoreLabels)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPodsSuccess(cs, metav1.NamespaceSystem, framework.ImagePullerLabels, framework.ImagePrePullingTimeout)
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeList.Items {
framework.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
framework.PrintAllKubeletPods(cs, node.Name)
}
})
// This test verifies that the max-pods flag works as advertised. It assumes that cluster add-on pods stay stable
// and cannot be run in parallel with any other test that touches Nodes or Pods. This is because, to check
// whether max-pods is working, we need to fully saturate the cluster and keep it in this state for a few seconds.
//
// Slow PR #13315 (8 min)
It("validates MaxPods limit number of pods that are allowed to run [Slow]", func() {
totalPodCapacity = 0
for _, node := range nodeList.Items {
framework.Logf("Node: %v", node)
podCapacity, found := node.Status.Capacity[v1.ResourcePods]
Expect(found).To(Equal(true))
totalPodCapacity += podCapacity.Value()
}
currentlyScheduledPods := framework.WaitForStableCluster(cs, masterNodes)
podsNeededForSaturation := int(totalPodCapacity) - currentlyScheduledPods
By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation))
// As the pods are distributed randomly among nodes,
// it can easily happen that all nodes are saturated
// and there is no need to create additional pods.
// StartPods requires at least one pod to replicate.
if podsNeededForSaturation > 0 {
framework.ExpectNoError(testutils.StartPods(cs, podsNeededForSaturation, ns, "maxp",
*initPausePod(f, pausePodConfig{
Name: "",
Labels: map[string]string{"name": ""},
}), true, framework.Logf))
}
podName := "additional-pod"
WaitForSchedulerAfterAction(f, createPausePodAction(f, pausePodConfig{
Name: podName,
Labels: map[string]string{"name": "additional"},
}), ns, podName, false)
verifyResult(cs, podsNeededForSaturation, 1, ns)
})
// This test verifies that we don't allow scheduling of pods in a way that the sum of local ephemeral storage limits of pods is greater than the machine's capacity.
// It assumes that cluster add-on pods stay stable and cannot be run in parallel with any other test that touches Nodes or Pods.
// This is because we need precise control over what's running in the cluster.
It("validates local ephemeral storage resource limits of pods that are allowed to run [Feature:LocalStorageCapacityIsolation]", func() {
framework.SkipUnlessServerVersionGTE(localStorageVersion, f.ClientSet.Discovery())
nodeMaxAllocatable := int64(0)
nodeToAllocatableMap := make(map[string]int64)
for _, node := range nodeList.Items {
allocatable, found := node.Status.Allocatable[v1.ResourceEphemeralStorage]
Expect(found).To(Equal(true))
nodeToAllocatableMap[node.Name] = allocatable.MilliValue()
if nodeMaxAllocatable < allocatable.MilliValue() {
nodeMaxAllocatable = allocatable.MilliValue()
}
}
framework.WaitForStableCluster(cs, masterNodes)
pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
framework.ExpectNoError(err)
for _, pod := range pods.Items {
_, found := nodeToAllocatableMap[pod.Spec.NodeName]
if found && pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
framework.Logf("Pod %v requesting local ephemeral resource =%vm on Node %v", pod.Name, getRequestedStorageEphemeralStorage(pod), pod.Spec.NodeName)
nodeToAllocatableMap[pod.Spec.NodeName] -= getRequestedStorageEphemeralStorage(pod)
}
}
var podsNeededForSaturation int
milliEphemeralStoragePerPod := nodeMaxAllocatable / maxNumberOfPods
framework.Logf("Using pod capacity: %vm", milliEphemeralStoragePerPod)
for name, leftAllocatable := range nodeToAllocatableMap {
framework.Logf("Node: %v has local ephemeral resource allocatable: %vm", name, leftAllocatable)
podsNeededForSaturation += (int)(leftAllocatable / milliEphemeralStoragePerPod)
}
By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster local ephemeral resource and trying to start another one", podsNeededForSaturation))
// As the pods are distributed randomly among nodes,
// it can easily happen that all nodes are saturated
// and there is no need to create additional pods.
// StartPods requires at least one pod to replicate.
if podsNeededForSaturation > 0 {
framework.ExpectNoError(testutils.StartPods(cs, podsNeededForSaturation, ns, "overcommit",
*initPausePod(f, pausePodConfig{
Name: "",
Labels: map[string]string{"name": ""},
Resources: &v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceEphemeralStorage: *resource.NewMilliQuantity(milliEphemeralStoragePerPod, resource.DecimalSI),
},
Requests: v1.ResourceList{
v1.ResourceEphemeralStorage: *resource.NewMilliQuantity(milliEphemeralStoragePerPod, resource.DecimalSI),
},
},
}), true, framework.Logf))
}
podName := "additional-pod"
conf := pausePodConfig{
Name: podName,
Labels: map[string]string{"name": "additional"},
Resources: &v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceEphemeralStorage: *resource.NewMilliQuantity(milliEphemeralStoragePerPod, resource.DecimalSI),
},
},
}
WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false)
verifyResult(cs, podsNeededForSaturation, 1, ns)
})
// This test verifies that we don't allow scheduling of pods in a way that the sum of
// limits of pods is greater than the machine's capacity.
// It assumes that cluster add-on pods stay stable and cannot be run in parallel
// with any other test that touches Nodes or Pods.
// This is because we need precise control over what's running in the cluster.
// Test scenario:
// 1. Find the amount of CPU resources on each node.
// 2. Create one pod with affinity to each node that uses 70% of the node CPU.
// 3. Wait for the pods to be scheduled.
// 4. Create another pod with no affinity to any node that needs 50% of the largest node's CPU.
// 5. Make sure this additional pod is not scheduled.
/*
Testname: scheduler-resource-limits
Description: Ensure that scheduler accounts node resources correctly
and respects pods' resource requirements during scheduling.
*/
framework.ConformanceIt("validates resource limits of pods that are allowed to run ", func() {
framework.WaitForStableCluster(cs, masterNodes)
nodeMaxAllocatable := int64(0)
nodeToAllocatableMap := make(map[string]int64)
for _, node := range nodeList.Items {
nodeReady := false
for _, condition := range node.Status.Conditions {
if condition.Type == v1.NodeReady && condition.Status == v1.ConditionTrue {
nodeReady = true
break
}
}
if !nodeReady {
continue
}
// Apply node label to each node
framework.AddOrUpdateLabelOnNode(cs, node.Name, "node", node.Name)
framework.ExpectNodeHasLabel(cs, node.Name, "node", node.Name)
// Find allocatable amount of CPU.
allocatable, found := node.Status.Allocatable[v1.ResourceCPU]
Expect(found).To(Equal(true))
nodeToAllocatableMap[node.Name] = allocatable.MilliValue()
if nodeMaxAllocatable < allocatable.MilliValue() {
nodeMaxAllocatable = allocatable.MilliValue()
}
}
// Clean up added labels after this test.
defer func() {
for nodeName := range nodeToAllocatableMap {
framework.RemoveLabelOffNode(cs, nodeName, "node")
}
}()
pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
framework.ExpectNoError(err)
for _, pod := range pods.Items {
_, found := nodeToAllocatableMap[pod.Spec.NodeName]
if found && pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
framework.Logf("Pod %v requesting resource cpu=%vm on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName)
nodeToAllocatableMap[pod.Spec.NodeName] -= getRequestedCPU(pod)
}
}
By("Starting Pods to consume most of the cluster CPU.")
// Create one pod per node that requires 70% of the node's remaining CPU.
fillerPods := []*v1.Pod{}
for nodeName, cpu := range nodeToAllocatableMap {
requestedCPU := cpu * 7 / 10
fillerPods = append(fillerPods, createPausePod(f, pausePodConfig{
Name: "filler-pod-" + string(uuid.NewUUID()),
Resources: &v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(requestedCPU, resource.DecimalSI),
},
Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(requestedCPU, resource.DecimalSI),
},
},
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "node",
Operator: v1.NodeSelectorOpIn,
Values: []string{nodeName},
},
},
},
},
},
},
},
}))
}
// Wait for filler pods to schedule.
for _, pod := range fillerPods {
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
}
By("Creating another pod that requires unavailable amount of CPU.")
// Create another pod that requires 50% of the largest node's CPU resources.
// This pod should remain pending, since at least 70% of the CPU of every other node in
// the cluster is already consumed.
podName := "additional-pod"
conf := pausePodConfig{
Name: podName,
Labels: map[string]string{"name": "additional"},
Resources: &v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(nodeMaxAllocatable*5/10, resource.DecimalSI),
},
},
}
WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false)
verifyResult(cs, len(fillerPods), 1, ns)
})
// Test nodes do not have any labels, hence it should be impossible to schedule a Pod with a
// nonempty Selector set.
/*
Testname: scheduler-node-selector-not-matching
Description: Ensure that scheduler respects the NodeSelector field of
PodSpec during scheduling (when it does not match any node).
*/
framework.ConformanceIt("validates that NodeSelector is respected if not matching ", func() {
By("Trying to schedule Pod with nonempty NodeSelector.")
podName := "restricted-pod"
framework.WaitForStableCluster(cs, masterNodes)
conf := pausePodConfig{
Name: podName,
Labels: map[string]string{"name": "restricted"},
NodeSelector: map[string]string{
"label": "nonempty",
},
}
WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false)
verifyResult(cs, 0, 1, ns)
})
/*
Testname: scheduler-node-selector-matching
Description: Ensure that scheduler respects the NodeSelector field
of PodSpec during scheduling (when it matches).
*/
framework.ConformanceIt("validates that NodeSelector is respected if matching ", func() {
nodeName := GetNodeThatCanRunPod(f)
By("Trying to apply a random label on the found node.")
k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
v := "42"
framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
framework.ExpectNodeHasLabel(cs, nodeName, k, v)
defer framework.RemoveLabelOffNode(cs, nodeName, k)
By("Trying to relaunch the pod, now with labels.")
labelPodName := "with-labels"
createPausePod(f, pausePodConfig{
Name: labelPodName,
NodeSelector: map[string]string{
k: v,
},
})
// check that pod got scheduled. We intentionally DO NOT check that the
// pod is running because this will create a race condition with the
// kubelet and the scheduler: the scheduler might have scheduled a pod
// already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod.
framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName))
labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})
// Test nodes do not have any labels, hence it should be impossible to schedule a Pod with a
// non-nil NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.
It("validates that NodeAffinity is respected if not matching", func() {
By("Trying to schedule Pod with nonempty NodeSelector.")
podName := "restricted-pod"
framework.WaitForStableCluster(cs, masterNodes)
conf := pausePodConfig{
Name: podName,
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "foo",
Operator: v1.NodeSelectorOpIn,
Values: []string{"bar", "value2"},
},
},
}, {
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "diffkey",
Operator: v1.NodeSelectorOpIn,
Values: []string{"wrong", "value2"},
},
},
},
},
},
},
},
Labels: map[string]string{"name": "restricted"},
}
WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false)
verifyResult(cs, 0, 1, ns)
})
// Keep the same steps with the test on NodeSelector,
// but specify Affinity in Pod.Spec.Affinity, instead of NodeSelector.
It("validates that required NodeAffinity setting is respected if matching", func() {
nodeName := GetNodeThatCanRunPod(f)
By("Trying to apply a random label on the found node.")
k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
v := "42"
framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
framework.ExpectNodeHasLabel(cs, nodeName, k, v)
defer framework.RemoveLabelOffNode(cs, nodeName, k)
By("Trying to relaunch the pod, now with labels.")
labelPodName := "with-labels"
createPausePod(f, pausePodConfig{
Name: labelPodName,
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: k,
Operator: v1.NodeSelectorOpIn,
Values: []string{v},
},
},
},
},
},
},
},
})
// check that pod got scheduled. We intentionally DO NOT check that the
// pod is running because this will create a race condition with the
// kubelet and the scheduler: the scheduler might have scheduled a pod
// already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod.
framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName))
labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})
// 1. Run a pod to get an available node, then delete the pod
// 2. Taint the node with a random taint
// 3. Try to relaunch the pod with tolerations that tolerate the taint on the node,
// and the pod's nodeName specified to the name of the node found in step 1
It("validates that taints-tolerations is respected if matching", func() {
nodeName := getNodeThatCanRunPodWithoutToleration(f)
By("Trying to apply a random taint on the found node.")
testTaint := v1.Taint{
Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-%s", string(uuid.NewUUID())),
Value: "testing-taint-value",
Effect: v1.TaintEffectNoSchedule,
}
framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer framework.RemoveTaintOffNode(cs, nodeName, testTaint)
By("Trying to apply a random label on the found node.")
labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID()))
labelValue := "testing-label-value"
framework.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue)
framework.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue)
defer framework.RemoveLabelOffNode(cs, nodeName, labelKey)
By("Trying to relaunch the pod, now with tolerations.")
tolerationPodName := "with-tolerations"
createPausePod(f, pausePodConfig{
Name: tolerationPodName,
Tolerations: []v1.Toleration{{Key: testTaint.Key, Value: testTaint.Value, Effect: testTaint.Effect}},
NodeSelector: map[string]string{labelKey: labelValue},
})
// check that pod got scheduled. We intentionally DO NOT check that the
// pod is running because this will create a race condition with the
// kubelet and the scheduler: the scheduler might have scheduled a pod
// already when the kubelet does not know about its new taint yet. The
// kubelet will then refuse to launch the pod.
framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, tolerationPodName))
deployedPod, err := cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(deployedPod.Spec.NodeName).To(Equal(nodeName))
})
// 1. Run a pod to get an available node, then delete the pod
// 2. Taint the node with a random taint
// 3. Try to relaunch the pod, still with no tolerations,
// and the pod's nodeName specified to the name of the node found in step 1
It("validates that taints-tolerations is respected if not matching", func() {
nodeName := getNodeThatCanRunPodWithoutToleration(f)
By("Trying to apply a random taint on the found node.")
testTaint := v1.Taint{
Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-%s", string(uuid.NewUUID())),
Value: "testing-taint-value",
Effect: v1.TaintEffectNoSchedule,
}
framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer framework.RemoveTaintOffNode(cs, nodeName, testTaint)
By("Trying to apply a random label on the found node.")
labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID()))
labelValue := "testing-label-value"
framework.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue)
framework.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue)
defer framework.RemoveLabelOffNode(cs, nodeName, labelKey)
By("Trying to relaunch the pod, still no tolerations.")
podNameNoTolerations := "still-no-tolerations"
conf := pausePodConfig{
Name: podNameNoTolerations,
NodeSelector: map[string]string{labelKey: labelValue},
}
WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podNameNoTolerations, false)
verifyResult(cs, 0, 1, ns)
By("Removing taint off the node")
WaitForSchedulerAfterAction(f, removeTaintFromNodeAction(cs, nodeName, testTaint), ns, podNameNoTolerations, true)
verifyResult(cs, 1, 0, ns)
})
It("validates that there is no conflict between pods with same hostPort but different hostIP and protocol", func() {
nodeName := GetNodeThatCanRunPod(f)
// use a nodeSelector to make sure the test pods land on the same node, so we can explicitly verify whether a conflict exists
By("Trying to apply a random label on the found node.")
k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
v := "90"
nodeSelector := make(map[string]string)
nodeSelector[k] = v
framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
framework.ExpectNodeHasLabel(cs, nodeName, k, v)
defer framework.RemoveLabelOffNode(cs, nodeName, k)
port := int32(54321)
By(fmt.Sprintf("Trying to create a pod(pod1) with hostport %v and hostIP 127.0.0.1 and expect scheduled", port))
creatHostPortPodOnNode(f, "pod1", ns, "127.0.0.1", port, v1.ProtocolTCP, nodeSelector, true)
By(fmt.Sprintf("Trying to create another pod(pod2) with hostport %v but hostIP 127.0.0.2 on the node which pod1 resides and expect scheduled", port))
creatHostPortPodOnNode(f, "pod2", ns, "127.0.0.2", port, v1.ProtocolTCP, nodeSelector, true)
By(fmt.Sprintf("Trying to create a third pod(pod3) with hostport %v, hostIP 127.0.0.2 but use UDP protocol on the node which pod2 resides", port))
creatHostPortPodOnNode(f, "pod3", ns, "127.0.0.2", port, v1.ProtocolUDP, nodeSelector, true)
})
It("validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP", func() {
nodeName := GetNodeThatCanRunPod(f)
// use a nodeSelector to make sure the test pods land on the same node, so we can explicitly verify whether a conflict exists
By("Trying to apply a random label on the found node.")
k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
v := "95"
nodeSelector := make(map[string]string)
nodeSelector[k] = v
framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
framework.ExpectNodeHasLabel(cs, nodeName, k, v)
defer framework.RemoveLabelOffNode(cs, nodeName, k)
port := int32(54322)
By(fmt.Sprintf("Trying to create a pod(pod4) with hostport %v and hostIP 0.0.0.0(empty string here) and expect scheduled", port))
creatHostPortPodOnNode(f, "pod4", ns, "", port, v1.ProtocolTCP, nodeSelector, true)
By(fmt.Sprintf("Trying to create another pod(pod5) with hostport %v but hostIP 127.0.0.1 on the node which pod4 resides and expect not scheduled", port))
creatHostPortPodOnNode(f, "pod5", ns, "127.0.0.1", port, v1.ProtocolTCP, nodeSelector, false)
})
})
func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: conf.Name,
Labels: conf.Labels,
Annotations: conf.Annotations,
OwnerReferences: conf.OwnerReferences,
},
Spec: v1.PodSpec{
NodeSelector: conf.NodeSelector,
Affinity: conf.Affinity,
Containers: []v1.Container{
{
Name: conf.Name,
Image: imageutils.GetPauseImageName(),
Ports: conf.Ports,
},
},
Tolerations: conf.Tolerations,
NodeName: conf.NodeName,
PriorityClassName: conf.PriorityClassName,
},
}
if conf.Resources != nil {
pod.Spec.Containers[0].Resources = *conf.Resources
}
return pod
}
func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(initPausePod(f, conf))
framework.ExpectNoError(err)
return pod
}
func runPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
pod := createPausePod(f, conf)
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(conf.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
return pod
}
func runPodAndGetNodeName(f *framework.Framework, conf pausePodConfig) string {
// launch a pod to find a node which can launch a pod. We intentionally do
// not just take the node list and choose the first of them. Depending on the
// cluster and the scheduler it might be that a "normal" pod cannot be
// scheduled onto it.
pod := runPausePod(f, conf)
By("Explicitly delete pod here to free the resource it takes.")
err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
return pod.Spec.NodeName
}
func getRequestedCPU(pod v1.Pod) int64 {
var result int64
for _, container := range pod.Spec.Containers {
result += container.Resources.Requests.Cpu().MilliValue()
}
return result
}
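// getRequestedStorageEphemeralStorage returns the sum of the ephemeral-storage requests (as milli-quantities) of all containers in the pod.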
func getRequestedStorageEphemeralStorage(pod v1.Pod) int64 {
var result int64
for _, container := range pod.Spec.Containers {
result += container.Resources.Requests.StorageEphemeral().MilliValue()
}
return result
}
// removeTaintFromNodeAction returns a closure that removes the given taint
// from the given node upon invocation.
func removeTaintFromNodeAction(cs clientset.Interface, nodeName string, testTaint v1.Taint) common.Action {
return func() error {
framework.RemoveTaintOffNode(cs, nodeName, testTaint)
return nil
}
}
// createPausePodAction returns a closure that creates a pause pod upon invocation.
func createPausePodAction(f *framework.Framework, conf pausePodConfig) common.Action {
return func() error {
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(initPausePod(f, conf))
return err
}
}
// WaitForSchedulerAfterAction performs the provided action and then waits for
// the scheduler to act on the given pod.
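// A typical call (illustrative only; conf here stands for a pausePodConfig defined by the caller) is:
//   WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, conf.Name, true)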
func WaitForSchedulerAfterAction(f *framework.Framework, action common.Action, ns, podName string, expectSuccess bool) {
predicate := scheduleFailureEvent(podName)
if expectSuccess {
predicate = scheduleSuccessEvent(ns, podName, "" /* any node */)
}
success, err := common.ObserveEventAfterAction(f, predicate, action)
Expect(err).NotTo(HaveOccurred())
Expect(success).To(Equal(true))
}
// TODO: upgrade calls in PodAffinity tests when we're able to run them
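// verifyResult checks that the numbers of scheduled and not-scheduled pods in the namespace match the expectations.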
func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) {
allPods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
framework.ExpectNoError(err)
scheduledPods, notScheduledPods := framework.GetPodsScheduled(masterNodes, allPods)
printed := false
printOnce := func(msg string) string {
if !printed {
printed = true
return msg
} else {
return ""
}
}
Expect(len(notScheduledPods)).To(Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
Expect(len(scheduledPods)).To(Equal(expectedScheduled), printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods)))
}
// verifyReplicasResult is a wrapper around verifyResult for a group of pods that share the same "name: labelName" label, i.e. pods that belong to the same RC
func verifyReplicasResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string, labelName string) {
allPods := getPodsByLabels(c, ns, map[string]string{"name": labelName})
scheduledPods, notScheduledPods := framework.GetPodsScheduled(masterNodes, allPods)
printed := false
printOnce := func(msg string) string {
if !printed {
printed = true
return msg
} else {
return ""
}
}
Expect(len(notScheduledPods)).To(Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
Expect(len(scheduledPods)).To(Equal(expectedScheduled), printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods)))
}
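// getPodsByLabels lists all pods in the given namespace that match the provided label set.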
func getPodsByLabels(c clientset.Interface, ns string, labelsMap map[string]string) *v1.PodList {
selector := labels.SelectorFromSet(labels.Set(labelsMap))
allPods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()})
framework.ExpectNoError(err)
return allPods
}
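// runAndKeepPodWithLabelAndGetNodeName launches a pause pod labeled security=S1, leaves it running, and returns the node name and the pod name.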
func runAndKeepPodWithLabelAndGetNodeName(f *framework.Framework) (string, string) {
// launch a pod to find a node which can launch a pod. We intentionally do
// not just take the node list and choose the first of them. Depending on the
// cluster and the scheduler it might be that a "normal" pod cannot be
// scheduled onto it.
By("Trying to launch a pod with a label to get a node which can launch it.")
pod := runPausePod(f, pausePodConfig{
Name: "with-label-" + string(uuid.NewUUID()),
Labels: map[string]string{"security": "S1"},
})
return pod.Spec.NodeName, pod.Name
}
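// GetNodeThatCanRunPod launches a plain pause pod and returns the name of a node that is able to run it.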
func GetNodeThatCanRunPod(f *framework.Framework) string {
By("Trying to launch a pod without a label to get a node which can launch it.")
return runPodAndGetNodeName(f, pausePodConfig{Name: "without-label"})
}
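// getNodeThatCanRunPodWithoutToleration does the same with a pod that declares no tolerations.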
func getNodeThatCanRunPodWithoutToleration(f *framework.Framework) string {
By("Trying to launch a pod without a toleration to get a node which can launch it.")
return runPodAndGetNodeName(f, pausePodConfig{Name: "without-toleration"})
}
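// CreateHostPortPods runs an RC with the given number of pause replicas, each reserving host port 4321; when expectRunning is true the RC must start successfully.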
func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
By(fmt.Sprintf("Running RC which reserves host port"))
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: f.Namespace.Name,
Timeout: defaultTimeout,
Image: imageutils.GetPauseImageName(),
Replicas: replicas,
HostPorts: map[string]int{"port1": 4321},
}
err := framework.RunRC(*config)
if expectRunning {
framework.ExpectNoError(err)
}
}
// creatHostPortPodOnNode creates a pod that uses a host port on the node selected by the given nodeSelector
func creatHostPortPodOnNode(f *framework.Framework, podName, ns, hostIP string, port int32, protocol v1.Protocol, nodeSelector map[string]string, expectScheduled bool) {
createPausePod(f, pausePodConfig{
Name: podName,
Ports: []v1.ContainerPort{
{
HostPort: port,
ContainerPort: 80,
Protocol: protocol,
HostIP: hostIP,
},
},
NodeSelector: nodeSelector,
})
err := framework.WaitForPodNotPending(f.ClientSet, ns, podName)
if expectScheduled {
framework.ExpectNoError(err)
}
}

343
vendor/k8s.io/kubernetes/test/e2e/scheduling/preemption.go generated vendored Normal file
View File

@@ -0,0 +1,343 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"fmt"
"time"
"k8s.io/api/core/v1"
schedulerapi "k8s.io/api/scheduling/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/apis/scheduling"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
_ "github.com/stretchr/testify/assert"
)
var _ = SIGDescribe("SchedulerPreemption [Serial] [Feature:PodPreemption]", func() {
var cs clientset.Interface
var nodeList *v1.NodeList
var ns string
f := framework.NewDefaultFramework("sched-preemption")
lowPriority, mediumPriority, highPriority := int32(1), int32(100), int32(1000)
lowPriorityClassName := f.BaseName + "-low-priority"
mediumPriorityClassName := f.BaseName + "-medium-priority"
highPriorityClassName := f.BaseName + "-high-priority"
AfterEach(func() {
})
BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
nodeList = &v1.NodeList{}
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
_, err = f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: mediumPriorityClassName}, Value: mediumPriority})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
_, err = f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: lowPriorityClassName}, Value: lowPriority})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
framework.WaitForAllNodesHealthy(cs, time.Minute)
masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
err = framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
})
// This test verifies that when a higher priority pod is created and no node with
// enough resources is found, scheduler preempts a lower priority pod to schedule
// the high priority pod.
It("validates basic preemption works", func() {
var podRes v1.ResourceList
// Create one pod per node that uses a lot of the node's resources.
By("Create pods that use 60% of node resources.")
pods := make([]*v1.Pod, len(nodeList.Items))
for i, node := range nodeList.Items {
cpuAllocatable, found := node.Status.Allocatable["cpu"]
Expect(found).To(Equal(true))
milliCPU := cpuAllocatable.MilliValue() * 40 / 100
memAllocatable, found := node.Status.Allocatable["memory"]
Expect(found).To(Equal(true))
memory := memAllocatable.Value() * 60 / 100
podRes = v1.ResourceList{}
podRes[v1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
podRes[v1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)
// make the first pod low priority and the rest medium priority.
priorityName := mediumPriorityClassName
if i == 0 {
priorityName = lowPriorityClassName
}
pods[i] = createPausePod(f, pausePodConfig{
Name: fmt.Sprintf("pod%d-%v", i, priorityName),
PriorityClassName: priorityName,
Resources: &v1.ResourceRequirements{
Requests: podRes,
},
})
framework.Logf("Created pod: %v", pods[i].Name)
}
By("Wait for pods to be scheduled.")
for _, pod := range pods {
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
}
By("Run a high priority pod that use 60% of a node resources.")
// Create a high priority pod and make sure it is scheduled.
runPausePod(f, pausePodConfig{
Name: "preemptor-pod",
PriorityClassName: highPriorityClassName,
Resources: &v1.ResourceRequirements{
Requests: podRes,
},
})
// Make sure that the lowest priority pod is deleted.
preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
podDeleted := (err != nil && errors.IsNotFound(err)) ||
(err == nil && preemptedPod.DeletionTimestamp != nil)
Expect(podDeleted).To(BeTrue())
// Other pods (mid priority ones) should be present.
for i := 1; i < len(pods); i++ {
livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(livePod.DeletionTimestamp).To(BeNil())
}
})
// This test verifies that when a critical pod is created and no node with
// enough resources is found, scheduler preempts a lower priority pod to schedule
// this critical pod.
It("validates lower priority pod preemption by critical pod", func() {
var podRes v1.ResourceList
// Create one pod per node that uses a lot of the node's resources.
By("Create pods that use 60% of node resources.")
pods := make([]*v1.Pod, len(nodeList.Items))
for i, node := range nodeList.Items {
cpuAllocatable, found := node.Status.Allocatable["cpu"]
Expect(found).To(Equal(true))
milliCPU := cpuAllocatable.MilliValue() * 40 / 100
memAllocatable, found := node.Status.Allocatable["memory"]
Expect(found).To(Equal(true))
memory := memAllocatable.Value() * 60 / 100
podRes = v1.ResourceList{}
podRes[v1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
podRes[v1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)
// make the first pod low priority and the rest medium priority.
priorityName := mediumPriorityClassName
if i == 0 {
priorityName = lowPriorityClassName
}
pods[i] = createPausePod(f, pausePodConfig{
Name: fmt.Sprintf("pod%d-%v", i, priorityName),
PriorityClassName: priorityName,
Resources: &v1.ResourceRequirements{
Requests: podRes,
},
})
framework.Logf("Created pod: %v", pods[i].Name)
}
By("Wait for pods to be scheduled.")
for _, pod := range pods {
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
}
By("Run a critical pod that use 60% of a node resources.")
// Create a critical pod and make sure it is scheduled.
runPausePod(f, pausePodConfig{
Name: "critical-pod",
PriorityClassName: scheduling.SystemClusterCritical,
Resources: &v1.ResourceRequirements{
Requests: podRes,
},
})
// Make sure that the lowest priority pod is deleted.
preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
podDeleted := (err != nil && errors.IsNotFound(err)) ||
(err == nil && preemptedPod.DeletionTimestamp != nil)
Expect(podDeleted).To(BeTrue())
// Other pods (mid priority ones) should be present.
for i := 1; i < len(pods); i++ {
livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(livePod.DeletionTimestamp).To(BeNil())
}
})
// This test verifies that when a high priority pod is pending and its
// scheduling violates a medium priority pod anti-affinity, the medium priority
	// pod is preempted to allow the higher priority pod to be scheduled.
// It also verifies that existing low priority pods are not preempted as their
// preemption wouldn't help.
It("validates pod anti-affinity works in preemption", func() {
var podRes v1.ResourceList
		// Create a few pods that each use a small amount of resources.
By("Create pods that use 10% of node resources.")
numPods := 4
if len(nodeList.Items) < numPods {
numPods = len(nodeList.Items)
}
pods := make([]*v1.Pod, numPods)
for i := 0; i < numPods; i++ {
node := nodeList.Items[i]
cpuAllocatable, found := node.Status.Allocatable["cpu"]
Expect(found).To(BeTrue())
milliCPU := cpuAllocatable.MilliValue() * 10 / 100
memAllocatable, found := node.Status.Allocatable["memory"]
Expect(found).To(BeTrue())
memory := memAllocatable.Value() * 10 / 100
podRes = v1.ResourceList{}
podRes[v1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
podRes[v1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)
// Apply node label to each node
framework.AddOrUpdateLabelOnNode(cs, node.Name, "node", node.Name)
framework.ExpectNodeHasLabel(cs, node.Name, "node", node.Name)
// make the first pod medium priority and the rest low priority.
priorityName := lowPriorityClassName
if i == 0 {
priorityName = mediumPriorityClassName
}
pods[i] = createPausePod(f, pausePodConfig{
Name: fmt.Sprintf("pod%d-%v", i, priorityName),
PriorityClassName: priorityName,
Resources: &v1.ResourceRequirements{
Requests: podRes,
},
Affinity: &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "service",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"blah", "foo"},
},
},
},
TopologyKey: "node",
},
},
},
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "node",
Operator: v1.NodeSelectorOpIn,
Values: []string{node.Name},
},
},
},
},
},
},
},
})
framework.Logf("Created pod: %v", pods[i].Name)
}
defer func() { // Remove added labels
for i := 0; i < numPods; i++ {
framework.RemoveLabelOffNode(cs, nodeList.Items[i].Name, "node")
}
}()
By("Wait for pods to be scheduled.")
for _, pod := range pods {
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
}
By("Run a high priority pod with node affinity to the first node.")
// Create a high priority pod and make sure it is scheduled.
runPausePod(f, pausePodConfig{
Name: "preemptor-pod",
PriorityClassName: highPriorityClassName,
Labels: map[string]string{"service": "blah"},
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "node",
Operator: v1.NodeSelectorOpIn,
Values: []string{nodeList.Items[0].Name},
},
},
},
},
},
},
},
})
// Make sure that the medium priority pod on the first node is preempted.
preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
podDeleted := (err != nil && errors.IsNotFound(err)) ||
(err == nil && preemptedPod.DeletionTimestamp != nil)
Expect(podDeleted).To(BeTrue())
// Other pods (low priority ones) should be present.
for i := 1; i < len(pods); i++ {
livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(livePod.DeletionTimestamp).To(BeNil())
}
})
})
var _ = SIGDescribe("PodPriorityResolution [Serial] [Feature:PodPreemption]", func() {
var cs clientset.Interface
var ns string
f := framework.NewDefaultFramework("sched-pod-priority")
BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
err := framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
})
// This test verifies that system critical priorities are created automatically and resolved properly.
It("validates critical system priorities are created and resolved", func() {
		// Create pods that use the system critical priority classes and verify that their priorities are resolved.
By("Create pods that use critical system priorities.")
systemPriorityClasses := []string{
scheduling.SystemNodeCritical, scheduling.SystemClusterCritical,
}
for i, spc := range systemPriorityClasses {
pod := createPausePod(f, pausePodConfig{
Name: fmt.Sprintf("pod%d-%v", i, spc),
PriorityClassName: spc,
})
Expect(pod.Spec.Priority).NotTo(BeNil())
framework.Logf("Created pod: %v", pod.Name)
}
})
})

402
vendor/k8s.io/kubernetes/test/e2e/scheduling/priorities.go generated vendored Normal file
View File

@@ -0,0 +1,402 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"encoding/json"
"fmt"
"math"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
_ "github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
type Resource struct {
MilliCPU int64
Memory int64
}
var balancePodLabel map[string]string = map[string]string{"name": "priority-balanced-memory"}
var podRequestedResource *v1.ResourceRequirements = &v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("100Mi"),
v1.ResourceCPU: resource.MustParse("100m"),
},
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("100Mi"),
v1.ResourceCPU: resource.MustParse("100m"),
},
}
// This test suite verifies the scheduler priority functions of the default provider
var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
var cs clientset.Interface
var nodeList *v1.NodeList
var systemPodsNo int
var ns string
f := framework.NewDefaultFramework("sched-priority")
ignoreLabels := framework.ImagePullerLabels
AfterEach(func() {
})
BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
nodeList = &v1.NodeList{}
framework.WaitForAllNodesHealthy(cs, time.Minute)
_, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
err := framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
err = framework.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, ignoreLabels)
Expect(err).NotTo(HaveOccurred())
})
It("Pod should be schedule to node that don't match the PodAntiAffinity terms", func() {
By("Trying to launch a pod with a label to get a node which can launch it.")
pod := runPausePod(f, pausePodConfig{
Name: "pod-with-label-security-s1",
Labels: map[string]string{"security": "S1"},
})
nodeName := pod.Spec.NodeName
By("Trying to apply a label on the found node.")
k := fmt.Sprintf("kubernetes.io/e2e-%s", "node-topologyKey")
v := "topologyvalue"
framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
framework.ExpectNodeHasLabel(cs, nodeName, k, v)
defer framework.RemoveLabelOffNode(cs, nodeName, k)
// make the nodes have balanced cpu,mem usage
err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.6)
framework.ExpectNoError(err)
By("Trying to launch the pod with podAntiAffinity.")
labelPodName := "pod-with-pod-antiaffinity"
pod = createPausePod(f, pausePodConfig{
Resources: podRequestedResource,
Name: labelPodName,
Affinity: &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{
{
PodAffinityTerm: v1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "security",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"S1", "value2"},
},
{
Key: "security",
Operator: metav1.LabelSelectorOpNotIn,
Values: []string{"S2"},
}, {
Key: "security",
Operator: metav1.LabelSelectorOpExists,
},
},
},
TopologyKey: k,
Namespaces: []string{ns},
},
Weight: 10,
},
},
},
},
})
By("Wait the pod becomes running")
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
By("Verify the pod was scheduled to the expected node.")
Expect(labelPod.Spec.NodeName).NotTo(Equal(nodeName))
})
It("Pod should avoid to schedule to node that have avoidPod annotation", func() {
nodeName := nodeList.Items[0].Name
// make the nodes have balanced cpu,mem usage
err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5)
framework.ExpectNoError(err)
By("Create a RC, with 0 replicas")
rc := createRC(ns, "scheduler-priority-avoid-pod", int32(0), map[string]string{"name": "scheduler-priority-avoid-pod"}, f, podRequestedResource)
// Cleanup the replication controller when we are done.
defer func() {
// Resize the replication controller to zero to get rid of pods.
if err := framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rc.Name); err != nil {
framework.Logf("Failed to cleanup replication controller %v: %v.", rc.Name, err)
}
}()
By("Trying to apply avoidPod annotations on the first node.")
avoidPod := v1.AvoidPods{
PreferAvoidPods: []v1.PreferAvoidPodsEntry{
{
PodSignature: v1.PodSignature{
PodController: &metav1.OwnerReference{
APIVersion: "v1",
Kind: "ReplicationController",
Name: rc.Name,
UID: rc.UID,
Controller: func() *bool { b := true; return &b }(),
},
},
				Reason:  "some reason",
Message: "some message",
},
},
}
action := func() error {
framework.AddOrUpdateAvoidPodOnNode(cs, nodeName, avoidPod)
return nil
}
predicate := func(node *v1.Node) bool {
val, err := json.Marshal(avoidPod)
if err != nil {
return false
}
return node.Annotations[v1.PreferAvoidPodsAnnotationKey] == string(val)
}
success, err := common.ObserveNodeUpdateAfterAction(f, nodeName, predicate, action)
Expect(err).NotTo(HaveOccurred())
Expect(success).To(Equal(true))
defer framework.RemoveAvoidPodsOffNode(cs, nodeName)
By(fmt.Sprintf("Scale the RC: %s to len(nodeList.Item)-1 : %v.", rc.Name, len(nodeList.Items)-1))
framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rc.Name, uint(len(nodeList.Items)-1), true)
testPods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{
LabelSelector: "name=scheduler-priority-avoid-pod",
})
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Verify the pods should not scheduled to the node: %s", nodeName))
for _, pod := range testPods.Items {
Expect(pod.Spec.NodeName).NotTo(Equal(nodeName))
}
})
It("Pod should perfer to scheduled to nodes pod can tolerate", func() {
// make the nodes have balanced cpu,mem usage ratio
err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5)
framework.ExpectNoError(err)
		// we need to apply multiple taints to a node, because a single matching toleration only counts once
By("Trying to apply 10 taint on the nodes except first one.")
nodeName := nodeList.Items[0].Name
for index, node := range nodeList.Items {
if index == 0 {
continue
}
for i := 0; i < 10; i++ {
testTaint := addRandomTaitToNode(cs, node.Name)
defer framework.RemoveTaintOffNode(cs, node.Name, *testTaint)
}
}
By("Create a pod without any tolerations")
tolerationPodName := "without-tolerations"
pod := createPausePod(f, pausePodConfig{
Name: tolerationPodName,
})
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("Pod should prefer scheduled to the node don't have the taint.")
tolePod, err := cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(tolePod.Spec.NodeName).To(Equal(nodeName))
By("Trying to apply 10 taint on the first node.")
var tolerations []v1.Toleration
for i := 0; i < 10; i++ {
testTaint := addRandomTaitToNode(cs, nodeName)
tolerations = append(tolerations, v1.Toleration{Key: testTaint.Key, Value: testTaint.Value, Effect: testTaint.Effect})
defer framework.RemoveTaintOffNode(cs, nodeName, *testTaint)
}
tolerationPodName = "with-tolerations"
By("Create a pod that tolerates all the taints of the first node.")
pod = createPausePod(f, pausePodConfig{
Name: tolerationPodName,
Tolerations: tolerations,
})
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("Pod should prefer scheduled to the node that pod can tolerate.")
tolePod, err = cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(tolePod.Spec.NodeName).To(Equal(nodeName))
})
})
// createBalancedPodForNodes creates a pod per node that asks for enough resources to make all nodes have the same mem/cpu usage ratio.
func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, ns string, nodes []v1.Node, requestedResource *v1.ResourceRequirements, ratio float64) error {
	// find the maximum current usage fraction across nodes; if a node already exceeds the requested ratio, use that node's fraction, otherwise use the ratio parameter
var maxCPUFraction, maxMemFraction float64 = ratio, ratio
var cpuFractionMap = make(map[string]float64)
var memFractionMap = make(map[string]float64)
for _, node := range nodes {
cpuFraction, memFraction := computeCpuMemFraction(cs, node, requestedResource)
cpuFractionMap[node.Name] = cpuFraction
memFractionMap[node.Name] = memFraction
if cpuFraction > maxCPUFraction {
maxCPUFraction = cpuFraction
}
if memFraction > maxMemFraction {
maxMemFraction = memFraction
}
}
	// use the maximum fraction so that every node ends up with the same cpu/mem usage ratio
ratio = math.Max(maxCPUFraction, maxMemFraction)
for _, node := range nodes {
memAllocatable, found := node.Status.Allocatable[v1.ResourceMemory]
Expect(found).To(Equal(true))
memAllocatableVal := memAllocatable.Value()
cpuAllocatable, found := node.Status.Allocatable[v1.ResourceCPU]
Expect(found).To(Equal(true))
cpuAllocatableMil := cpuAllocatable.MilliValue()
needCreateResource := v1.ResourceList{}
cpuFraction := cpuFractionMap[node.Name]
memFraction := memFractionMap[node.Name]
needCreateResource[v1.ResourceCPU] = *resource.NewMilliQuantity(int64((ratio-cpuFraction)*float64(cpuAllocatableMil)), resource.DecimalSI)
needCreateResource[v1.ResourceMemory] = *resource.NewQuantity(int64((ratio-memFraction)*float64(memAllocatableVal)), resource.BinarySI)
err := testutils.StartPods(cs, 1, ns, string(uuid.NewUUID()),
*initPausePod(f, pausePodConfig{
Name: "",
Labels: balancePodLabel,
Resources: &v1.ResourceRequirements{
Limits: needCreateResource,
Requests: needCreateResource,
},
NodeName: node.Name,
}), true, framework.Logf)
if err != nil {
return err
}
}
for _, node := range nodes {
By("Compute Cpu, Mem Fraction after create balanced pods.")
computeCpuMemFraction(cs, node, requestedResource)
}
return nil
}
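// computeCpuMemFraction returns the fraction of the node's allocatable CPU and memory that would be requested once a pod with the given resource requirements is added to the pods already running on the node.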
func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.ResourceRequirements) (float64, float64) {
framework.Logf("ComputeCpuMemFraction for node: %v", node.Name)
totalRequestedCpuResource := resource.Requests.Cpu().MilliValue()
totalRequestedMemResource := resource.Requests.Memory().Value()
allpods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
if err != nil {
framework.Failf("Expect error of invalid, got : %v", err)
}
for _, pod := range allpods.Items {
if pod.Spec.NodeName == node.Name {
framework.Logf("Pod for on the node: %v, Cpu: %v, Mem: %v", pod.Name, getNonZeroRequests(&pod).MilliCPU, getNonZeroRequests(&pod).Memory)
totalRequestedCpuResource += getNonZeroRequests(&pod).MilliCPU
totalRequestedMemResource += getNonZeroRequests(&pod).Memory
}
}
cpuAllocatable, found := node.Status.Allocatable[v1.ResourceCPU]
Expect(found).To(Equal(true))
cpuAllocatableMil := cpuAllocatable.MilliValue()
cpuFraction := float64(totalRequestedCpuResource) / float64(cpuAllocatableMil)
memAllocatable, found := node.Status.Allocatable[v1.ResourceMemory]
Expect(found).To(Equal(true))
memAllocatableVal := memAllocatable.Value()
memFraction := float64(totalRequestedMemResource) / float64(memAllocatableVal)
framework.Logf("Node: %v, totalRequestedCpuResource: %v, cpuAllocatableMil: %v, cpuFraction: %v", node.Name, totalRequestedCpuResource, cpuAllocatableMil, cpuFraction)
framework.Logf("Node: %v, totalRequestedMemResource: %v, memAllocatableVal: %v, memFraction: %v", node.Name, totalRequestedMemResource, memAllocatableVal, memFraction)
return cpuFraction, memFraction
}
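// getNonZeroRequests sums each container's CPU and memory requests, substituting the scheduler's non-zero defaults for unset values.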
func getNonZeroRequests(pod *v1.Pod) Resource {
result := Resource{}
for i := range pod.Spec.Containers {
container := &pod.Spec.Containers[i]
cpu, memory := priorityutil.GetNonzeroRequests(&container.Resources.Requests)
result.MilliCPU += cpu
result.Memory += memory
}
return result
}
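// createRC creates a ReplicationController of pause pods with the given name, replica count, labels and per-container resource requirements.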
func createRC(ns, rsName string, replicas int32, rcPodLabels map[string]string, f *framework.Framework, resource *v1.ResourceRequirements) *v1.ReplicationController {
rc := &v1.ReplicationController{
TypeMeta: metav1.TypeMeta{
Kind: "ReplicationController",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: rsName,
},
Spec: v1.ReplicationControllerSpec{
Replicas: &replicas,
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: rcPodLabels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: rsName,
Image: imageutils.GetPauseImageName(),
Resources: *resource,
},
},
},
},
},
}
rc, err := f.ClientSet.CoreV1().ReplicationControllers(ns).Create(rc)
Expect(err).NotTo(HaveOccurred())
return rc
}
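// addRandomTaitToNode adds a PreferNoSchedule taint with a random key and value to the node and returns it.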
func addRandomTaitToNode(cs clientset.Interface, nodeName string) *v1.Taint {
testTaint := v1.Taint{
Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-%s", string(uuid.NewUUID())),
Value: fmt.Sprintf("testing-taint-value-%s", string(uuid.NewUUID())),
Effect: v1.TaintEffectPreferNoSchedule,
}
framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
return &testTaint
}

133
vendor/k8s.io/kubernetes/test/e2e/scheduling/rescheduler.go generated vendored Normal file
View File

@@ -0,0 +1,133 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"fmt"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
defaultTimeout = 3 * time.Minute
)
// This test requires Rescheduler to be enabled.
var _ = SIGDescribe("Rescheduler [Serial]", func() {
f := framework.NewDefaultFramework("rescheduler")
var ns string
var totalMillicores int
BeforeEach(func() {
framework.Skipf("Rescheduler is being deprecated soon in Kubernetes 1.10")
ns = f.Namespace.Name
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeCount := len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
totalMillicores = int((&cpu).MilliValue()) * nodeCount
})
It("should ensure that critical pod is scheduled in case there is no resources available", func() {
By("reserving all available cpu")
err := reserveAllCpu(f, "reserve-all-cpu", totalMillicores)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, "reserve-all-cpu")
framework.ExpectNoError(err)
By("creating a new instance of Dashboard and waiting for Dashboard to be scheduled")
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kubernetes-dashboard"}))
listOpts := metav1.ListOptions{LabelSelector: label.String()}
deployments, err := f.ClientSet.ExtensionsV1beta1().Deployments(metav1.NamespaceSystem).List(listOpts)
framework.ExpectNoError(err)
Expect(len(deployments.Items)).Should(Equal(1))
deployment := deployments.Items[0]
replicas := uint(*(deployment.Spec.Replicas))
err = framework.ScaleDeployment(f.ClientSet, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas+1, true)
defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas, true))
framework.ExpectNoError(err)
})
})
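// reserveAllCpu runs an RC of pause pods that each request 100 millicores, scales it to millicores/100 replicas, and waits until every replica is either running or reported as unschedulable.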
func reserveAllCpu(f *framework.Framework, id string, millicores int) error {
timeout := 5 * time.Minute
replicas := millicores / 100
reserveCpu(f, id, 1, 100)
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, id, uint(replicas), false))
for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
pods, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, framework.ImagePullerLabels)
if err != nil {
return err
}
if len(pods) != replicas {
continue
}
allRunningOrUnschedulable := true
for _, pod := range pods {
if !podRunningOrUnschedulable(pod) {
allRunningOrUnschedulable = false
break
}
}
if allRunningOrUnschedulable {
return nil
}
}
return fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", id, timeout, replicas)
}
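// podRunningOrUnschedulable reports whether the pod is running and ready, or has been marked Unschedulable by the scheduler.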
func podRunningOrUnschedulable(pod *v1.Pod) bool {
_, cond := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
if cond != nil && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" {
return true
}
running, _ := testutils.PodRunningReady(pod)
return running
}
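// reserveCpu runs an RC with the given number of pause replicas, each requesting millicores/replicas of CPU.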
func reserveCpu(f *framework.Framework, id string, replicas, millicores int) {
By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
request := int64(millicores / replicas)
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: f.Namespace.Name,
Timeout: defaultTimeout,
Image: imageutils.GetPauseImageName(),
Replicas: replicas,
CpuRequest: request,
}
framework.ExpectNoError(framework.RunRC(*config))
}

959
vendor/k8s.io/kubernetes/test/e2e/scheduling/resource_quota.go generated vendored Normal file
View File

@@ -0,0 +1,959 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"fmt"
"time"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/quota/evaluator/core"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
// how long to wait for a resource quota update to occur
resourceQuotaTimeout = 30 * time.Second
)
var classGold string = "gold"
var extendedResourceName string = "example.com/dongle"
var _ = SIGDescribe("ResourceQuota", func() {
f := framework.NewDefaultFramework("resourcequota")
It("should create a ResourceQuota and ensure its status is promptly calculated.", func() {
By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should create a ResourceQuota and capture the life of a service.", func() {
By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a Service")
service := newTestServiceForQuota("test-service", v1.ServiceTypeClusterIP)
service, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(service)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status captures service creation")
usedResources = v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[v1.ResourceServices] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting a Service")
err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(service.Name, nil)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released usage")
usedResources[v1.ResourceServices] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should create a ResourceQuota and capture the life of a secret.", func() {
By("Discovering how many secrets are in namespace by default")
found, unchanged := 0, 0
wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
secrets, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
if len(secrets.Items) == found {
// loop until the number of secrets has stabilized for 5 seconds
unchanged++
return unchanged > 4, nil
}
unchanged = 0
found = len(secrets.Items)
return false, nil
})
defaultSecrets := fmt.Sprintf("%d", found)
hardSecrets := fmt.Sprintf("%d", found+1)
By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
resourceQuota.Spec.Hard[v1.ResourceSecrets] = resource.MustParse(hardSecrets)
resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[v1.ResourceSecrets] = resource.MustParse(defaultSecrets)
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a Secret")
secret := newTestSecretForQuota("test-secret")
secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status captures secret creation")
usedResources = v1.ResourceList{}
usedResources[v1.ResourceSecrets] = resource.MustParse(hardSecrets)
// we expect there to be two secrets because each namespace will receive
// a service account token secret by default
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting a secret")
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released usage")
usedResources[v1.ResourceSecrets] = resource.MustParse(defaultSecrets)
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("[Feature:Initializers] should create a ResourceQuota and capture the life of an uninitialized pod.", func() {
By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating an uninitialized Pod that fits quota")
podName := "test-pod"
requests := v1.ResourceList{}
requests[v1.ResourceCPU] = resource.MustParse("500m")
requests[v1.ResourceMemory] = resource.MustParse("252Mi")
pod := newTestPodForQuota(f, podName, requests, v1.ResourceList{})
pod.Initializers = &metav1.Initializers{Pending: []metav1.Initializer{{Name: "unhandled"}}}
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
// because no one is handling the initializer, server will return a 504 timeout
if err != nil && !errors.IsTimeout(err) {
framework.Failf("expect err to be timeout error, got %v", err)
}
createdPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(podName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("Ensuring only pod count is charged")
usedResources = v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Ensuring an uninitialized pod can update its resource requirements")
		// an uninitialized pod, unlike a running one, is still allowed to update its resource requirements.
requests = v1.ResourceList{}
requests[v1.ResourceCPU] = resource.MustParse("100m")
requests[v1.ResourceMemory] = resource.MustParse("100Mi")
_, err = framework.UpdatePodWithRetries(f.ClientSet, f.Namespace.Name, createdPod.Name, func(p *v1.Pod) {
p.Spec.Containers[0].Resources.Requests = requests
})
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status doesn't change")
usedResources = v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Allowing initializing a Pod that fits quota")
_, err = framework.UpdatePodWithRetries(f.ClientSet, f.Namespace.Name, createdPod.Name, func(p *v1.Pod) {
p.Initializers = nil
})
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status captures the usage of the intialized pod")
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[v1.ResourcePods] = resource.MustParse("1")
usedResources[v1.ResourceCPU] = requests[v1.ResourceCPU]
usedResources[v1.ResourceMemory] = requests[v1.ResourceMemory]
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(createdPod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceCPU] = resource.MustParse("0")
usedResources[v1.ResourceMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Allowing creating an uninitialized pod that exceeds remaining quota")
requests = v1.ResourceList{}
requests[v1.ResourceCPU] = resource.MustParse("1100m")
requests[v1.ResourceMemory] = resource.MustParse("100Mi")
podName = "too-large-pod"
pod = newTestPodForQuota(f, podName, requests, v1.ResourceList{})
pod.Initializers = &metav1.Initializers{Pending: []metav1.Initializer{{Name: "unhandled"}}}
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
// because no one is handling the initializer, server will return a 504 timeout
if err != nil && !errors.IsTimeout(err) {
framework.Failf("expect err to be timeout error, got %v", err)
}
createdPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(podName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("Ensuring only charges pod count")
usedResources = v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Disallowing initializing a Pod that doesn't fit quota")
_, err = framework.UpdatePodWithRetries(f.ClientSet, f.Namespace.Name, createdPod.Name, func(p *v1.Pod) {
p.Initializers = nil
})
Expect(err).To(HaveOccurred())
By("Ensuring ResourceQuota status doesn't change")
usedResources = v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(createdPod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status doesn't change")
usedResources = v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
// TODO: This is a bug. We need 51247 to fix it.
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should create a ResourceQuota and capture the life of a pod.", func() {
By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a Pod that fits quota")
podName := "test-pod"
requests := v1.ResourceList{}
limits := v1.ResourceList{}
requests[v1.ResourceCPU] = resource.MustParse("500m")
requests[v1.ResourceMemory] = resource.MustParse("252Mi")
requests[v1.ResourceEphemeralStorage] = resource.MustParse("30Gi")
requests[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
pod := newTestPodForQuota(f, podName, requests, limits)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
podToUpdate := pod
By("Ensuring ResourceQuota status captures the pod usage")
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[v1.ResourcePods] = resource.MustParse("1")
usedResources[v1.ResourceCPU] = requests[v1.ResourceCPU]
usedResources[v1.ResourceMemory] = requests[v1.ResourceMemory]
usedResources[v1.ResourceEphemeralStorage] = requests[v1.ResourceEphemeralStorage]
usedResources[v1.ResourceName(v1.DefaultResourceRequestsPrefix+extendedResourceName)] = requests[v1.ResourceName(extendedResourceName)]
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Not allowing a pod to be created that exceeds remaining quota")
requests = v1.ResourceList{}
requests[v1.ResourceCPU] = resource.MustParse("600m")
requests[v1.ResourceMemory] = resource.MustParse("100Mi")
pod = newTestPodForQuota(f, "fail-pod", requests, v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred())
By("Not allowing a pod to be created that exceeds remaining quota(validation on extended resources)")
requests = v1.ResourceList{}
limits = v1.ResourceList{}
requests[v1.ResourceCPU] = resource.MustParse("500m")
requests[v1.ResourceMemory] = resource.MustParse("100Mi")
requests[v1.ResourceEphemeralStorage] = resource.MustParse("30Gi")
requests[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
pod = newTestPodForQuota(f, "fail-pod-for-extended-resource", requests, limits)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred())
By("Ensuring a pod cannot update its resource requirements")
// a pod cannot dynamically update its resource requirements.
requests = v1.ResourceList{}
requests[v1.ResourceCPU] = resource.MustParse("100m")
requests[v1.ResourceMemory] = resource.MustParse("100Mi")
requests[v1.ResourceEphemeralStorage] = resource.MustParse("10Gi")
podToUpdate.Spec.Containers[0].Resources.Requests = requests
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(podToUpdate)
Expect(err).To(HaveOccurred())
By("Ensuring attempts to update pod resource requirements did not change quota usage")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceCPU] = resource.MustParse("0")
usedResources[v1.ResourceMemory] = resource.MustParse("0")
usedResources[v1.ResourceEphemeralStorage] = resource.MustParse("0")
usedResources[v1.ResourceName(v1.DefaultResourceRequestsPrefix+extendedResourceName)] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should create a ResourceQuota and capture the life of a configMap.", func() {
By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a ConfigMap")
configMap := newTestConfigMapForQuota("test-configmap")
configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status captures configMap creation")
usedResources = v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[v1.ResourceConfigMaps] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting a ConfigMap")
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMap.Name, nil)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released usage")
usedResources[v1.ResourceConfigMaps] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should create a ResourceQuota and capture the life of a replication controller.", func() {
By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[v1.ResourceReplicationControllers] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a ReplicationController")
replicationController := newTestReplicationControllerForQuota("test-rc", "nginx", 0)
replicationController, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(replicationController)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status captures replication controller creation")
usedResources = v1.ResourceList{}
usedResources[v1.ResourceReplicationControllers] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting a ReplicationController")
err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Delete(replicationController.Name, nil)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released usage")
usedResources[v1.ResourceReplicationControllers] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should create a ResourceQuota and capture the life of a replica set.", func() {
By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[v1.ResourceName("count/replicasets.extensions")] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a ReplicaSet")
replicaSet := newTestReplicaSetForQuota("test-rs", "nginx", 0)
replicaSet, err = f.ClientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).Create(replicaSet)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status captures replicaset creation")
usedResources = v1.ResourceList{}
usedResources[v1.ResourceName("count/replicasets.extensions")] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting a ReplicaSet")
err = f.ClientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).Delete(replicaSet.Name, nil)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released usage")
usedResources[v1.ResourceName("count/replicasets.extensions")] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should create a ResourceQuota and capture the life of a persistent volume claim. [sig-storage]", func() {
By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0")
usedResources[v1.ResourceRequestsStorage] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a PersistentVolumeClaim")
pvc := newTestPersistentVolumeClaimForQuota("test-claim")
pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(pvc)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status captures persistent volume claim creation")
usedResources = v1.ResourceList{}
usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("1")
usedResources[v1.ResourceRequestsStorage] = resource.MustParse("1Gi")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting a PersistentVolumeClaim")
err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(pvc.Name, nil)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released usage")
usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0")
usedResources[v1.ResourceRequestsStorage] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should create a ResourceQuota and capture the life of a persistent volume claim with a storage class. [sig-storage]", func() {
By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0")
usedResources[v1.ResourceRequestsStorage] = resource.MustParse("0")
usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourcePersistentVolumeClaims)] = resource.MustParse("0")
usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourceRequestsStorage)] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a PersistentVolumeClaim with storage class")
pvc := newTestPersistentVolumeClaimForQuota("test-claim")
pvc.Spec.StorageClassName = &classGold
pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(pvc)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status captures persistent volume claim creation")
usedResources = v1.ResourceList{}
usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("1")
usedResources[v1.ResourceRequestsStorage] = resource.MustParse("1Gi")
usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourcePersistentVolumeClaims)] = resource.MustParse("1")
usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourceRequestsStorage)] = resource.MustParse("1Gi")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting a PersistentVolumeClaim")
err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(pvc.Name, nil)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released usage")
usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0")
usedResources[v1.ResourceRequestsStorage] = resource.MustParse("0")
usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourcePersistentVolumeClaims)] = resource.MustParse("0")
usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourceRequestsStorage)] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should verify ResourceQuota with terminating scopes.", func() {
By("Creating a ResourceQuota with terminating scope")
quotaTerminatingName := "quota-terminating"
resourceQuotaTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaTerminatingName, v1.ResourceQuotaScopeTerminating))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a ResourceQuota with not terminating scope")
quotaNotTerminatingName := "quota-not-terminating"
resourceQuotaNotTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaNotTerminatingName, v1.ResourceQuotaScopeNotTerminating))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a long running pod")
podName := "test-pod"
requests := v1.ResourceList{}
requests[v1.ResourceCPU] = resource.MustParse("500m")
requests[v1.ResourceMemory] = resource.MustParse("200Mi")
limits := v1.ResourceList{}
limits[v1.ResourceCPU] = resource.MustParse("1")
limits[v1.ResourceMemory] = resource.MustParse("400Mi")
pod := newTestPodForQuota(f, podName, requests, limits)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not terminating scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
usedResources[v1.ResourceRequestsCPU] = requests[v1.ResourceCPU]
usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory]
usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU]
usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory]
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with terminating scope ignored the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a terminating pod")
podName = "terminating-pod"
pod = newTestPodForQuota(f, podName, requests, limits)
activeDeadlineSeconds := int64(3600)
pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with terminating scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
usedResources[v1.ResourceRequestsCPU] = requests[v1.ResourceCPU]
usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory]
usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU]
usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory]
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not terminating scope ignored the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should verify ResourceQuota with best effort scope.", func() {
By("Creating a ResourceQuota with best effort scope")
resourceQuotaBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-besteffort", v1.ResourceQuotaScopeBestEffort))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a ResourceQuota with not best effort scope")
resourceQuotaNotBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-not-besteffort", v1.ResourceQuotaScopeNotBestEffort))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a best-effort pod")
pod := newTestPodForQuota(f, podName, v1.ResourceList{}, v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with best effort scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not best effort ignored the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a not best-effort pod")
requests := v1.ResourceList{}
requests[v1.ResourceCPU] = resource.MustParse("500m")
requests[v1.ResourceMemory] = resource.MustParse("200Mi")
limits := v1.ResourceList{}
limits[v1.ResourceCPU] = resource.MustParse("1")
limits[v1.ResourceMemory] = resource.MustParse("400Mi")
pod = newTestPodForQuota(f, "burstable-pod", requests, limits)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not best effort scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with best effort scope ignored the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
})
})
// newTestResourceQuotaWithScope returns a quota that enforces default constraints for testing with scopes
func newTestResourceQuotaWithScope(name string, scope v1.ResourceQuotaScope) *v1.ResourceQuota {
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("5")
switch scope {
case v1.ResourceQuotaScopeTerminating, v1.ResourceQuotaScopeNotTerminating:
hard[v1.ResourceRequestsCPU] = resource.MustParse("1")
hard[v1.ResourceRequestsMemory] = resource.MustParse("500Mi")
hard[v1.ResourceLimitsCPU] = resource.MustParse("2")
hard[v1.ResourceLimitsMemory] = resource.MustParse("1Gi")
}
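// For orientation, the object returned below with the Terminating scope corresponds roughly to this
// manifest (a sketch only; the quota name is an example taken from the tests above):
//   apiVersion: v1
//   kind: ResourceQuota
//   metadata:
//     name: quota-terminating
//   spec:
//     hard:
//       pods: "5"
//       requests.cpu: "1"
//       requests.memory: 500Mi
//       limits.cpu: "2"
//       limits.memory: 1Gi
//     scopes: ["Terminating"]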
return &v1.ResourceQuota{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.ResourceQuotaSpec{Hard: hard, Scopes: []v1.ResourceQuotaScope{scope}},
}
}
// newTestResourceQuotaForEphemeralStorage returns a quota that enforces default constraints for testing feature LocalStorageCapacityIsolation
func newTestResourceQuotaForEphemeralStorage(name string) *v1.ResourceQuota {
hard := v1.ResourceList{}
hard[v1.ResourceEphemeralStorage] = resource.MustParse("500Mi")
hard[v1.ResourceQuotas] = resource.MustParse("1")
return &v1.ResourceQuota{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.ResourceQuotaSpec{Hard: hard},
}
}
// newTestResourceQuota returns a quota that enforces default constraints for testing
func newTestResourceQuota(name string) *v1.ResourceQuota {
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("5")
hard[v1.ResourceServices] = resource.MustParse("10")
hard[v1.ResourceServicesNodePorts] = resource.MustParse("1")
hard[v1.ResourceServicesLoadBalancers] = resource.MustParse("1")
hard[v1.ResourceReplicationControllers] = resource.MustParse("10")
hard[v1.ResourceQuotas] = resource.MustParse("1")
hard[v1.ResourceCPU] = resource.MustParse("1")
hard[v1.ResourceMemory] = resource.MustParse("500Mi")
hard[v1.ResourceConfigMaps] = resource.MustParse("2")
hard[v1.ResourceSecrets] = resource.MustParse("10")
hard[v1.ResourcePersistentVolumeClaims] = resource.MustParse("10")
hard[v1.ResourceRequestsStorage] = resource.MustParse("10Gi")
hard[v1.ResourceEphemeralStorage] = resource.MustParse("50Gi")
hard[core.V1ResourceByStorageClass(classGold, v1.ResourcePersistentVolumeClaims)] = resource.MustParse("10")
hard[core.V1ResourceByStorageClass(classGold, v1.ResourceRequestsStorage)] = resource.MustParse("10Gi")
// test quota on discovered resource type
hard[v1.ResourceName("count/replicasets.extensions")] = resource.MustParse("5")
// test quota on extended resource
hard[v1.ResourceName(v1.DefaultResourceRequestsPrefix+extendedResourceName)] = resource.MustParse("3")
return &v1.ResourceQuota{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.ResourceQuotaSpec{Hard: hard},
}
}
// newTestPodForQuota returns a pod that has the specified requests and limits
func newTestPodForQuota(f *framework.Framework, name string, requests v1.ResourceList, limits v1.ResourceList) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pause",
Image: imageutils.GetPauseImageName(),
Resources: v1.ResourceRequirements{
Requests: requests,
Limits: limits,
},
},
},
},
}
}
// newTestPersistentVolumeClaimForQuota returns a simple persistent volume claim
func newTestPersistentVolumeClaimForQuota(name string) *v1.PersistentVolumeClaim {
return &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
v1.ReadWriteMany,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
},
},
},
}
}
// newTestReplicationControllerForQuota returns a simple replication controller
func newTestReplicationControllerForQuota(name, image string, replicas int32) *v1.ReplicationController {
return &v1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.ReplicationControllerSpec{
Replicas: func(i int32) *int32 { return &i }(replicas),
Selector: map[string]string{
"name": name,
},
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"name": name},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: name,
Image: image,
},
},
},
},
},
}
}
// newTestReplicaSetForQuota returns a simple replica set
func newTestReplicaSetForQuota(name, image string, replicas int32) *extensions.ReplicaSet {
zero := int64(0)
return &extensions.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: extensions.ReplicaSetSpec{
Replicas: &replicas,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"name": name},
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &zero,
Containers: []v1.Container{
{
Name: name,
Image: image,
},
},
},
},
},
}
}
// newTestServiceForQuota returns a simple service
func newTestServiceForQuota(name string, serviceType v1.ServiceType) *v1.Service {
return &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.ServiceSpec{
Type: serviceType,
Ports: []v1.ServicePort{{
Port: 80,
TargetPort: intstr.FromInt(80),
}},
},
}
}
func newTestConfigMapForQuota(name string) *v1.ConfigMap {
return &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Data: map[string]string{
"a": "b",
},
}
}
func newTestSecretForQuota(name string) *v1.Secret {
return &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Data: map[string][]byte{
"data-1": []byte("value-1\n"),
"data-2": []byte("value-2\n"),
"data-3": []byte("value-3\n"),
},
}
}
// createResourceQuota in the specified namespace
func createResourceQuota(c clientset.Interface, namespace string, resourceQuota *v1.ResourceQuota) (*v1.ResourceQuota, error) {
return c.CoreV1().ResourceQuotas(namespace).Create(resourceQuota)
}
// deleteResourceQuota with the specified name
func deleteResourceQuota(c clientset.Interface, namespace, name string) error {
return c.CoreV1().ResourceQuotas(namespace).Delete(name, nil)
}
// wait for resource quota status to show the expected used resources value
func waitForResourceQuota(c clientset.Interface, ns, quotaName string, used v1.ResourceList) error {
return wait.Poll(framework.Poll, resourceQuotaTimeout, func() (bool, error) {
resourceQuota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{})
if err != nil {
return false, err
}
// used may not yet be calculated
if resourceQuota.Status.Used == nil {
return false, nil
}
// verify that the quota shows the expected used resource values
for k, v := range used {
if actualValue, found := resourceQuota.Status.Used[k]; !found || (actualValue.Cmp(v) != 0) {
framework.Logf("resource %s, expected %s, actual %s", k, v.String(), actualValue.String())
return false, nil
}
}
return true, nil
})
}

View File

@@ -0,0 +1,320 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
_ "github.com/stretchr/testify/assert"
)
func getTestTaint() v1.Taint {
now := metav1.Now()
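// The taint returned below is roughly what the following command would apply by hand
// (illustrative only; the tests use framework.AddOrUpdateTaintOnNode instead):
//   kubectl taint nodes <node-name> kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute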
return v1.Taint{
Key: "kubernetes.io/e2e-evict-taint-key",
Value: "evictTaintVal",
Effect: v1.TaintEffectNoExecute,
TimeAdded: &now,
}
}
// Creates a default pod for this test, with an argument saying whether the Pod should have
// a toleration for the Taints used in this test.
func createPodForTaintsTest(hasToleration bool, tolerationSeconds int, podName, ns string) *v1.Pod {
grace := int64(1)
if !hasToleration {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: ns,
Labels: map[string]string{"name": podName},
DeletionGracePeriodSeconds: &grace,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pause",
Image: "kubernetes/pause",
},
},
},
}
} else {
if tolerationSeconds <= 0 {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: ns,
Labels: map[string]string{"name": podName},
DeletionGracePeriodSeconds: &grace,
// default - tolerate forever
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pause",
Image: "kubernetes/pause",
},
},
Tolerations: []v1.Toleration{{Key: "kubernetes.io/e2e-evict-taint-key", Value: "evictTaintVal", Effect: v1.TaintEffectNoExecute}},
},
}
} else {
ts := int64(tolerationSeconds)
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: ns,
Labels: map[string]string{"name": podName},
DeletionGracePeriodSeconds: &grace,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pause",
Image: "kubernetes/pause",
},
},
// tolerate the taint, but only for tolerationSeconds
Tolerations: []v1.Toleration{{Key: "kubernetes.io/e2e-evict-taint-key", Value: "evictTaintVal", Effect: v1.TaintEffectNoExecute, TolerationSeconds: &ts}},
},
}
}
}
}
// Creates and starts a controller (informer) that watches updates on a pod in the given namespace with the given name. It puts a new
// struct into the observedDeletions channel for every deletion it sees.
func createTestController(cs clientset.Interface, observedDeletions chan struct{}, stopCh chan struct{}, podName, ns string) {
_, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = fields.SelectorFromSet(fields.Set{"metadata.name": podName}).String()
obj, err := cs.CoreV1().Pods(ns).List(options)
return runtime.Object(obj), err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = fields.SelectorFromSet(fields.Set{"metadata.name": podName}).String()
return cs.CoreV1().Pods(ns).Watch(options)
},
},
&v1.Pod{},
0,
cache.ResourceEventHandlerFuncs{
DeleteFunc: func(oldObj interface{}) { observedDeletions <- struct{}{} },
},
)
framework.Logf("Starting informer...")
go controller.Run(stopCh)
}
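// A minimal sketch of how the informer above is consumed (the tests below follow this pattern;
// "deadline" here is a placeholder duration, not a value defined in this file):
//
//   observedDeletions := make(chan struct{}, 100)
//   stopCh := make(chan struct{})
//   createTestController(cs, observedDeletions, stopCh, podName, ns)
//   select {
//   case <-time.NewTimer(deadline).C:
//       // pod was not deleted within the deadline
//   case <-observedDeletions:
//       // pod deletion observed
//   }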
const (
// KubeletPodDeletionDelaySeconds budgets time, in seconds, for the kubelet to carry out a pod deletion.
KubeletPodDeletionDelaySeconds = 60
// AdditionalWaitPerDeleteSeconds adds slack, in seconds, per expected pod deletion.
AdditionalWaitPerDeleteSeconds = 5
)
// Tests the behavior of NoExecuteTaintManager. The following scenarios are included:
// - eviction of non-tolerating pods from a tainted node,
// - lack of eviction of tolerating pods from a tainted node,
// - delayed eviction of short-tolerating pod from a tainted node,
// - lack of eviction of short-tolerating pod after taint removal.
var _ = SIGDescribe("NoExecuteTaintManager [Serial]", func() {
var cs clientset.Interface
var ns string
f := framework.NewDefaultFramework("taint-control")
BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
framework.WaitForAllNodesHealthy(cs, time.Minute)
err := framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
})
// 1. Run a pod
// 2. Taint the node running this pod with a no-execute taint
// 3. See if pod will get evicted
It("evicts pods from tainted nodes", func() {
podName := "taint-eviction-1"
pod := createPodForTaintsTest(false, 0, podName, ns)
observedDeletions := make(chan struct{}, 100)
stopCh := make(chan struct{})
createTestController(cs, observedDeletions, stopCh, podName, ns)
By("Staring pod...")
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
framework.ExpectNoError(err)
framework.Logf("Pod is running on %v. Tainting Node", nodeName)
By("Trying to apply a taint on the Node")
testTaint := getTestTaint()
framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer framework.RemoveTaintOffNode(cs, nodeName, testTaint)
// Wait a bit
By("Waiting for Pod to be deleted")
timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
select {
case <-timeoutChannel:
framework.Failf("Failed to evict Pod")
case <-observedDeletions:
framework.Logf("Noticed Pod eviction. Test successful")
}
})
// 1. Run a pod with toleration
// 2. Taint the node running this pod with a no-execute taint
// 3. See if pod won't get evicted
It("doesn't evict pod with tolerations from tainted nodes", func() {
podName := "taint-eviction-2"
pod := createPodForTaintsTest(true, 0, podName, ns)
observedDeletions := make(chan struct{}, 100)
stopCh := make(chan struct{})
createTestController(cs, observedDeletions, stopCh, podName, ns)
By("Staring pod...")
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
framework.ExpectNoError(err)
framework.Logf("Pod is running on %v. Tainting Node", nodeName)
By("Trying to apply a taint on the Node")
testTaint := getTestTaint()
framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer framework.RemoveTaintOffNode(cs, nodeName, testTaint)
// Wait a bit
By("Waiting for Pod to be deleted")
timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
select {
case <-timeoutChannel:
framework.Logf("Pod wasn't evicted. Test successful")
case <-observedDeletions:
framework.Failf("Pod was evicted despite toleration")
}
})
// 1. Run a pod with a finite toleration
// 2. Taint the node running this pod with a no-execute taint
// 3. See if pod won't get evicted before toleration time runs out
// 4. See if pod will get evicted after toleration time runs out
It("eventually evict pod with finite tolerations from tainted nodes", func() {
podName := "taint-eviction-3"
pod := createPodForTaintsTest(true, KubeletPodDeletionDelaySeconds+2*AdditionalWaitPerDeleteSeconds, podName, ns)
observedDeletions := make(chan struct{}, 100)
stopCh := make(chan struct{})
createTestController(cs, observedDeletions, stopCh, podName, ns)
By("Staring pod...")
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
framework.ExpectNoError(err)
framework.Logf("Pod is running on %v. Tainting Node", nodeName)
By("Trying to apply a taint on the Node")
testTaint := getTestTaint()
framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer framework.RemoveTaintOffNode(cs, nodeName, testTaint)
// Wait a bit
By("Waiting to see if a Pod won't be deleted")
timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
select {
case <-timeoutChannel:
framework.Logf("Pod wasn't evicted")
case <-observedDeletions:
framework.Failf("Pod was evicted despite toleration")
return
}
By("Waiting for Pod to be deleted")
timeoutChannel = time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
select {
case <-timeoutChannel:
framework.Failf("Pod wasn't evicted")
case <-observedDeletions:
framework.Logf("Pod was evicted after toleration time run out. Test successful")
return
}
})
// 1. Run a pod with short toleration
// 2. Taint the node running this pod with a no-execute taint
// 3. Wait some time
// 4. Remove the taint
// 5. See if Pod won't be evicted.
It("removing taint cancels eviction", func() {
podName := "taint-eviction-4"
pod := createPodForTaintsTest(true, 2*AdditionalWaitPerDeleteSeconds, podName, ns)
observedDeletions := make(chan struct{}, 100)
stopCh := make(chan struct{})
createTestController(cs, observedDeletions, stopCh, podName, ns)
By("Staring pod...")
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
framework.ExpectNoError(err)
framework.Logf("Pod is running on %v. Tainting Node", nodeName)
By("Trying to apply a taint on the Node")
testTaint := getTestTaint()
framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
taintRemoved := false
defer func() {
if !taintRemoved {
framework.RemoveTaintOffNode(cs, nodeName, testTaint)
}
}()
// Wait a bit
By("Waiting short time to make sure Pod is queued for deletion")
timeoutChannel := time.NewTimer(time.Duration(AdditionalWaitPerDeleteSeconds) * time.Second).C
select {
case <-timeoutChannel:
framework.Logf("Pod wasn't evicted. Proceeding")
case <-observedDeletions:
framework.Failf("Pod was evicted despite toleration")
return
}
framework.Logf("Removing taint from Node")
framework.RemoveTaintOffNode(cs, nodeName, testTaint)
taintRemoved = true
By("Waiting some time to make sure that toleration time passed.")
timeoutChannel = time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+3*AdditionalWaitPerDeleteSeconds) * time.Second).C
select {
case <-timeoutChannel:
framework.Logf("Pod wasn't evicted. Test successful")
case <-observedDeletions:
framework.Failf("Pod was evicted despite toleration")
}
})
})

View File

@@ -0,0 +1,245 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"fmt"
"math"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
var _ = SIGDescribe("Multi-AZ Clusters", func() {
f := framework.NewDefaultFramework("multi-az")
var zoneCount int
var err error
image := framework.ServeHostnameImage
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke", "aws")
if zoneCount <= 0 {
zoneCount, err = getZoneCount(f.ClientSet)
Expect(err).NotTo(HaveOccurred())
}
By(fmt.Sprintf("Checking for multi-zone cluster. Zone count = %d", zoneCount))
msg := fmt.Sprintf("Zone count is %d, only run for multi-zone clusters, skipping test", zoneCount)
framework.SkipUnlessAtLeast(zoneCount, 2, msg)
// TODO: SkipUnlessDefaultScheduler() // Non-default schedulers might not spread
})
It("should spread the pods of a service across zones", func() {
SpreadServiceOrFail(f, (2*zoneCount)+1, image)
})
It("should spread the pods of a replication controller across zones", func() {
SpreadRCOrFail(f, int32((2*zoneCount)+1), image)
})
})
// Check that the pods comprising a service get spread evenly across available zones
func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) {
// First create the service
serviceName := "test-service"
serviceSpec := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
Namespace: f.Namespace.Name,
},
Spec: v1.ServiceSpec{
Selector: map[string]string{
"service": serviceName,
},
Ports: []v1.ServicePort{{
Port: 80,
TargetPort: intstr.FromInt(80),
}},
},
}
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(serviceSpec)
Expect(err).NotTo(HaveOccurred())
// Now create some pods behind the service
podSpec := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
Labels: map[string]string{"service": serviceName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "test",
Image: imageutils.GetPauseImageName(),
},
},
},
}
// Caution: StartPods requires at least one pod to replicate.
// Based on the callers, replicas is always a positive number: zoneCount >= 0 implies (2*zoneCount)+1 > 0,
// so there is no need to test for it here. If the precondition ever changes to allow zero replicas,
// add a replicaCount > 0 check; otherwise StartPods panics.
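// Such a guard could look like the following sketch (not currently needed):
//   if replicaCount <= 0 {
//       framework.Failf("replicaCount must be positive, got %d", replicaCount)
//   }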
framework.ExpectNoError(testutils.StartPods(f.ClientSet, replicaCount, f.Namespace.Name, serviceName, *podSpec, false, framework.Logf))
// Wait for all of them to be scheduled
selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName}))
pods, err := framework.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector)
Expect(err).NotTo(HaveOccurred())
// Now make sure they're spread across zones
zoneNames, err := getZoneNames(f.ClientSet)
Expect(err).NotTo(HaveOccurred())
Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames)).To(Equal(true))
}
// Find the name of the zone in which a Node is running
func getZoneNameForNode(node v1.Node) (string, error) {
for key, value := range node.Labels {
if key == kubeletapis.LabelZoneFailureDomain {
return value, nil
}
}
return "", fmt.Errorf("Zone name for node %s not found. No label with key %s",
node.Name, kubeletapis.LabelZoneFailureDomain)
}
// TODO (verult) Merge with framework.GetClusterZones()
// Find the names of all zones in which we have nodes in this cluster.
func getZoneNames(c clientset.Interface) ([]string, error) {
zoneNames := sets.NewString()
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return nil, err
}
for _, node := range nodes.Items {
zoneName, err := getZoneNameForNode(node)
Expect(err).NotTo(HaveOccurred())
zoneNames.Insert(zoneName)
}
return zoneNames.List(), nil
}
// Return the number of zones in which we have nodes in this cluster.
func getZoneCount(c clientset.Interface) (int, error) {
zoneNames, err := getZoneNames(c)
if err != nil {
return -1, err
}
return len(zoneNames), nil
}
// Find the name of the zone in which the pod is scheduled
func getZoneNameForPod(c clientset.Interface, pod v1.Pod) (string, error) {
By(fmt.Sprintf("Getting zone name for pod %s, on node %s", pod.Name, pod.Spec.NodeName))
node, err := c.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
return getZoneNameForNode(*node)
}
// Determine whether a set of pods are approximately evenly spread
// across a given set of zones
func checkZoneSpreading(c clientset.Interface, pods *v1.PodList, zoneNames []string) (bool, error) {
podsPerZone := make(map[string]int)
for _, zoneName := range zoneNames {
podsPerZone[zoneName] = 0
}
for _, pod := range pods.Items {
if pod.DeletionTimestamp != nil {
continue
}
zoneName, err := getZoneNameForPod(c, pod)
Expect(err).NotTo(HaveOccurred())
podsPerZone[zoneName] = podsPerZone[zoneName] + 1
}
minPodsPerZone := math.MaxInt32
maxPodsPerZone := 0
for _, podCount := range podsPerZone {
if podCount < minPodsPerZone {
minPodsPerZone = podCount
}
if podCount > maxPodsPerZone {
maxPodsPerZone = podCount
}
}
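// With R pods over Z zones, an even spread means the per-zone counts differ by at most one
// (for example, 5 pods over 2 zones split 3 and 2), which is what the "within 1" assertion below checks.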
Expect(minPodsPerZone).To(BeNumerically("~", maxPodsPerZone, 1),
"Pods were not evenly spread across zones. %d in one zone and %d in another zone",
minPodsPerZone, maxPodsPerZone)
return true, nil
}
// Check that the pods comprising a replication controller get spread evenly across available zones
func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
name := "ubelite-spread-rc-" + string(uuid.NewUUID())
By(fmt.Sprintf("Creating replication controller %s", name))
controller, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(&v1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
Spec: v1.ReplicationControllerSpec{
Replicas: &replicaCount,
Selector: map[string]string{
"name": name,
},
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"name": name},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: name,
Image: image,
Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},
},
},
},
})
Expect(err).NotTo(HaveOccurred())
// Cleanup the replication controller when we are done.
defer func() {
// Resize the replication controller to zero to get rid of pods.
if err := framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, controller.Name); err != nil {
framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
}
}()
// List the pods, making sure we observe all the replicas.
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicaCount)
Expect(err).NotTo(HaveOccurred())
// Wait for all of them to be scheduled
By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled. Selector: %v", replicaCount, name, selector))
pods, err = framework.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector)
Expect(err).NotTo(HaveOccurred())
// Now make sure they're spread across zones
zoneNames, err := getZoneNames(f.ClientSet)
Expect(err).NotTo(HaveOccurred())
Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames)).To(Equal(true))
}

View File

@@ -0,0 +1,265 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"fmt"
"strconv"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
compute "google.golang.org/api/compute/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/test/e2e/framework"
)
var _ = SIGDescribe("Multi-AZ Cluster Volumes [sig-storage]", func() {
f := framework.NewDefaultFramework("multi-az")
var zoneCount int
var err error
image := framework.ServeHostnameImage
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
if zoneCount <= 0 {
zoneCount, err = getZoneCount(f.ClientSet)
Expect(err).NotTo(HaveOccurred())
}
By(fmt.Sprintf("Checking for multi-zone cluster. Zone count = %d", zoneCount))
msg := fmt.Sprintf("Zone count is %d, only run for multi-zone clusters, skipping test", zoneCount)
framework.SkipUnlessAtLeast(zoneCount, 2, msg)
// TODO: SkipUnlessDefaultScheduler() // Non-default schedulers might not spread
})
It("should schedule pods in the same zones as statically provisioned PVs", func() {
PodsUseStaticPVsOrFail(f, (2*zoneCount)+1, image)
})
It("should only be allowed to provision PDs in zones where nodes exist", func() {
OnlyAllowNodeZones(f, zoneCount, image)
})
})
// OnlyAllowNodeZones tests that GetAllCurrentZones returns only zones with Nodes
func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
gceCloud, err := framework.GetGCECloud()
Expect(err).NotTo(HaveOccurred())
// Get all the zones that the nodes are in
expectedZones, err := gceCloud.GetAllZonesFromCloudProvider()
Expect(err).NotTo(HaveOccurred())
framework.Logf("Expected zones: %v", expectedZones)
// Get all the zones in this current region
region := gceCloud.Region()
allZonesInRegion, err := gceCloud.ListZonesInRegion(region)
Expect(err).NotTo(HaveOccurred())
var extraZone string
for _, zone := range allZonesInRegion {
if !expectedZones.Has(zone.Name) {
extraZone = zone.Name
break
}
}
Expect(extraZone).NotTo(Equal(""), fmt.Sprintf("No extra zones available in region %s", region))
By(fmt.Sprintf("starting a compute instance in unused zone: %v\n", extraZone))
project := framework.TestContext.CloudConfig.ProjectID
zone := extraZone
myuuid := string(uuid.NewUUID())
name := "compute-" + myuuid
imageURL := "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140606"
rb := &compute.Instance{
MachineType: "zones/" + zone + "/machineTypes/f1-micro",
Disks: []*compute.AttachedDisk{
{
AutoDelete: true,
Boot: true,
Type: "PERSISTENT",
InitializeParams: &compute.AttachedDiskInitializeParams{
DiskName: "my-root-pd-" + myuuid,
SourceImage: imageURL,
},
},
},
NetworkInterfaces: []*compute.NetworkInterface{
{
AccessConfigs: []*compute.AccessConfig{
{
Type: "ONE_TO_ONE_NAT",
Name: "External NAT",
},
},
Network: "/global/networks/default",
},
},
Name: name,
}
err = gceCloud.InsertInstance(project, zone, rb)
Expect(err).NotTo(HaveOccurred())
defer func() {
// Teardown of the compute instance
framework.Logf("Deleting compute resource: %v", name)
err := gceCloud.DeleteInstance(project, zone, name)
Expect(err).NotTo(HaveOccurred())
}()
By("Creating zoneCount+1 PVCs and making sure PDs are only provisioned in zones with nodes")
// Create some (zoneCount+1) PVCs with names of form "pvc-x" where x is 1...zoneCount+1
// This will exploit ChooseZoneForVolume in pkg/volume/util.go to provision them in all the zones it "sees"
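// Rough sketch of why this works (based on the vendored helper, stated here as an assumption rather than
// verified): ChooseZoneForVolume derives a hash from the claim name and adds any trailing "-<number>"
// suffix as an offset into the sorted list of zones that have nodes, so pvc-1 ... pvc-N rotate across
// those zones and zoneCount+1 claims should land in every node zone at least once.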
var pvcList []*v1.PersistentVolumeClaim
c := f.ClientSet
ns := f.Namespace.Name
for index := 1; index <= zoneCount+1; index++ {
pvc := newNamedDefaultClaim(ns, index)
pvc, err = framework.CreatePVC(c, ns, pvc)
Expect(err).NotTo(HaveOccurred())
pvcList = append(pvcList, pvc)
// Defer the cleanup
defer func() {
framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name)
err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil)
if err != nil {
framework.Failf("Error deleting claim %q. Error: %v", pvc.Name, err)
}
}()
}
// Wait for all claims bound
for _, claim := range pvcList {
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
}
pvZones := sets.NewString()
By("Checking that PDs have been provisioned in only the expected zones")
for _, claim := range pvcList {
// Get a new copy of the claim to have all fields populated
claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Get the related PV
pv, err := c.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pvZone, ok := pv.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain]
Expect(ok).To(BeTrue(), "PV has no LabelZone to be found")
pvZones.Insert(pvZone)
}
Expect(pvZones.Equal(expectedZones)).To(BeTrue(), fmt.Sprintf("PDs provisioned in unwanted zones. We want zones: %v, got: %v", expectedZones, pvZones))
}
type staticPVTestConfig struct {
pvSource *v1.PersistentVolumeSource
pv *v1.PersistentVolume
pvc *v1.PersistentVolumeClaim
pod *v1.Pod
}
// Check that the pods using statically created PVs get scheduled to the same zone that the PV is in.
func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) {
var err error
c := f.ClientSet
ns := f.Namespace.Name
zones, err := getZoneNames(c)
Expect(err).NotTo(HaveOccurred())
By("Creating static PVs across zones")
configs := make([]*staticPVTestConfig, podCount)
for i := range configs {
configs[i] = &staticPVTestConfig{}
}
defer func() {
By("Cleaning up pods and PVs")
for _, config := range configs {
framework.DeletePodOrFail(c, ns, config.pod.Name)
}
for _, config := range configs {
framework.WaitForPodNoLongerRunningInNamespace(c, config.pod.Name, ns)
framework.PVPVCCleanup(c, ns, config.pv, config.pvc)
err = framework.DeletePVSource(config.pvSource)
Expect(err).NotTo(HaveOccurred())
}
}()
for i, config := range configs {
zone := zones[i%len(zones)]
config.pvSource, err = framework.CreatePVSource(zone)
Expect(err).NotTo(HaveOccurred())
pvConfig := framework.PersistentVolumeConfig{
NamePrefix: "multizone-pv",
PVSource: *config.pvSource,
Prebind: nil,
}
className := ""
pvcConfig := framework.PersistentVolumeClaimConfig{StorageClassName: &className}
config.pv, config.pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
Expect(err).NotTo(HaveOccurred())
}
By("Waiting for all PVCs to be bound")
for _, config := range configs {
framework.WaitOnPVandPVC(c, ns, config.pv, config.pvc)
}
By("Creating pods for each static PV")
for _, config := range configs {
podConfig := framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{config.pvc}, false, "")
config.pod, err = c.CoreV1().Pods(ns).Create(podConfig)
Expect(err).NotTo(HaveOccurred())
}
By("Waiting for all pods to be running")
for _, config := range configs {
err = framework.WaitForPodRunningInNamespace(c, config.pod)
Expect(err).NotTo(HaveOccurred())
}
}
func newNamedDefaultClaim(ns string, index int) *v1.PersistentVolumeClaim {
claim := v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "pvc-" + strconv.Itoa(index),
Namespace: ns,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
},
},
},
}
return &claim
}