Bumping k8s dependencies to 1.13

Cheng Xing committed 2018-11-16 14:08:25 -08:00
parent 305407125c
commit b4c0b68ec7
8002 changed files with 884099 additions and 276228 deletions


@@ -24,12 +24,13 @@ import (
"time"
"k8s.io/api/core/v1"
schedulerapi "k8s.io/api/scheduling/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
nodeutil "k8s.io/kubernetes/pkg/api/v1/node"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/eviction"
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
@@ -42,7 +43,7 @@ import (
 )
 
 // Eviction Policy is described here:
-// https://github.com/kubernetes/community/blob/master/contributors/design-proposals/kubelet-eviction.md
+// https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/kubelet-eviction.md
 
 const (
 	postTestConditionMonitoringPeriod = 1 * time.Minute
@@ -232,7 +233,6 @@ var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Se
 	evictionTestTimeout := 10 * time.Minute
 	Context(fmt.Sprintf(testContextFmt, "evictions due to pod local storage violations"), func() {
 		tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
-			initialConfig.FeatureGates[string(features.LocalStorageCapacityIsolation)] = true
 			// setting a threshold to 0% disables; non-empty map overrides default value (necessary due to omitempty)
 			initialConfig.EvictionHard = map[string]string{"memory.available": "0%"}
 		})
@@ -285,9 +285,12 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
 	expectedNodeCondition := v1.NodeMemoryPressure
 	expectedStarvedResource := v1.ResourceMemory
 	pressureTimeout := 10 * time.Minute
+
+	highPriorityClassName := f.BaseName + "-high-priority"
+	highPriority := int32(999999999)
+
 	Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
 		tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
-			initialConfig.FeatureGates[string(features.PodPriority)] = true
 			memoryConsumed := resource.MustParse("600Mi")
 			summary := eventuallyGetSummary()
 			availableBytes := *(summary.Node.Memory.AvailableBytes)
@@ -297,6 +300,14 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
 			initialConfig.EvictionHard = map[string]string{"memory.available": fmt.Sprintf("%d", availableBytes-uint64(memoryConsumed.Value()))}
 			initialConfig.EvictionMinimumReclaim = map[string]string{}
 		})
+		BeforeEach(func() {
+			_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
+			Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
+		})
+		AfterEach(func() {
+			err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
+			Expect(err).NotTo(HaveOccurred())
+		})
 		specs := []podEvictSpec{
 			{
 				evictionPriority: 2,
@@ -318,8 +329,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
 				}),
 			},
 		}
-		systemPriority := int32(2147483647)
-		specs[1].pod.Spec.Priority = &systemPriority
+		specs[1].pod.Spec.PriorityClassName = highPriorityClassName
 		runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logMemoryMetrics, specs)
 	})
 })
@@ -332,10 +342,12 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
 	expectedNodeCondition := v1.NodeDiskPressure
 	expectedStarvedResource := v1.ResourceEphemeralStorage
 	pressureTimeout := 10 * time.Minute
+
+	highPriorityClassName := f.BaseName + "-high-priority"
+	highPriority := int32(999999999)
+
 	Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
 		tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
-			initialConfig.FeatureGates[string(features.PodPriority)] = true
-			initialConfig.FeatureGates[string(features.LocalStorageCapacityIsolation)] = true
 			diskConsumed := resource.MustParse("350Mi")
 			summary := eventuallyGetSummary()
 			availableBytes := *(summary.Node.Fs.AvailableBytes)
@@ -345,6 +357,14 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
 			initialConfig.EvictionHard = map[string]string{"nodefs.available": fmt.Sprintf("%d", availableBytes-uint64(diskConsumed.Value()))}
 			initialConfig.EvictionMinimumReclaim = map[string]string{}
 		})
+		BeforeEach(func() {
+			_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
+			Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
+		})
+		AfterEach(func() {
+			err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
+			Expect(err).NotTo(HaveOccurred())
+		})
 		specs := []podEvictSpec{
 			{
 				evictionPriority: 2,
@@ -367,8 +387,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
 				}),
 			},
 		}
-		systemPriority := int32(2147483647)
-		specs[1].pod.Spec.Priority = &systemPriority
+		specs[1].pod.Spec.PriorityClassName = highPriorityClassName
 		runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logDiskMetrics, specs)
 	})
 })
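
For context on the functional change in this file: instead of writing a raw value into pod.Spec.Priority, the eviction tests now create a named PriorityClass through the scheduling.k8s.io/v1beta1 API and have the pod reference it via pod.Spec.PriorityClassName. Below is a minimal standalone sketch of that create/use/delete pattern against the client-go API of this era (pre-1.17, no context arguments); the kubeconfig path and class name are illustrative placeholders, not values from this commit.

package main

import (
	"fmt"

	schedulerapi "k8s.io/api/scheduling/v1beta1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative kubeconfig path; assumes a reachable cluster.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(config)

	// Create the PriorityClass that pods will reference by name.
	pc := &schedulerapi.PriorityClass{
		ObjectMeta: metav1.ObjectMeta{Name: "example-high-priority"},
		Value:      999999999,
	}
	_, err = clientset.SchedulingV1beta1().PriorityClasses().Create(pc)
	// Tolerate IsAlreadyExists so repeated runs against the same cluster succeed,
	// mirroring the BeforeEach blocks added in the diff above.
	if err != nil && !errors.IsAlreadyExists(err) {
		panic(err)
	}
	fmt.Println("created PriorityClass", pc.Name)

	// A pod spec would then reference it: pod.Spec.PriorityClassName = pc.Name

	// Clean up, mirroring the tests' AfterEach.
	if err := clientset.SchedulingV1beta1().PriorityClasses().Delete(pc.Name, &metav1.DeleteOptions{}); err != nil {
		panic(err)
	}
}

Creating the class through the API rather than hard-coding int32(2147483647) into the pod spec exercises the real scheduling path and avoids reserving a system-level priority value.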