Bumping k8s dependencies to 1.13
45 vendor/k8s.io/kubernetes/test/e2e/scalability/BUILD generated vendored
@@ -4,7 +4,6 @@ go_library(
name = "go_default_library",
srcs = [
"density.go",
"empty.go",
"framework.go",
"load.go",
],
@@ -16,34 +15,34 @@ go_library(
"//pkg/apis/core:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/discovery/cached:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/restmapper:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/transport:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/timer:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/discovery/cached:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/restmapper:go_default_library",
"//vendor/k8s.io/client-go/scale:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/transport:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
167 vendor/k8s.io/kubernetes/test/e2e/scalability/density.go generated vendored
@@ -56,6 +56,7 @@ const (
MinSaturationThreshold = 2 * time.Minute
MinPodsPerSecondThroughput = 8
DensityPollInterval = 10 * time.Second
MinPodStartupMeasurements = 500
)

// Maximum container failures this test tolerates before failing.
@@ -64,6 +65,9 @@ var MaxContainerFailures = 0
// Maximum no. of missing measurements related to pod-startup that the test tolerates.
var MaxMissingPodStartupMeasurements = 0

// Number of nodes in the cluster (computed inside BeforeEach).
var nodeCount = 0

type DensityTestConfig struct {
Configs []testutils.RunObjectConfig
ClientSets []clientset.Interface
@@ -168,9 +172,9 @@ func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceC
}
} else {
if numNodes <= 100 {
apiserverCPU = 1.8
apiserverCPU = 2.2
apiserverMem = 1700 * (1024 * 1024)
controllerCPU = 0.6
controllerCPU = 0.8
controllerMem = 530 * (1024 * 1024)
schedulerCPU = 0.4
schedulerMem = 180 * (1024 * 1024)
@@ -285,6 +289,11 @@ func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTi

replicationCtrlStartupPhase := testPhaseDurations.StartPhase(300, "saturation pods creation")
defer replicationCtrlStartupPhase.End()

// Start scheduler CPU profile-gatherer before we begin cluster saturation.
profileGatheringDelay := time.Duration(1+nodeCount/100) * time.Minute
schedulerProfilingStopCh := framework.StartCPUProfileGatherer("kube-scheduler", "density", profileGatheringDelay)

// Start all replication controllers.
startTime := time.Now()
wg := sync.WaitGroup{}
@@ -304,10 +313,16 @@ func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTi
wg.Wait()
startupTime := time.Since(startTime)
close(logStopCh)
close(schedulerProfilingStopCh)
framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))
replicationCtrlStartupPhase.End()

// Grabbing scheduler memory profile after cluster saturation finished.
wg.Add(1)
framework.GatherMemoryProfile("kube-scheduler", "density", &wg)
wg.Wait()

printPodAllocationPhase := testPhaseDurations.StartPhase(400, "printing pod allocation")
defer printPodAllocationPhase.End()
// Print some data about Pod to Node allocation
@@ -366,7 +381,6 @@ func cleanupDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPha
// limits on Docker's concurrent container startup.
var _ = SIGDescribe("Density", func() {
var c clientset.Interface
var nodeCount int
var additionalPodsPrefix string
var ns string
var uuid string
@@ -381,6 +395,7 @@ var _ = SIGDescribe("Density", func() {
missingMeasurements := 0
var testPhaseDurations *timer.TestPhaseTimer
var profileGathererStopCh chan struct{}
var etcdMetricsCollector *framework.EtcdMetricsCollector

// Gathers data prior to framework namespace teardown
AfterEach(func() {
@@ -388,7 +403,7 @@ var _ = SIGDescribe("Density", func() {
close(profileGathererStopCh)
wg := sync.WaitGroup{}
wg.Add(1)
framework.GatherApiserverMemoryProfile(&wg, "density")
framework.GatherMemoryProfile("kube-apiserver", "density", &wg)
wg.Wait()

saturationThreshold := time.Duration((totalPods / MinPodsPerSecondThroughput)) * time.Second
@@ -412,7 +427,7 @@ var _ = SIGDescribe("Density", func() {
summaries = append(summaries, metrics)
}

// Verify scheduler metrics.
// Summarize scheduler metrics.
latency, err := framework.VerifySchedulerLatency(c)
framework.ExpectNoError(err)
if err == nil {
@@ -428,6 +443,14 @@ var _ = SIGDescribe("Density", func() {
}
summaries = append(summaries, latency)
}

// Summarize etcd metrics.
err = etcdMetricsCollector.StopAndSummarize()
framework.ExpectNoError(err)
if err == nil {
summaries = append(summaries, etcdMetricsCollector.GetMetrics())
}

summaries = append(summaries, testPhaseDurations)

framework.PrintSummaries(summaries, testCaseBaseName)
@@ -487,7 +510,11 @@ var _ = SIGDescribe("Density", func() {

// Start apiserver CPU profile gatherer with frequency based on cluster size.
profileGatheringDelay := time.Duration(5+nodeCount/100) * time.Minute
profileGathererStopCh = framework.StartApiserverCPUProfileGatherer(profileGatheringDelay)
profileGathererStopCh = framework.StartCPUProfileGatherer("kube-apiserver", "density", profileGatheringDelay)

// Start etcs metrics collection.
etcdMetricsCollector = framework.NewEtcdMetricsCollector()
etcdMetricsCollector.StartCollecting(time.Minute)
})

type Density struct {
@@ -588,6 +615,7 @@ var _ = SIGDescribe("Density", func() {
timeout := time.Duration(totalPods/podThroughput)*time.Second + 3*time.Minute
// createClients is defined in load.go
clients, internalClients, scalesClients, err := createClients(numberOfCollections)
framework.ExpectNoError(err)
for i := 0; i < numberOfCollections; i++ {
nsName := namespaces[i].Name
secretNames := []string{}
@@ -651,7 +679,7 @@ var _ = SIGDescribe("Density", func() {

// Single client is running out of http2 connections in delete phase, hence we need more.
clients, internalClients, scalesClients, err = createClients(2)

framework.ExpectNoError(err)
dConfig := DensityTestConfig{
ClientSets: clients,
InternalClientsets: internalClients,
@@ -675,7 +703,10 @@ var _ = SIGDescribe("Density", func() {
}
e2eStartupTime = runDensityTest(dConfig, testPhaseDurations, &scheduleThroughputs)
if itArg.runLatencyTest {
By("Scheduling additional Pods to measure startup latencies")
// Pick latencyPodsIterations so that:
// latencyPodsIterations * nodeCount >= MinPodStartupMeasurements.
latencyPodsIterations := (MinPodStartupMeasurements + nodeCount - 1) / nodeCount
By(fmt.Sprintf("Scheduling additional %d Pods to measure startup latencies", latencyPodsIterations*nodeCount))

createTimes := make(map[string]metav1.Time, 0)
nodeNames := make(map[string]string, 0)
@@ -754,58 +785,76 @@ var _ = SIGDescribe("Density", func() {

go controller.Run(stopCh)
}
for latencyPodsIteration := 0; latencyPodsIteration < latencyPodsIterations; latencyPodsIteration++ {
podIndexOffset := latencyPodsIteration * nodeCount
framework.Logf("Creating %d latency pods in range [%d, %d]", nodeCount, podIndexOffset+1, podIndexOffset+nodeCount)

// Create some additional pods with throughput ~5 pods/sec.
latencyPodStartupPhase := testPhaseDurations.StartPhase(800, "latency pods creation")
defer latencyPodStartupPhase.End()
var wg sync.WaitGroup
wg.Add(nodeCount)
// Explicitly set requests here.
// Thanks to it we trigger increasing priority function by scheduling
// a pod to a node, which in turn will result in spreading latency pods
// more evenly between nodes.
cpuRequest := *resource.NewMilliQuantity(nodeCpuCapacity/5, resource.DecimalSI)
memRequest := *resource.NewQuantity(nodeMemCapacity/5, resource.DecimalSI)
if podsPerNode > 30 {
// This is to make them schedulable on high-density tests
// (e.g. 100 pods/node kubemark).
cpuRequest = *resource.NewMilliQuantity(0, resource.DecimalSI)
memRequest = *resource.NewQuantity(0, resource.DecimalSI)
}
rcNameToNsMap := map[string]string{}
for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i)
nsName := namespaces[i%len(namespaces)].Name
rcNameToNsMap[name] = nsName
go createRunningPodFromRC(&wg, c, name, nsName, imageutils.GetPauseImageName(), additionalPodsPrefix, cpuRequest, memRequest)
time.Sleep(200 * time.Millisecond)
}
wg.Wait()
latencyPodStartupPhase.End()
watchTimesLen := len(watchTimes)

latencyMeasurementPhase := testPhaseDurations.StartPhase(810, "pod startup latencies measurement")
defer latencyMeasurementPhase.End()
By("Waiting for all Pods begin observed by the watch...")
waitTimeout := 10 * time.Minute
for start := time.Now(); len(watchTimes) < nodeCount; time.Sleep(10 * time.Second) {
if time.Since(start) < waitTimeout {
framework.Failf("Timeout reached waiting for all Pods being observed by the watch.")
// Create some additional pods with throughput ~5 pods/sec.
latencyPodStartupPhase := testPhaseDurations.StartPhase(800+latencyPodsIteration*10, "latency pods creation")
defer latencyPodStartupPhase.End()
var wg sync.WaitGroup
wg.Add(nodeCount)
// Explicitly set requests here.
// Thanks to it we trigger increasing priority function by scheduling
// a pod to a node, which in turn will result in spreading latency pods
// more evenly between nodes.
cpuRequest := *resource.NewMilliQuantity(nodeCpuCapacity/5, resource.DecimalSI)
memRequest := *resource.NewQuantity(nodeMemCapacity/5, resource.DecimalSI)
if podsPerNode > 30 {
// This is to make them schedulable on high-density tests
// (e.g. 100 pods/node kubemark).
cpuRequest = *resource.NewMilliQuantity(0, resource.DecimalSI)
memRequest = *resource.NewQuantity(0, resource.DecimalSI)
}
}
close(stopCh)

nodeToLatencyPods := make(map[string]int)
for i := range latencyPodStores {
for _, item := range latencyPodStores[i].List() {
pod := item.(*v1.Pod)
nodeToLatencyPods[pod.Spec.NodeName]++
rcNameToNsMap := map[string]string{}
for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(podIndexOffset+i)
nsName := namespaces[i%len(namespaces)].Name
rcNameToNsMap[name] = nsName
go createRunningPodFromRC(&wg, c, name, nsName, imageutils.GetPauseImageName(), additionalPodsPrefix, cpuRequest, memRequest)
time.Sleep(200 * time.Millisecond)
}
for node, count := range nodeToLatencyPods {
if count > 1 {
framework.Logf("%d latency pods scheduled on %s", count, node)
wg.Wait()
latencyPodStartupPhase.End()

latencyMeasurementPhase := testPhaseDurations.StartPhase(801+latencyPodsIteration*10, "pod startup latencies measurement")
defer latencyMeasurementPhase.End()
By("Waiting for all Pods begin observed by the watch...")
waitTimeout := 10 * time.Minute
for start := time.Now(); len(watchTimes) < watchTimesLen+nodeCount; time.Sleep(10 * time.Second) {
if time.Since(start) < waitTimeout {
framework.Failf("Timeout reached waiting for all Pods being observed by the watch.")
}
}

nodeToLatencyPods := make(map[string]int)
for i := range latencyPodStores {
for _, item := range latencyPodStores[i].List() {
pod := item.(*v1.Pod)
nodeToLatencyPods[pod.Spec.NodeName]++
}
for node, count := range nodeToLatencyPods {
if count > 1 {
framework.Logf("%d latency pods scheduled on %s", count, node)
}
}
}
latencyMeasurementPhase.End()

By("Removing additional replication controllers")
podDeletionPhase := testPhaseDurations.StartPhase(802+latencyPodsIteration*10, "latency pods deletion")
defer podDeletionPhase.End()
deleteRC := func(i int) {
defer GinkgoRecover()
name := additionalPodsPrefix + "-" + strconv.Itoa(podIndexOffset+i+1)
framework.ExpectNoError(framework.DeleteRCAndWaitForGC(c, rcNameToNsMap[name], name))
}
workqueue.Parallelize(25, nodeCount, deleteRC)
podDeletionPhase.End()
}
close(stopCh)

for i := 0; i < len(namespaces); i++ {
nsName := namespaces[i].Name
@@ -893,18 +942,6 @@ var _ = SIGDescribe("Density", func() {
framework.ExpectNoError(framework.VerifyLatencyWithinThreshold(podStartupLatencyThreshold, podStartupLatency.E2ELatency, "pod startup"))

framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c)
latencyMeasurementPhase.End()

By("Removing additional replication controllers")
podDeletionPhase := testPhaseDurations.StartPhase(820, "latency pods deletion")
defer podDeletionPhase.End()
deleteRC := func(i int) {
defer GinkgoRecover()
name := additionalPodsPrefix + "-" + strconv.Itoa(i+1)
framework.ExpectNoError(framework.DeleteRCAndWaitForGC(c, rcNameToNsMap[name], name))
}
workqueue.Parallelize(25, nodeCount, deleteRC)
podDeletionPhase.End()
}
cleanupDensityTest(dConfig, testPhaseDurations)
})
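Editor's note on the density.go change above: the new latency-test flow creates additional pods in batches of nodeCount and repeats for latencyPodsIterations rounds, where the iteration count is the ceiling division (MinPodStartupMeasurements + nodeCount - 1) / nodeCount, so at least MinPodStartupMeasurements startup samples are collected. A minimal, standalone Go sketch of that batching arithmetic (illustrative only, not part of the vendored code; the example cluster sizes are made up, while the 500 matches the MinPodStartupMeasurements constant added in this diff):

// ceildiv_sketch.go: how many latency-pod iterations the test would run.
package main

import "fmt"

// ceilDiv mirrors (MinPodStartupMeasurements + nodeCount - 1) / nodeCount,
// which guarantees iterations*nodeCount >= MinPodStartupMeasurements.
func ceilDiv(a, b int) int {
	return (a + b - 1) / b
}

func main() {
	const minPodStartupMeasurements = 500 // constant added in density.go
	for _, nodeCount := range []int{3, 100, 5000} { // hypothetical cluster sizes
		iterations := ceilDiv(minPodStartupMeasurements, nodeCount)
		fmt.Printf("nodeCount=%d -> %d iterations, %d latency pods total\n",
			nodeCount, iterations, iterations*nodeCount)
	}
}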
54 vendor/k8s.io/kubernetes/test/e2e/scalability/empty.go generated vendored
@@ -1,54 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scalability

import (
"time"

"k8s.io/api/core/v1"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
)

var _ = SIGDescribe("Empty [Feature:Empty]", func() {
f := framework.NewDefaultFramework("empty")

BeforeEach(func() {
c := f.ClientSet
ns := f.Namespace.Name

// TODO: respect --allow-notready-nodes flag in those functions.
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
framework.WaitForAllNodesHealthy(c, time.Minute)

err := framework.CheckTestingNSDeletedExcept(c, ns)
framework.ExpectNoError(err)
})

It("starts a pod", func() {
configs, _, _ := GenerateConfigsForGroup([]*v1.Namespace{f.Namespace}, "empty-pod", 1, 1, imageutils.GetPauseImageName(), []string{}, api.Kind("ReplicationController"), 0, 0)
if len(configs) != 1 {
framework.Failf("generateConfigs should have generated single config")
}
config := configs[0]
config.SetClient(f.ClientSet)
framework.ExpectNoError(config.Run())
})
})
14 vendor/k8s.io/kubernetes/test/e2e/scalability/load.go generated vendored
@@ -106,7 +106,7 @@ var _ = SIGDescribe("Load capacity", func() {
close(profileGathererStopCh)
wg := sync.WaitGroup{}
wg.Add(1)
framework.GatherApiserverMemoryProfile(&wg, "load")
framework.GatherMemoryProfile("kube-apiserver", "load", &wg)
wg.Wait()

// Verify latency metrics
@@ -159,7 +159,7 @@ var _ = SIGDescribe("Load capacity", func() {

// Start apiserver CPU profile gatherer with frequency based on cluster size.
profileGatheringDelay := time.Duration(5+nodeCount/100) * time.Minute
profileGathererStopCh = framework.StartApiserverCPUProfileGatherer(profileGatheringDelay)
profileGathererStopCh = framework.StartCPUProfileGatherer("kube-apiserver", "load", profileGatheringDelay)
})

type Load struct {
@@ -320,16 +320,14 @@ var _ = SIGDescribe("Load capacity", func() {
// We would like to spread scaling replication controllers over time
// to make it possible to create/schedule & delete them in the meantime.
// Currently we assume that <throughput> pods/second average throughput.
// The expected number of created/deleted pods is less than totalPods/3.
scalingTime := time.Duration(totalPods/(3*throughput)) * time.Second

// The expected number of created/deleted pods is totalPods/4 when scaling,
// as each RC changes its size from X to a uniform random value in [X/2, 3X/2].
scalingTime := time.Duration(totalPods/(4*throughput)) * time.Second
framework.Logf("Starting to scale %v objects first time...", itArg.kind)
scaleAllResources(configs, scalingTime, testPhaseDurations.StartPhase(300, "scaling first time"))
By("============================================================================")

framework.Logf("Starting to scale %v objects second time...", itArg.kind)
scaleAllResources(configs, scalingTime, testPhaseDurations.StartPhase(400, "scaling second time"))
By("============================================================================")

// Cleanup all created replication controllers.
// Currently we assume <throughput> pods/second average deletion throughput.
// We may want to revisit it in the future.
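Editor's note on the load.go change above: the scaling budget moves from totalPods/3 to totalPods/4 because, per the new comment, each RC is resized from X to a uniform random value in [X/2, 3X/2], so the expected absolute size change is X/4, and summing over all RCs gives roughly totalPods/4 created/deleted pods. A minimal sketch checking that expectation numerically (illustrative only, not part of the vendored code; the RC size and sample count are arbitrary):

// scaling_expectation_sketch.go: expected |delta| when resizing uniformly in [X/2, 3X/2].
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	const x = 1000.0 // pods in one hypothetical RC
	const samples = 1000000
	total := 0.0
	for i := 0; i < samples; i++ {
		newSize := x/2 + rand.Float64()*x // uniform in [X/2, 3X/2]
		if newSize > x {
			total += newSize - x
		} else {
			total += x - newSize
		}
	}
	// The Monte Carlo mean should land close to the analytic value X/4.
	fmt.Printf("mean |delta| = %.1f (analytic X/4 = %.1f)\n", total/samples, x/4)
}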