Add generated file

This PR adds generated files under pkg/client and the vendor folder.
This commit is contained in:
xing-yang
2018-07-12 10:55:15 -07:00
parent 36b1de0341
commit e213d1890d
17729 changed files with 5090889 additions and 0 deletions

View File

@@ -0,0 +1,60 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"scheduler_perf_types.go",
"util.go",
],
importpath = "k8s.io/kubernetes/test/integration/scheduler_perf",
deps = [
"//pkg/scheduler:go_default_library",
"//pkg/scheduler/algorithmprovider:go_default_library",
"//test/integration/util:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
go_test(
name = "go_default_test",
size = "large",
srcs = [
"main_test.go",
"scheduler_bench_test.go",
"scheduler_test.go",
],
embed = [":go_default_library"],
tags = ["integration"],
deps = [
"//pkg/kubelet/apis:go_default_library",
"//pkg/scheduler:go_default_library",
"//test/integration/framework:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -0,0 +1,16 @@
approvers:
- bsalamat
- davidopp
- gmarek
- jayunit100
- timothysc
- wojtek-t
reviewers:
- bsalamat
- davidopp
- jayunit100
- k82cn
- ravisantoshgudimetla
- sjug
- timothysc
- wojtek-t

View File

@@ -0,0 +1,48 @@
Scheduler Performance Test
======
Motivation
------
We already have a performance testing system -- Kubemark. However, Kubemark requires setting up and bootstrapping a whole cluster, which takes a lot of time.
We want a standard way to reproduce scheduling-latency metrics results and to benchmark the scheduler as simply and quickly as possible. We have the following goals:
- Save time on testing
  - The test and benchmark can be run on a single box.
    We only set up the components necessary for scheduling, without booting up a full cluster.
- Profile runtime metrics to find bottlenecks
  - Write scheduler integration tests that focus on performance measurement.
    Take advantage of Go profiling tools and collect fine-grained metrics,
    such as CPU, memory, and block profiles.
- Reproduce test results easily
  - We want a known place to run performance-related tests for the scheduler.
    Developers should just run one script to collect all the information they need.
Currently the test suite has the following:
- density test (by adding a new Go test)
  - schedules 30k pods on 1000 (fake) nodes and 3k pods on 100 (fake) nodes
  - prints out the scheduling rate every second
  - lets you see how the rate changes as more pods are scheduled
- benchmark
  - makes use of `go test -bench` and reports nanoseconds/op (see the sketch after this list)
  - schedules `b.N` pods when the cluster has N nodes and P scheduled pods. Since it takes a
    relatively long time to finish one round, `b.N` is small: 10 - 100.
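As a minimal sketch, the benchmarks can also be invoked directly with `go test`; the flags below mirror what `./test-performance.sh` in this directory does, and assume etcd has been started the same way the script starts it:

```shell
# In test/integration/scheduler_perf: run only the benchmarks
# (-run=xxxx matches no Go tests) and collect a CPU profile.
go test -bench=. -run=xxxx -cpuprofile=prof.out -timeout=60m
```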
How To Run
------
```shell
# In Kubernetes root path
make generated_files
cd test/integration/scheduler_perf
./test-performance.sh
```
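The script runs the density tests by default. As `test-performance.sh` (added in this PR) shows, setting `RUN_BENCHMARK` additionally compiles and runs the benchmark suite with CPU profiling before the density tests:

```shell
# In test/integration/scheduler_perf
RUN_BENCHMARK=true ./test-performance.sh
```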
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/test/component/scheduler/perf/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/test/integration/scheduler_perf/README.md?pixel)]()

View File

@@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package benchmark
import (
"testing"
"k8s.io/kubernetes/test/integration/framework"
)
func TestMain(m *testing.M) {
framework.EtcdMain(m.Run)
}

View File

@@ -0,0 +1,161 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package benchmark
import (
"fmt"
"testing"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/test/integration/framework"
testutils "k8s.io/kubernetes/test/utils"
"github.com/golang/glog"
)
// BenchmarkScheduling benchmarks the scheduling rate when the cluster has
// various quantities of nodes and scheduled pods.
func BenchmarkScheduling(b *testing.B) {
tests := []struct{ nodes, existingPods, minPods int }{
{nodes: 100, existingPods: 0, minPods: 100},
{nodes: 100, existingPods: 1000, minPods: 100},
{nodes: 1000, existingPods: 0, minPods: 100},
{nodes: 1000, existingPods: 1000, minPods: 100},
}
setupStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("rc1")
testStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("rc2")
for _, test := range tests {
name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
b.Run(name, func(b *testing.B) {
benchmarkScheduling(test.nodes, test.existingPods, test.minPods, setupStrategy, testStrategy, b)
})
}
}
// BenchmarkSchedulingAntiAffinity benchmarks the scheduling rate of pods with
// PodAntiAffinity rules when the cluster has various quantities of nodes and
// scheduled pods.
func BenchmarkSchedulingAntiAffinity(b *testing.B) {
tests := []struct{ nodes, existingPods, minPods int }{
{nodes: 500, existingPods: 250, minPods: 250},
{nodes: 500, existingPods: 5000, minPods: 250},
}
// The setup strategy creates pods with no affinity rules.
setupStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("setup")
// The test strategy creates pods with anti-affinity for each other.
testBasePod := makeBasePodWithAntiAffinity(
map[string]string{"name": "test", "color": "green"},
map[string]string{"color": "green"})
testStrategy := testutils.NewCustomCreatePodStrategy(testBasePod)
for _, test := range tests {
name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
b.Run(name, func(b *testing.B) {
benchmarkScheduling(test.nodes, test.existingPods, test.minPods, setupStrategy, testStrategy, b)
})
}
}
// makeBasePodWithAntiAffinity creates a Pod object to be used as a template.
// The Pod has a PodAntiAffinity requirement against pods with the given labels.
func makeBasePodWithAntiAffinity(podLabels, affinityLabels map[string]string) *v1.Pod {
basePod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "affinity-pod-",
Labels: podLabels,
},
Spec: testutils.MakePodSpec(),
}
basePod.Spec.Affinity = &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affinityLabels,
},
TopologyKey: apis.LabelHostname,
},
},
},
}
return basePod
}
// benchmarkScheduling benchmarks the scheduling rate with a specific number of nodes
// and a specific number of pods already scheduled.
// This will schedule numExistingPods pods before the benchmark starts, and at
// least minPods pods during the benchmark.
func benchmarkScheduling(numNodes, numExistingPods, minPods int,
setupPodStrategy, testPodStrategy testutils.TestPodCreateStrategy,
b *testing.B) {
if b.N < minPods {
b.N = minPods
}
schedulerConfigFactory, finalFunc := mustSetupScheduler()
defer finalFunc()
c := schedulerConfigFactory.GetClient()
nodePreparer := framework.NewIntegrationTestNodePreparer(
c,
[]testutils.CountToStrategy{{Count: numNodes, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
"scheduler-perf-",
)
if err := nodePreparer.PrepareNodes(); err != nil {
glog.Fatalf("%v", err)
}
defer nodePreparer.CleanupNodes()
config := testutils.NewTestPodCreatorConfig()
config.AddStrategy("sched-test", numExistingPods, setupPodStrategy)
podCreator := testutils.NewTestPodCreator(c, config)
podCreator.CreatePods()
for {
scheduled, err := schedulerConfigFactory.GetScheduledPodLister().List(labels.Everything())
if err != nil {
glog.Fatalf("%v", err)
}
if len(scheduled) >= numExistingPods {
break
}
time.Sleep(1 * time.Second)
}
// start benchmark
b.ResetTimer()
config = testutils.NewTestPodCreatorConfig()
config.AddStrategy("sched-test", b.N, testPodStrategy)
podCreator = testutils.NewTestPodCreator(c, config)
podCreator.CreatePods()
for {
// This can potentially affect performance of scheduler, since List() is done under mutex.
// TODO: Setup watch on apiserver and wait until all pods scheduled.
scheduled, err := schedulerConfigFactory.GetScheduledPodLister().List(labels.Everything())
if err != nil {
glog.Fatalf("%v", err)
}
if len(scheduled) >= numExistingPods+b.N {
break
}
// Note: This might introduce slight deviation in accuracy of benchmark results.
// Since the total amount of time is relatively large, it might not be a concern.
time.Sleep(100 * time.Millisecond)
}
}

View File

@@ -0,0 +1,31 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package benchmark
// schedulerPerfConfig is the high-level configuration for all predicates and priorities.
type schedulerPerfConfig struct {
NodeCount int // The number of nodes which will be seeded with metadata to match predicates and have non-trivial priority rankings.
PodCount int // The number of pods which will be seeded with metadata to match predicates and have non-trivial priority rankings.
NodeAffinity *nodeAffinity
// TODO: Other predicates and priorities to be added here.
}
// nodeAffinity priority configuration details.
type nodeAffinity struct {
nodeAffinityKey string // Node Selection Key.
LabelCount int // number of labels to be added to each node or pod.
}

View File

@@ -0,0 +1,273 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package benchmark
import (
"fmt"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/scheduler"
testutils "k8s.io/kubernetes/test/utils"
"math"
"strconv"
"testing"
"time"
)
const (
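// Scheduling-throughput thresholds (pods scheduled per second, sampled once per
// second): a density test fails if the minimum observed rate drops below its
// threshold* value and prints a warning below warning3K.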
warning3K = 100
threshold3K = 30
threshold30K = 30
threshold60K = 30
)
var (
basePodTemplate = &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "sched-perf-pod-",
},
// TODO: this needs to be configurable.
Spec: testutils.MakePodSpec(),
}
baseNodeTemplate = &v1.Node{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "sample-node-",
},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("32Gi"),
},
Phase: v1.NodeRunning,
Conditions: []v1.NodeCondition{
{Type: v1.NodeReady, Status: v1.ConditionTrue},
},
},
}
)
// TestSchedule100Node3KPods schedules 3k pods on 100 nodes.
func TestSchedule100Node3KPods(t *testing.T) {
if testing.Short() {
t.Skip("Skipping because we want to run short tests")
}
config := getBaseConfig(100, 3000)
err := writePodAndNodeTopologyToConfig(config)
if err != nil {
t.Errorf("Misconfiguration happened for nodes/pods chosen to have predicates and priorities")
}
min := schedulePods(config)
if min < threshold3K {
t.Errorf("Failing: Scheduling rate was too low for an interval, we saw rate of %v, which is the allowed minimum of %v ! ", min, threshold3K)
} else if min < warning3K {
fmt.Printf("Warning: pod scheduling throughput for 3k pods was slow for an interval... Saw a interval with very low (%v) scheduling rate!", min)
} else {
fmt.Printf("Minimal observed throughput for 3k pod test: %v\n", min)
}
}
// TestSchedule2000Node60KPods schedules 60k pods on 2000 nodes.
// This test won't fit in the normal 10 minute time window.
// func TestSchedule2000Node60KPods(t *testing.T) {
// if testing.Short() {
// t.Skip("Skipping because we want to run short tests")
// }
// config := defaultSchedulerBenchmarkConfig(2000, 60000)
// if min := schedulePods(config); min < threshold60K {
// t.Errorf("To small pod scheduling throughput for 60k pods. Expected %v got %v", threshold60K, min)
// } else {
// fmt.Printf("Minimal observed throughput for 60k pod test: %v\n", min)
// }
// }
// testConfig contains some input parameters needed for running the test suite.
type testConfig struct {
numPods int
numNodes int
mutatedNodeTemplate *v1.Node
mutatedPodTemplate *v1.Pod
schedulerSupportFunctions scheduler.Configurator
destroyFunc func()
}
// getBaseConfig returns baseConfig after initializing number of nodes and pods.
func getBaseConfig(nodes int, pods int) *testConfig {
schedulerConfigFactory, destroyFunc := mustSetupScheduler()
return &testConfig{
schedulerSupportFunctions: schedulerConfigFactory,
destroyFunc: destroyFunc,
numNodes: nodes,
numPods: pods,
}
}
// schedulePods schedules a specific number of pods on a specific number of nodes.
// This is used to learn the scheduling throughput on various
// cluster sizes and how it changes as more and more pods are scheduled.
// It won't stop until all pods are scheduled.
// It returns the minimum throughput over the whole run.
func schedulePods(config *testConfig) int32 {
defer config.destroyFunc()
prev := 0
// On startup there may be a latent period where NO scheduling occurs (qps = 0).
// We are interested in low scheduling rates (i.e. qps=2), so the initial zero-rate
// period should not be counted toward the minimum.
minQps := int32(math.MaxInt32)
start := time.Now()
// Bake in time for the first pod scheduling event.
for {
time.Sleep(50 * time.Millisecond)
scheduled, err := config.schedulerSupportFunctions.GetScheduledPodLister().List(labels.Everything())
if err != nil {
glog.Fatalf("%v", err)
}
// e.g. with 30,000 pods, wait until at least 300 (1%) are scheduled before we start measuring.
// TODO: Find out why sometimes there may be scheduling blips in the beginning.
if len(scheduled) > config.numPods/100 {
break
}
}
// Count how often each QPS value is observed; useful for debugging tests.
qpsStats := map[int]int{}
// Now that scheduling has started, let's take the pulse on how many pods are being scheduled per second.
for {
// This can potentially affect performance of scheduler, since List() is done under mutex.
// Listing 10000 pods is an expensive operation, so running it frequently may impact scheduler.
// TODO: Setup watch on apiserver and wait until all pods scheduled.
scheduled, err := config.schedulerSupportFunctions.GetScheduledPodLister().List(labels.Everything())
if err != nil {
glog.Fatalf("%v", err)
}
// We are done once all pods have been scheduled;
// return the worst-case (minimum) rate seen over any interval during this time.
// Note this should never be low due to cold start, since we waited for scheduling to begin above.
if len(scheduled) >= config.numPods {
fmt.Printf("Scheduled %v Pods in %v seconds (%v per second on average). min QPS was %v\n",
config.numPods, int(time.Since(start)/time.Second), config.numPods/int(time.Since(start)/time.Second), minQps)
return minQps
}
// There's no point in printing it for the last iteration, as the value is random
qps := len(scheduled) - prev
qpsStats[qps] += 1
if int32(qps) < minQps {
minQps = int32(qps)
}
fmt.Printf("%ds\trate: %d\ttotal: %d (qps frequency: %v)\n", time.Since(start)/time.Second, qps, len(scheduled), qpsStats)
prev = len(scheduled)
time.Sleep(1 * time.Second)
}
}
// mutateNodeTemplate applies the node-affinity labels to the node template used for node creation.
func (na nodeAffinity) mutateNodeTemplate(node *v1.Node) {
labels := make(map[string]string)
for i := 0; i < na.LabelCount; i++ {
value := strconv.Itoa(i)
key := na.nodeAffinityKey + value
labels[key] = value
}
node.ObjectMeta.Labels = labels
return
}
// mutatePodTemplate adds the node-affinity requirements to the pod template.
func (na nodeAffinity) mutatePodTemplate(pod *v1.Pod) {
var nodeSelectorRequirements []v1.NodeSelectorRequirement
for i := 0; i < na.LabelCount; i++ {
value := strconv.Itoa(i)
key := na.nodeAffinityKey + value
nodeSelector := v1.NodeSelectorRequirement{Key: key, Values: []string{value}, Operator: v1.NodeSelectorOpIn}
nodeSelectorRequirements = append(nodeSelectorRequirements, nodeSelector)
}
pod.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: nodeSelectorRequirements,
},
},
},
},
}
}
// generateNodes generates nodes to be used for scheduling.
func (inputConfig *schedulerPerfConfig) generateNodes(config *testConfig) {
for i := 0; i < inputConfig.NodeCount; i++ {
config.schedulerSupportFunctions.GetClient().CoreV1().Nodes().Create(config.mutatedNodeTemplate)
}
for i := 0; i < config.numNodes-inputConfig.NodeCount; i++ {
config.schedulerSupportFunctions.GetClient().CoreV1().Nodes().Create(baseNodeTemplate)
}
}
// generatePods generates pods to be used for scheduling.
func (inputConfig *schedulerPerfConfig) generatePods(config *testConfig) {
testutils.CreatePod(config.schedulerSupportFunctions.GetClient(), "sample", inputConfig.PodCount, config.mutatedPodTemplate)
testutils.CreatePod(config.schedulerSupportFunctions.GetClient(), "sample", config.numPods-inputConfig.PodCount, basePodTemplate)
}
// generatePodAndNodeTopology is the wrapper function for modifying both pods and node objects.
func (inputConfig *schedulerPerfConfig) generatePodAndNodeTopology(config *testConfig) error {
if config.numNodes < inputConfig.NodeCount || config.numPods < inputConfig.PodCount {
return fmt.Errorf("NodeCount cannot be greater than numNodes")
}
nodeAffinity := inputConfig.NodeAffinity
// Node template that needs to be mutated.
mutatedNodeTemplate := baseNodeTemplate
// Pod template that needs to be mutated.
mutatedPodTemplate := basePodTemplate
if nodeAffinity != nil {
nodeAffinity.mutateNodeTemplate(mutatedNodeTemplate)
nodeAffinity.mutatePodTemplate(mutatedPodTemplate)
} // TODO: other predicates/priorities will be processed in subsequent if statements or a switch:).
config.mutatedPodTemplate = mutatedPodTemplate
config.mutatedNodeTemplate = mutatedNodeTemplate
inputConfig.generateNodes(config)
inputConfig.generatePods(config)
return nil
}
// writePodAndNodeTopologyToConfig reads a configuration and then applies it to a test configuration.
// TODO: As of now, this function does nothing except read input values into the priority structs.
func writePodAndNodeTopologyToConfig(config *testConfig) error {
// High Level structure that should be filled for every predicate or priority.
inputConfig := &schedulerPerfConfig{
NodeCount: 100,
PodCount: 3000,
NodeAffinity: &nodeAffinity{
nodeAffinityKey: "kubernetes.io/sched-perf-node-affinity-",
LabelCount: 10,
},
}
err := inputConfig.generatePodAndNodeTopology(config)
if err != nil {
return err
}
return nil
}

View File

@@ -0,0 +1,52 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../../../
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
DIR_BASENAME=$(dirname "${BASH_SOURCE}")
pushd ${DIR_BASENAME}
cleanup() {
popd 2> /dev/null
kube::etcd::cleanup
kube::log::status "performance test cleanup complete"
}
trap cleanup EXIT
kube::etcd::start
# We are using the benchmark suite to do profiling, because it only runs a few pods and
# theoretically has less variance.
if ${RUN_BENCHMARK:-false}; then
kube::log::status "performance test (benchmark) compiling"
go test -c -o "perf.test"
kube::log::status "performance test (benchmark) start"
"./perf.test" -test.bench=. -test.run=xxxx -test.cpuprofile=prof.out -test.short=false
kube::log::status "...benchmark tests finished"
fi
# Running density tests. It might take a long time.
kube::log::status "performance test (density) start"
go test -test.run=. -test.timeout=60m -test.short=false
kube::log::status "...density tests finished"

View File

@@ -0,0 +1,50 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package benchmark
import (
"k8s.io/apimachinery/pkg/runtime/schema"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/scheduler"
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
"k8s.io/kubernetes/test/integration/util"
)
// mustSetupScheduler starts the following components:
// - k8s api server (a.k.a. master)
// - scheduler
// It returns the scheduler config factory and a shutdown function which should be used to
// free up resources after the test is finished.
// Notes on rate limiter:
// - client rate limit is set to 5000.
func mustSetupScheduler() (scheduler.Configurator, util.ShutdownFunc) {
apiURL, apiShutdown := util.StartApiserver()
clientSet := clientset.NewForConfigOrDie(&restclient.Config{
Host: apiURL,
ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}},
QPS: 5000.0,
Burst: 5000,
})
schedulerConfig, schedulerShutdown := util.StartScheduler(clientSet)
shutdownFunc := func() {
schedulerShutdown()
apiShutdown()
}
return schedulerConfig, shutdownFunc
}