Add generated file
This PR adds generated files under pkg/client and the vendor folder.
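For consumers of this repository, the vendored package is imported by its upstream path, which matches the importpath declared in the generated BUILD file below. A minimal sketch (the package name "example" and the variable are illustrative, not part of this PR):

package example

import (
    // Resolved through this repository's vendor/ tree.
    schedulerfactory "k8s.io/kubernetes/pkg/scheduler/factory"
)

// DefaultProvider is an exported constant of the vendored package (see plugins.go below).
var provider = schedulerfactory.DefaultProvider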
vendor/k8s.io/kubernetes/pkg/scheduler/factory/BUILD (generated, vendored, new file, 140 lines)
@@ -0,0 +1,140 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "cache_comparer.go",
        "factory.go",
        "plugins.go",
    ] + select({
        "@io_bazel_rules_go//go/platform:android": [
            "signal.go",
        ],
        "@io_bazel_rules_go//go/platform:darwin": [
            "signal.go",
        ],
        "@io_bazel_rules_go//go/platform:dragonfly": [
            "signal.go",
        ],
        "@io_bazel_rules_go//go/platform:freebsd": [
            "signal.go",
        ],
        "@io_bazel_rules_go//go/platform:linux": [
            "signal.go",
        ],
        "@io_bazel_rules_go//go/platform:nacl": [
            "signal.go",
        ],
        "@io_bazel_rules_go//go/platform:netbsd": [
            "signal.go",
        ],
        "@io_bazel_rules_go//go/platform:openbsd": [
            "signal.go",
        ],
        "@io_bazel_rules_go//go/platform:plan9": [
            "signal.go",
        ],
        "@io_bazel_rules_go//go/platform:solaris": [
            "signal.go",
        ],
        "@io_bazel_rules_go//go/platform:windows": [
            "signal_windows.go",
        ],
        "//conditions:default": [],
    }),
    importpath = "k8s.io/kubernetes/pkg/scheduler/factory",
    deps = [
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/apis/core/helper:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/scheduler:go_default_library",
        "//pkg/scheduler/algorithm:go_default_library",
        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/algorithm/priorities:go_default_library",
        "//pkg/scheduler/api:go_default_library",
        "//pkg/scheduler/api/validation:go_default_library",
        "//pkg/scheduler/cache:go_default_library",
        "//pkg/scheduler/core:go_default_library",
        "//pkg/scheduler/util:go_default_library",
        "//pkg/scheduler/volumebinder:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/informers/apps/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/informers/policy/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/informers/storage/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/listers/apps/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/listers/policy/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/listers/storage/v1:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "cache_comparer_test.go",
        "factory_test.go",
        "plugins_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//pkg/api/testing:go_default_library",
        "//pkg/scheduler:go_default_library",
        "//pkg/scheduler/algorithm:go_default_library",
        "//pkg/scheduler/algorithm/priorities:go_default_library",
        "//pkg/scheduler/api:go_default_library",
        "//pkg/scheduler/api/latest:go_default_library",
        "//pkg/scheduler/cache:go_default_library",
        "//pkg/scheduler/core:go_default_library",
        "//pkg/scheduler/testing:go_default_library",
        "//pkg/scheduler/util:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/util/testing:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
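The select() blocks in the go_library rule above mirror the build constraints of the Go sources: signal.go is listed for every platform except Windows, and signal_windows.go only for Windows, which is how the generator of these vendored BUILD files (typically gazelle) encodes per-OS compilation. A minimal sketch of the Go-side counterpart, using the pre-Go-1.17 constraint syntax; the file below is hypothetical and not part of this PR:

// +build !windows

package factory

// A file guarded like this is compiled on every platform except Windows;
// a sibling file constrained with "// +build windows" supplies the Windows
// variant, matching the signal.go / signal_windows.go split above.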
vendor/k8s.io/kubernetes/pkg/scheduler/factory/cache_comparer.go (generated, vendored, new file, 161 lines)
@@ -0,0 +1,161 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package factory

import (
    "sort"
    "strings"

    "github.com/golang/glog"
    "k8s.io/api/core/v1"
    policy "k8s.io/api/policy/v1beta1"
    "k8s.io/apimachinery/pkg/labels"
    corelisters "k8s.io/client-go/listers/core/v1"
    v1beta1 "k8s.io/client-go/listers/policy/v1beta1"
    schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
    "k8s.io/kubernetes/pkg/scheduler/core"
)

type cacheComparer struct {
    nodeLister corelisters.NodeLister
    podLister  corelisters.PodLister
    pdbLister  v1beta1.PodDisruptionBudgetLister
    cache      schedulercache.Cache
    podQueue   core.SchedulingQueue

    compareStrategy
}

func (c *cacheComparer) Compare() error {
    glog.V(3).Info("cache comparer started")
    defer glog.V(3).Info("cache comparer finished")

    nodes, err := c.nodeLister.List(labels.Everything())
    if err != nil {
        return err
    }

    pods, err := c.podLister.List(labels.Everything())
    if err != nil {
        return err
    }

    pdbs, err := c.pdbLister.List(labels.Everything())
    if err != nil {
        return err
    }

    snapshot := c.cache.Snapshot()

    waitingPods := c.podQueue.WaitingPods()

    if missed, redundant := c.CompareNodes(nodes, snapshot.Nodes); len(missed)+len(redundant) != 0 {
        glog.Warningf("cache mismatch: missed nodes: %s; redundant nodes: %s", missed, redundant)
    }

    if missed, redundant := c.ComparePods(pods, waitingPods, snapshot.Nodes); len(missed)+len(redundant) != 0 {
        glog.Warningf("cache mismatch: missed pods: %s; redundant pods: %s", missed, redundant)
    }

    if missed, redundant := c.ComparePdbs(pdbs, snapshot.Pdbs); len(missed)+len(redundant) != 0 {
        glog.Warningf("cache mismatch: missed pdbs: %s; redundant pdbs: %s", missed, redundant)
    }

    return nil
}

type compareStrategy struct {
}

func (c compareStrategy) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*schedulercache.NodeInfo) (missed, redundant []string) {
    actual := []string{}
    for _, node := range nodes {
        actual = append(actual, node.Name)
    }

    cached := []string{}
    for nodeName := range nodeinfos {
        cached = append(cached, nodeName)
    }

    return compareStrings(actual, cached)
}

func (c compareStrategy) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[string]*schedulercache.NodeInfo) (missed, redundant []string) {
    actual := []string{}
    for _, pod := range pods {
        actual = append(actual, string(pod.UID))
    }

    cached := []string{}
    for _, nodeinfo := range nodeinfos {
        for _, pod := range nodeinfo.Pods() {
            cached = append(cached, string(pod.UID))
        }
    }
    for _, pod := range waitingPods {
        cached = append(cached, string(pod.UID))
    }

    return compareStrings(actual, cached)
}

func (c compareStrategy) ComparePdbs(pdbs []*policy.PodDisruptionBudget, pdbCache map[string]*policy.PodDisruptionBudget) (missed, redundant []string) {
    actual := []string{}
    for _, pdb := range pdbs {
        actual = append(actual, string(pdb.UID))
    }

    cached := []string{}
    for pdbUID := range pdbCache {
        cached = append(cached, pdbUID)
    }

    return compareStrings(actual, cached)
}

func compareStrings(actual, cached []string) (missed, redundant []string) {
    missed, redundant = []string{}, []string{}

    sort.Strings(actual)
    sort.Strings(cached)

    compare := func(i, j int) int {
        if i == len(actual) {
            return 1
        } else if j == len(cached) {
            return -1
        }
        return strings.Compare(actual[i], cached[j])
    }

    for i, j := 0, 0; i < len(actual) || j < len(cached); {
        switch compare(i, j) {
        case 0:
            i++
            j++
        case -1:
            missed = append(missed, actual[i])
            i++
        case 1:
            redundant = append(redundant, cached[j])
            j++
        }
    }

    return
}
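compareStrings above performs a merge-style scan over the two sorted slices: equal entries advance both indices, an entry present only in actual is reported as missed, and an entry present only in cached is reported as redundant. A small standalone sketch of the same logic with made-up input (a hypothetical program, not part of the vendored package):

package main

import (
    "fmt"
    "sort"
    "strings"
)

// compareSorted mirrors the compareStrings helper above: sort both inputs,
// then scan them with two indices like a merge step.
func compareSorted(actual, cached []string) (missed, redundant []string) {
    missed, redundant = []string{}, []string{}
    sort.Strings(actual)
    sort.Strings(cached)
    i, j := 0, 0
    for i < len(actual) || j < len(cached) {
        switch {
        case i == len(actual): // only cached entries remain
            redundant = append(redundant, cached[j])
            j++
        case j == len(cached): // only actual entries remain
            missed = append(missed, actual[i])
            i++
        default:
            switch strings.Compare(actual[i], cached[j]) {
            case 0:
                i, j = i+1, j+1
            case -1:
                missed = append(missed, actual[i])
                i++
            case 1:
                redundant = append(redundant, cached[j])
                j++
            }
        }
    }
    return missed, redundant
}

func main() {
    m, r := compareSorted([]string{"foo", "bar", "baz"}, []string{"bar", "foo", "qux"})
    fmt.Println(m, r) // prints: [baz] [qux]
}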
vendor/k8s.io/kubernetes/pkg/scheduler/factory/cache_comparer_test.go (generated, vendored, new file, 228 lines)
@@ -0,0 +1,228 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package factory

import (
    "reflect"
    "testing"

    "k8s.io/api/core/v1"
    policy "k8s.io/api/policy/v1beta1"
    "k8s.io/apimachinery/pkg/types"
    schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

func TestCompareNodes(t *testing.T) {
    compare := compareStrategy{}

    tests := []struct {
        actual    []string
        cached    []string
        missing   []string
        redundant []string
    }{
        {
            actual:    []string{"foo", "bar"},
            cached:    []string{"bar", "foo", "foobar"},
            missing:   []string{},
            redundant: []string{"foobar"},
        },
        {
            actual:    []string{"foo", "bar", "foobar"},
            cached:    []string{"bar", "foo"},
            missing:   []string{"foobar"},
            redundant: []string{},
        },
        {
            actual:    []string{"foo", "bar", "foobar"},
            cached:    []string{"bar", "foobar", "foo"},
            missing:   []string{},
            redundant: []string{},
        },
    }

    for _, test := range tests {
        nodes := []*v1.Node{}
        for _, nodeName := range test.actual {
            node := &v1.Node{}
            node.Name = nodeName
            nodes = append(nodes, node)
        }

        nodeInfo := make(map[string]*schedulercache.NodeInfo)
        for _, nodeName := range test.cached {
            nodeInfo[nodeName] = &schedulercache.NodeInfo{}
        }

        m, r := compare.CompareNodes(nodes, nodeInfo)

        if !reflect.DeepEqual(m, test.missing) {
            t.Errorf("missing expected to be %s; got %s", test.missing, m)
        }

        if !reflect.DeepEqual(r, test.redundant) {
            t.Errorf("redundant expected to be %s; got %s", test.redundant, r)
        }
    }
}

func TestComparePods(t *testing.T) {
    compare := compareStrategy{}

    tests := []struct {
        actual    []string
        cached    []string
        queued    []string
        missing   []string
        redundant []string
    }{
        {
            actual:    []string{"foo", "bar"},
            cached:    []string{"bar", "foo", "foobar"},
            queued:    []string{},
            missing:   []string{},
            redundant: []string{"foobar"},
        },
        {
            actual:    []string{"foo", "bar"},
            cached:    []string{"foo", "foobar"},
            queued:    []string{"bar"},
            missing:   []string{},
            redundant: []string{"foobar"},
        },
        {
            actual:    []string{"foo", "bar", "foobar"},
            cached:    []string{"bar", "foo"},
            queued:    []string{},
            missing:   []string{"foobar"},
            redundant: []string{},
        },
        {
            actual:    []string{"foo", "bar", "foobar"},
            cached:    []string{"foo"},
            queued:    []string{"bar"},
            missing:   []string{"foobar"},
            redundant: []string{},
        },
        {
            actual:    []string{"foo", "bar", "foobar"},
            cached:    []string{"bar", "foobar", "foo"},
            queued:    []string{},
            missing:   []string{},
            redundant: []string{},
        },
        {
            actual:    []string{"foo", "bar", "foobar"},
            cached:    []string{"foobar", "foo"},
            queued:    []string{"bar"},
            missing:   []string{},
            redundant: []string{},
        },
    }

    for _, test := range tests {
        pods := []*v1.Pod{}
        for _, uid := range test.actual {
            pod := &v1.Pod{}
            pod.UID = types.UID(uid)
            pods = append(pods, pod)
        }

        queuedPods := []*v1.Pod{}
        for _, uid := range test.queued {
            pod := &v1.Pod{}
            pod.UID = types.UID(uid)
            queuedPods = append(queuedPods, pod)
        }

        nodeInfo := make(map[string]*schedulercache.NodeInfo)
        for _, uid := range test.cached {
            pod := &v1.Pod{}
            pod.UID = types.UID(uid)
            pod.Namespace = "ns"
            pod.Name = uid

            nodeInfo[uid] = schedulercache.NewNodeInfo(pod)
        }

        m, r := compare.ComparePods(pods, queuedPods, nodeInfo)

        if !reflect.DeepEqual(m, test.missing) {
            t.Errorf("missing expected to be %s; got %s", test.missing, m)
        }

        if !reflect.DeepEqual(r, test.redundant) {
            t.Errorf("redundant expected to be %s; got %s", test.redundant, r)
        }
    }
}

func TestComparePdbs(t *testing.T) {
    compare := compareStrategy{}

    tests := []struct {
        actual    []string
        cached    []string
        missing   []string
        redundant []string
    }{
        {
            actual:    []string{"foo", "bar"},
            cached:    []string{"bar", "foo", "foobar"},
            missing:   []string{},
            redundant: []string{"foobar"},
        },
        {
            actual:    []string{"foo", "bar", "foobar"},
            cached:    []string{"bar", "foo"},
            missing:   []string{"foobar"},
            redundant: []string{},
        },
        {
            actual:    []string{"foo", "bar", "foobar"},
            cached:    []string{"bar", "foobar", "foo"},
            missing:   []string{},
            redundant: []string{},
        },
    }

    for _, test := range tests {
        pdbs := []*policy.PodDisruptionBudget{}
        for _, uid := range test.actual {
            pdb := &policy.PodDisruptionBudget{}
            pdb.UID = types.UID(uid)
            pdbs = append(pdbs, pdb)
        }

        cache := make(map[string]*policy.PodDisruptionBudget)
        for _, uid := range test.cached {
            pdb := &policy.PodDisruptionBudget{}
            pdb.UID = types.UID(uid)
            cache[uid] = pdb
        }

        m, r := compare.ComparePdbs(pdbs, cache)

        if !reflect.DeepEqual(m, test.missing) {
            t.Errorf("missing expected to be %s; got %s", test.missing, m)
        }

        if !reflect.DeepEqual(r, test.redundant) {
            t.Errorf("redundant expected to be %s; got %s", test.redundant, r)
        }
    }
}
vendor/k8s.io/kubernetes/pkg/scheduler/factory/factory.go (generated, vendored, new file, 1448 lines)
File diff suppressed because it is too large
vendor/k8s.io/kubernetes/pkg/scheduler/factory/factory_test.go (generated, vendored, new file, 647 lines)
@@ -0,0 +1,647 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package factory

import (
    "errors"
    "fmt"
    "net/http"
    "net/http/httptest"
    "reflect"
    "testing"
    "time"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/client-go/informers"
    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/cache"
    utiltesting "k8s.io/client-go/util/testing"
    apitesting "k8s.io/kubernetes/pkg/api/testing"
    "k8s.io/kubernetes/pkg/scheduler"
    "k8s.io/kubernetes/pkg/scheduler/algorithm"
    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
    latestschedulerapi "k8s.io/kubernetes/pkg/scheduler/api/latest"
    schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
    "k8s.io/kubernetes/pkg/scheduler/core"
    schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
    "k8s.io/kubernetes/pkg/scheduler/util"
)

const (
    enableEquivalenceCache = true
    disablePodPreemption   = false
)

func TestCreate(t *testing.T) {
    handler := utiltesting.FakeHandler{
        StatusCode:   500,
        ResponseBody: "",
        T:            t,
    }
    server := httptest.NewServer(&handler)
    defer server.Close()
    client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
    factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight)
    factory.Create()
}

// Test configures a scheduler from policies defined in a file.
// It combines some configurable predicates/priorities with some pre-defined ones.
func TestCreateFromConfig(t *testing.T) {
    var configData []byte
    var policy schedulerapi.Policy

    handler := utiltesting.FakeHandler{
        StatusCode:   500,
        ResponseBody: "",
        T:            t,
    }
    server := httptest.NewServer(&handler)
    defer server.Close()
    client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
    factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight)

    // Pre-register some predicate and priority functions
    RegisterFitPredicate("PredicateOne", PredicateOne)
    RegisterFitPredicate("PredicateTwo", PredicateTwo)
    RegisterPriorityFunction("PriorityOne", PriorityOne, 1)
    RegisterPriorityFunction("PriorityTwo", PriorityTwo, 1)

    configData = []byte(`{
        "kind" : "Policy",
        "apiVersion" : "v1",
        "predicates" : [
            {"name" : "TestZoneAffinity", "argument" : {"serviceAffinity" : {"labels" : ["zone"]}}},
            {"name" : "TestRequireZone", "argument" : {"labelsPresence" : {"labels" : ["zone"], "presence" : true}}},
            {"name" : "PredicateOne"},
            {"name" : "PredicateTwo"}
        ],
        "priorities" : [
            {"name" : "RackSpread", "weight" : 3, "argument" : {"serviceAntiAffinity" : {"label" : "rack"}}},
            {"name" : "PriorityOne", "weight" : 2},
            {"name" : "PriorityTwo", "weight" : 1} ]
    }`)
    if err := runtime.DecodeInto(latestschedulerapi.Codec, configData, &policy); err != nil {
        t.Errorf("Invalid configuration: %v", err)
    }

    factory.CreateFromConfig(policy)
    hpa := factory.GetHardPodAffinitySymmetricWeight()
    if hpa != v1.DefaultHardPodAffinitySymmetricWeight {
        t.Errorf("Wrong hardPodAffinitySymmetricWeight, expected: %d, got: %d", v1.DefaultHardPodAffinitySymmetricWeight, hpa)
    }
}

func TestCreateFromConfigWithHardPodAffinitySymmetricWeight(t *testing.T) {
    var configData []byte
    var policy schedulerapi.Policy

    handler := utiltesting.FakeHandler{
        StatusCode:   500,
        ResponseBody: "",
        T:            t,
    }
    server := httptest.NewServer(&handler)
    defer server.Close()
    client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
    factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight)

    // Pre-register some predicate and priority functions
    RegisterFitPredicate("PredicateOne", PredicateOne)
    RegisterFitPredicate("PredicateTwo", PredicateTwo)
    RegisterPriorityFunction("PriorityOne", PriorityOne, 1)
    RegisterPriorityFunction("PriorityTwo", PriorityTwo, 1)

    configData = []byte(`{
        "kind" : "Policy",
        "apiVersion" : "v1",
        "predicates" : [
            {"name" : "TestZoneAffinity", "argument" : {"serviceAffinity" : {"labels" : ["zone"]}}},
            {"name" : "TestRequireZone", "argument" : {"labelsPresence" : {"labels" : ["zone"], "presence" : true}}},
            {"name" : "PredicateOne"},
            {"name" : "PredicateTwo"}
        ],
        "priorities" : [
            {"name" : "RackSpread", "weight" : 3, "argument" : {"serviceAntiAffinity" : {"label" : "rack"}}},
            {"name" : "PriorityOne", "weight" : 2},
            {"name" : "PriorityTwo", "weight" : 1}
        ],
        "hardPodAffinitySymmetricWeight" : 10
    }`)
    if err := runtime.DecodeInto(latestschedulerapi.Codec, configData, &policy); err != nil {
        t.Errorf("Invalid configuration: %v", err)
    }
    factory.CreateFromConfig(policy)
    hpa := factory.GetHardPodAffinitySymmetricWeight()
    if hpa != 10 {
        t.Errorf("Wrong hardPodAffinitySymmetricWeight, expected: %d, got: %d", 10, hpa)
    }
}

func TestCreateFromEmptyConfig(t *testing.T) {
    var configData []byte
    var policy schedulerapi.Policy

    handler := utiltesting.FakeHandler{
        StatusCode:   500,
        ResponseBody: "",
        T:            t,
    }
    server := httptest.NewServer(&handler)
    defer server.Close()
    client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
    factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight)

    configData = []byte(`{}`)
    if err := runtime.DecodeInto(latestschedulerapi.Codec, configData, &policy); err != nil {
        t.Errorf("Invalid configuration: %v", err)
    }

    factory.CreateFromConfig(policy)
}

// Test configures a scheduler from a policy that does not specify any
// predicate/priority.
// The predicate/priority from DefaultProvider will be used.
func TestCreateFromConfigWithUnspecifiedPredicatesOrPriorities(t *testing.T) {
    handler := utiltesting.FakeHandler{
        StatusCode:   500,
        ResponseBody: "",
        T:            t,
    }
    server := httptest.NewServer(&handler)
    defer server.Close()
    client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
    factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight)

    RegisterFitPredicate("PredicateOne", PredicateOne)
    RegisterPriorityFunction("PriorityOne", PriorityOne, 1)

    RegisterAlgorithmProvider(DefaultProvider, sets.NewString("PredicateOne"), sets.NewString("PriorityOne"))

    configData := []byte(`{
        "kind" : "Policy",
        "apiVersion" : "v1"
    }`)
    var policy schedulerapi.Policy
    if err := runtime.DecodeInto(latestschedulerapi.Codec, configData, &policy); err != nil {
        t.Fatalf("Invalid configuration: %v", err)
    }

    config, err := factory.CreateFromConfig(policy)
    if err != nil {
        t.Fatalf("Failed to create scheduler from configuration: %v", err)
    }
    if _, found := config.Algorithm.Predicates()["PredicateOne"]; !found {
        t.Errorf("Expected predicate PredicateOne from %q", DefaultProvider)
    }
    if len(config.Algorithm.Prioritizers()) != 1 || config.Algorithm.Prioritizers()[0].Name != "PriorityOne" {
        t.Errorf("Expected priority PriorityOne from %q", DefaultProvider)
    }
}

// Test configures a scheduler from a policy that contains empty
// predicate/priority.
// Empty predicate/priority sets will be used.
func TestCreateFromConfigWithEmptyPredicatesOrPriorities(t *testing.T) {
    handler := utiltesting.FakeHandler{
        StatusCode:   500,
        ResponseBody: "",
        T:            t,
    }
    server := httptest.NewServer(&handler)
    defer server.Close()
    client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
    factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight)

    RegisterFitPredicate("PredicateOne", PredicateOne)
    RegisterPriorityFunction("PriorityOne", PriorityOne, 1)

    RegisterAlgorithmProvider(DefaultProvider, sets.NewString("PredicateOne"), sets.NewString("PriorityOne"))

    configData := []byte(`{
        "kind" : "Policy",
        "apiVersion" : "v1",
        "predicates" : [],
        "priorities" : []
    }`)
    var policy schedulerapi.Policy
    if err := runtime.DecodeInto(latestschedulerapi.Codec, configData, &policy); err != nil {
        t.Fatalf("Invalid configuration: %v", err)
    }

    config, err := factory.CreateFromConfig(policy)
    if err != nil {
        t.Fatalf("Failed to create scheduler from configuration: %v", err)
    }
    if len(config.Algorithm.Predicates()) != 0 {
        t.Error("Expected empty predicate sets")
    }
    if len(config.Algorithm.Prioritizers()) != 0 {
        t.Error("Expected empty priority sets")
    }
}

func PredicateOne(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
    return true, nil, nil
}

func PredicateTwo(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
    return true, nil, nil
}

func PriorityOne(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
    return []schedulerapi.HostPriority{}, nil
}

func PriorityTwo(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
    return []schedulerapi.HostPriority{}, nil
}

func TestDefaultErrorFunc(t *testing.T) {
    testPod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"},
        Spec:       apitesting.V1DeepEqualSafePodSpec(),
    }
    handler := utiltesting.FakeHandler{
        StatusCode:   200,
        ResponseBody: runtime.EncodeOrDie(schedulertesting.Test.Codec(), testPod),
        T:            t,
    }
    mux := http.NewServeMux()

    // FakeHandler mustn't be sent requests other than the one you want to test.
    mux.Handle(schedulertesting.Test.ResourcePath(string(v1.ResourcePods), "bar", "foo"), &handler)
    server := httptest.NewServer(mux)
    defer server.Close()
    client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
    factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight)
    queue := &core.FIFO{FIFO: cache.NewFIFO(cache.MetaNamespaceKeyFunc)}
    podBackoff := util.CreatePodBackoff(1*time.Millisecond, 1*time.Second)
    errFunc := factory.MakeDefaultErrorFunc(podBackoff, queue)

    errFunc(testPod, nil)
    for {
        // This is a terrible way to do this but I plan on replacing this
        // whole error handling system in the future. The test will time
        // out if something doesn't work.
        time.Sleep(10 * time.Millisecond)
        got, exists, _ := queue.Get(testPod)
        if !exists {
            continue
        }
        handler.ValidateRequest(t, schedulertesting.Test.ResourcePath(string(v1.ResourcePods), "bar", "foo"), "GET", nil)
        if e, a := testPod, got; !reflect.DeepEqual(e, a) {
            t.Errorf("Expected %v, got %v", e, a)
        }
        break
    }
}

func TestNodeEnumerator(t *testing.T) {
    testList := &v1.NodeList{
        Items: []v1.Node{
            {ObjectMeta: metav1.ObjectMeta{Name: "foo"}},
            {ObjectMeta: metav1.ObjectMeta{Name: "bar"}},
            {ObjectMeta: metav1.ObjectMeta{Name: "baz"}},
        },
    }
    me := nodeEnumerator{testList}

    if e, a := 3, me.Len(); e != a {
        t.Fatalf("expected %v, got %v", e, a)
    }
    for i := range testList.Items {
        gotObj := me.Get(i)
        if e, a := testList.Items[i].Name, gotObj.(*v1.Node).Name; e != a {
            t.Errorf("Expected %v, got %v", e, a)
        }
        if e, a := &testList.Items[i], gotObj; !reflect.DeepEqual(e, a) {
            t.Errorf("Expected %#v, got %#v", e, a)
        }
    }
}

func TestBind(t *testing.T) {
    table := []struct {
        binding *v1.Binding
    }{
        {binding: &v1.Binding{
            ObjectMeta: metav1.ObjectMeta{
                Namespace: metav1.NamespaceDefault,
                Name:      "foo",
            },
            Target: v1.ObjectReference{
                Name: "foohost.kubernetes.mydomain.com",
            },
        }},
    }

    for _, item := range table {
        handler := utiltesting.FakeHandler{
            StatusCode:   200,
            ResponseBody: "",
            T:            t,
        }
        server := httptest.NewServer(&handler)
        defer server.Close()
        client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
        b := binder{client}

        if err := b.Bind(item.binding); err != nil {
            t.Errorf("Unexpected error: %v", err)
            continue
        }
        expectedBody := runtime.EncodeOrDie(schedulertesting.Test.Codec(), item.binding)
        handler.ValidateRequest(t,
            schedulertesting.Test.SubResourcePath(string(v1.ResourcePods), metav1.NamespaceDefault, "foo", "binding"),
            "POST", &expectedBody)
    }
}

func TestInvalidHardPodAffinitySymmetricWeight(t *testing.T) {
    handler := utiltesting.FakeHandler{
        StatusCode:   500,
        ResponseBody: "",
        T:            t,
    }
    server := httptest.NewServer(&handler)
    defer server.Close()
    client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
    // factory of "default-scheduler"
    factory := newConfigFactory(client, -1)
    _, err := factory.Create()
    if err == nil {
        t.Errorf("expected err: invalid hardPodAffinitySymmetricWeight, got nothing")
    }
}

func TestInvalidFactoryArgs(t *testing.T) {
    handler := utiltesting.FakeHandler{
        StatusCode:   500,
        ResponseBody: "",
        T:            t,
    }
    server := httptest.NewServer(&handler)
    defer server.Close()
    client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

    testCases := []struct {
        hardPodAffinitySymmetricWeight int32
        expectErr                      string
    }{
        {
            hardPodAffinitySymmetricWeight: -1,
            expectErr:                      "invalid hardPodAffinitySymmetricWeight: -1, must be in the range 0-100",
        },
        {
            hardPodAffinitySymmetricWeight: 101,
            expectErr:                      "invalid hardPodAffinitySymmetricWeight: 101, must be in the range 0-100",
        },
    }

    for _, test := range testCases {
        factory := newConfigFactory(client, test.hardPodAffinitySymmetricWeight)
        _, err := factory.Create()
        if err == nil {
            t.Errorf("expected err: %s, got nothing", test.expectErr)
        }
    }

}

func TestSkipPodUpdate(t *testing.T) {
    for _, test := range []struct {
        pod              *v1.Pod
        isAssumedPodFunc func(*v1.Pod) bool
        getPodFunc       func(*v1.Pod) *v1.Pod
        expected         bool
    }{
        // Non-assumed pod should not be skipped.
        {
            pod: &v1.Pod{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "pod-0",
                },
            },
            isAssumedPodFunc: func(*v1.Pod) bool { return false },
            getPodFunc: func(*v1.Pod) *v1.Pod {
                return &v1.Pod{
                    ObjectMeta: metav1.ObjectMeta{
                        Name: "pod-0",
                    },
                }
            },
            expected: false,
        },
        // Pod update (with changes on ResourceVersion, Spec.NodeName and/or
        // Annotations) for an already assumed pod should be skipped.
        {
            pod: &v1.Pod{
                ObjectMeta: metav1.ObjectMeta{
                    Name:            "pod-0",
                    Annotations:     map[string]string{"a": "b"},
                    ResourceVersion: "0",
                },
                Spec: v1.PodSpec{
                    NodeName: "node-0",
                },
            },
            isAssumedPodFunc: func(*v1.Pod) bool {
                return true
            },
            getPodFunc: func(*v1.Pod) *v1.Pod {
                return &v1.Pod{
                    ObjectMeta: metav1.ObjectMeta{
                        Name:            "pod-0",
                        Annotations:     map[string]string{"c": "d"},
                        ResourceVersion: "1",
                    },
                    Spec: v1.PodSpec{
                        NodeName: "node-1",
                    },
                }
            },
            expected: true,
        },
        // Pod update (with changes on Labels) for an already assumed pod
        // should not be skipped.
        {
            pod: &v1.Pod{
                ObjectMeta: metav1.ObjectMeta{
                    Name:   "pod-0",
                    Labels: map[string]string{"a": "b"},
                },
            },
            isAssumedPodFunc: func(*v1.Pod) bool {
                return true
            },
            getPodFunc: func(*v1.Pod) *v1.Pod {
                return &v1.Pod{
                    ObjectMeta: metav1.ObjectMeta{
                        Name:   "pod-0",
                        Labels: map[string]string{"c": "d"},
                    },
                }
            },
            expected: false,
        },
    } {
        c := &configFactory{
            schedulerCache: &schedulertesting.FakeCache{
                IsAssumedPodFunc: test.isAssumedPodFunc,
                GetPodFunc:       test.getPodFunc,
            },
        }
        got := c.skipPodUpdate(test.pod)
        if got != test.expected {
            t.Errorf("skipPodUpdate() = %t, expected = %t", got, test.expected)
        }
    }
}

func newConfigFactory(client *clientset.Clientset, hardPodAffinitySymmetricWeight int32) scheduler.Configurator {
    informerFactory := informers.NewSharedInformerFactory(client, 0)
    return NewConfigFactory(
        v1.DefaultSchedulerName,
        client,
        informerFactory.Core().V1().Nodes(),
        informerFactory.Core().V1().Pods(),
        informerFactory.Core().V1().PersistentVolumes(),
        informerFactory.Core().V1().PersistentVolumeClaims(),
        informerFactory.Core().V1().ReplicationControllers(),
        informerFactory.Extensions().V1beta1().ReplicaSets(),
        informerFactory.Apps().V1beta1().StatefulSets(),
        informerFactory.Core().V1().Services(),
        informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
        informerFactory.Storage().V1().StorageClasses(),
        hardPodAffinitySymmetricWeight,
        enableEquivalenceCache,
        disablePodPreemption,
    )
}

type fakeExtender struct {
    isBinder          bool
    interestedPodName string
    ignorable         bool
}

func (f *fakeExtender) IsIgnorable() bool {
    return f.ignorable
}

func (f *fakeExtender) ProcessPreemption(
    pod *v1.Pod,
    nodeToVictims map[*v1.Node]*schedulerapi.Victims,
    nodeNameToInfo map[string]*schedulercache.NodeInfo,
) (map[*v1.Node]*schedulerapi.Victims, error) {
    return nil, nil
}

func (f *fakeExtender) SupportsPreemption() bool {
    return false
}

func (f *fakeExtender) Filter(
    pod *v1.Pod,
    nodes []*v1.Node,
    nodeNameToInfo map[string]*schedulercache.NodeInfo,
) (filteredNodes []*v1.Node, failedNodesMap schedulerapi.FailedNodesMap, err error) {
    return nil, nil, nil
}

func (f *fakeExtender) Prioritize(
    pod *v1.Pod,
    nodes []*v1.Node,
) (hostPriorities *schedulerapi.HostPriorityList, weight int, err error) {
    return nil, 0, nil
}

func (f *fakeExtender) Bind(binding *v1.Binding) error {
    if f.isBinder {
        return nil
    }
    return errors.New("not a binder")
}

func (f *fakeExtender) IsBinder() bool {
    return f.isBinder
}

func (f *fakeExtender) IsInterested(pod *v1.Pod) bool {
    return pod != nil && pod.Name == f.interestedPodName
}

func TestGetBinderFunc(t *testing.T) {
    for _, test := range []struct {
        podName   string
        extenders []algorithm.SchedulerExtender

        expectedBinderType string
    }{
        // Expect to return the default binder because the extender is not a
        // binder, even though it's interested in the pod.
        {
            podName: "pod0",
            extenders: []algorithm.SchedulerExtender{
                &fakeExtender{isBinder: false, interestedPodName: "pod0"},
            },
            expectedBinderType: "*factory.binder",
        },
        // Expect to return the fake binder because one of the extenders is a
        // binder and it's interested in the pod.
        {
            podName: "pod0",
            extenders: []algorithm.SchedulerExtender{
                &fakeExtender{isBinder: false, interestedPodName: "pod0"},
                &fakeExtender{isBinder: true, interestedPodName: "pod0"},
            },
            expectedBinderType: "*factory.fakeExtender",
        },
        // Expect to return the default binder because one of the extenders is
        // a binder but the binder is not interested in the pod.
        {
            podName: "pod1",
            extenders: []algorithm.SchedulerExtender{
                &fakeExtender{isBinder: false, interestedPodName: "pod1"},
                &fakeExtender{isBinder: true, interestedPodName: "pod0"},
            },
            expectedBinderType: "*factory.binder",
        },
    } {
        pod := &v1.Pod{
            ObjectMeta: metav1.ObjectMeta{
                Name: test.podName,
            },
        }

        f := &configFactory{}
        binderFunc := f.getBinderFunc(test.extenders)
        binder := binderFunc(pod)

        binderType := fmt.Sprintf("%s", reflect.TypeOf(binder))
        if binderType != test.expectedBinderType {
            t.Errorf("Expected binder %q but got %q", test.expectedBinderType, binderType)
        }
    }
}
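Nearly every test above builds the same fixture: an httptest server driven by utiltesting.FakeHandler, wrapped in a client-go clientset that is then handed to newConfigFactory. A minimal sketch of just that fixture, with an illustrative package and test name that are not part of the vendored file:

package factory_test

import (
    "net/http/httptest"
    "testing"

    "k8s.io/apimachinery/pkg/runtime/schema"
    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    utiltesting "k8s.io/client-go/util/testing"
)

// TestClientFixture shows the fixture used throughout factory_test.go:
// a fake API server whose responses are fully controlled by the test.
func TestClientFixture(t *testing.T) {
    handler := utiltesting.FakeHandler{StatusCode: 500, ResponseBody: "", T: t}
    server := httptest.NewServer(&handler)
    defer server.Close()

    client := clientset.NewForConfigOrDie(&restclient.Config{
        Host:          server.URL,
        ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}},
    })
    _ = client // a real test would pass the client into NewConfigFactory
}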
vendor/k8s.io/kubernetes/pkg/scheduler/factory/plugins.go (generated, vendored, new file, 560 lines)
@@ -0,0 +1,560 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package factory
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
|
||||
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
|
||||
"k8s.io/kubernetes/pkg/scheduler/volumebinder"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// PluginFactoryArgs are passed to all plugin factory functions.
|
||||
type PluginFactoryArgs struct {
|
||||
PodLister algorithm.PodLister
|
||||
ServiceLister algorithm.ServiceLister
|
||||
ControllerLister algorithm.ControllerLister
|
||||
ReplicaSetLister algorithm.ReplicaSetLister
|
||||
StatefulSetLister algorithm.StatefulSetLister
|
||||
NodeLister algorithm.NodeLister
|
||||
NodeInfo predicates.NodeInfo
|
||||
PVInfo predicates.PersistentVolumeInfo
|
||||
PVCInfo predicates.PersistentVolumeClaimInfo
|
||||
StorageClassInfo predicates.StorageClassInfo
|
||||
VolumeBinder *volumebinder.VolumeBinder
|
||||
HardPodAffinitySymmetricWeight int32
|
||||
}
|
||||
|
||||
// PriorityMetadataProducerFactory produces PriorityMetadataProducer from the given args.
|
||||
type PriorityMetadataProducerFactory func(PluginFactoryArgs) algorithm.PriorityMetadataProducer
|
||||
|
||||
// PredicateMetadataProducerFactory produces PredicateMetadataProducer from the given args.
|
||||
type PredicateMetadataProducerFactory func(PluginFactoryArgs) algorithm.PredicateMetadataProducer
|
||||
|
||||
// FitPredicateFactory produces a FitPredicate from the given args.
|
||||
type FitPredicateFactory func(PluginFactoryArgs) algorithm.FitPredicate
|
||||
|
||||
// PriorityFunctionFactory produces a PriorityConfig from the given args.
|
||||
// DEPRECATED
|
||||
// Use Map-Reduce pattern for priority functions.
|
||||
type PriorityFunctionFactory func(PluginFactoryArgs) algorithm.PriorityFunction
|
||||
|
||||
// PriorityFunctionFactory2 produces map & reduce priority functions
|
||||
// from a given args.
|
||||
// FIXME: Rename to PriorityFunctionFactory.
|
||||
type PriorityFunctionFactory2 func(PluginFactoryArgs) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction)
|
||||
|
||||
// PriorityConfigFactory produces a PriorityConfig from the given function and weight
|
||||
type PriorityConfigFactory struct {
|
||||
Function PriorityFunctionFactory
|
||||
MapReduceFunction PriorityFunctionFactory2
|
||||
Weight int
|
||||
}
|
||||
|
||||
var (
|
||||
schedulerFactoryMutex sync.Mutex
|
||||
|
||||
// maps that hold registered algorithm types
|
||||
fitPredicateMap = make(map[string]FitPredicateFactory)
|
||||
mandatoryFitPredicates = sets.NewString()
|
||||
priorityFunctionMap = make(map[string]PriorityConfigFactory)
|
||||
algorithmProviderMap = make(map[string]AlgorithmProviderConfig)
|
||||
|
||||
// Registered metadata producers
|
||||
priorityMetadataProducer PriorityMetadataProducerFactory
|
||||
predicateMetadataProducer PredicateMetadataProducerFactory
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultProvider defines the default algorithm provider name.
|
||||
DefaultProvider = "DefaultProvider"
|
||||
)
|
||||
|
||||
// AlgorithmProviderConfig is used to store the configuration of algorithm providers.
|
||||
type AlgorithmProviderConfig struct {
|
||||
FitPredicateKeys sets.String
|
||||
PriorityFunctionKeys sets.String
|
||||
}
|
||||
|
||||
// RegisterFitPredicate registers a fit predicate with the algorithm
|
||||
// registry. Returns the name with which the predicate was registered.
|
||||
func RegisterFitPredicate(name string, predicate algorithm.FitPredicate) string {
|
||||
return RegisterFitPredicateFactory(name, func(PluginFactoryArgs) algorithm.FitPredicate { return predicate })
|
||||
}
|
||||
|
||||
// RemoveFitPredicate removes a fit predicate from factory.
|
||||
func RemoveFitPredicate(name string) {
|
||||
schedulerFactoryMutex.Lock()
|
||||
defer schedulerFactoryMutex.Unlock()
|
||||
|
||||
validateAlgorithmNameOrDie(name)
|
||||
delete(fitPredicateMap, name)
|
||||
mandatoryFitPredicates.Delete(name)
|
||||
}
|
||||
|
||||
// RemovePredicateKeyFromAlgoProvider removes a fit predicate key from algorithmProvider.
|
||||
func RemovePredicateKeyFromAlgoProvider(providerName, key string) error {
|
||||
schedulerFactoryMutex.Lock()
|
||||
defer schedulerFactoryMutex.Unlock()
|
||||
|
||||
validateAlgorithmNameOrDie(providerName)
|
||||
provider, ok := algorithmProviderMap[providerName]
|
||||
if !ok {
|
||||
return fmt.Errorf("plugin %v has not been registered", providerName)
|
||||
}
|
||||
provider.FitPredicateKeys.Delete(key)
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemovePredicateKeyFromAlgorithmProviderMap removes a fit predicate key from all algorithmProviders which in algorithmProviderMap.
|
||||
func RemovePredicateKeyFromAlgorithmProviderMap(key string) {
|
||||
schedulerFactoryMutex.Lock()
|
||||
defer schedulerFactoryMutex.Unlock()
|
||||
|
||||
for _, provider := range algorithmProviderMap {
|
||||
provider.FitPredicateKeys.Delete(key)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// InsertPredicateKeyToAlgoProvider insert a fit predicate key to algorithmProvider.
|
||||
func InsertPredicateKeyToAlgoProvider(providerName, key string) error {
|
||||
schedulerFactoryMutex.Lock()
|
||||
defer schedulerFactoryMutex.Unlock()
|
||||
|
||||
validateAlgorithmNameOrDie(providerName)
|
||||
provider, ok := algorithmProviderMap[providerName]
|
||||
if !ok {
|
||||
return fmt.Errorf("plugin %v has not been registered", providerName)
|
||||
}
|
||||
provider.FitPredicateKeys.Insert(key)
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertPredicateKeyToAlgorithmProviderMap insert a fit predicate key to all algorithmProviders which in algorithmProviderMap.
|
||||
func InsertPredicateKeyToAlgorithmProviderMap(key string) {
|
||||
schedulerFactoryMutex.Lock()
|
||||
defer schedulerFactoryMutex.Unlock()
|
||||
|
||||
for _, provider := range algorithmProviderMap {
|
||||
provider.FitPredicateKeys.Insert(key)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// RegisterMandatoryFitPredicate registers a fit predicate with the algorithm registry, the predicate is used by
|
||||
// kubelet, DaemonSet; it is always included in configuration. Returns the name with which the predicate was
|
||||
// registered.
|
||||
func RegisterMandatoryFitPredicate(name string, predicate algorithm.FitPredicate) string {
|
||||
schedulerFactoryMutex.Lock()
|
||||
defer schedulerFactoryMutex.Unlock()
|
||||
validateAlgorithmNameOrDie(name)
|
||||
fitPredicateMap[name] = func(PluginFactoryArgs) algorithm.FitPredicate { return predicate }
|
||||
mandatoryFitPredicates.Insert(name)
|
||||
return name
|
||||
}
|
||||
|
||||
// RegisterFitPredicateFactory registers a fit predicate factory with the
|
||||
// algorithm registry. Returns the name with which the predicate was registered.
|
||||
func RegisterFitPredicateFactory(name string, predicateFactory FitPredicateFactory) string {
|
||||
schedulerFactoryMutex.Lock()
|
||||
defer schedulerFactoryMutex.Unlock()
|
||||
validateAlgorithmNameOrDie(name)
|
||||
fitPredicateMap[name] = predicateFactory
|
||||
return name
|
||||
}
|
||||
|
||||
// RegisterCustomFitPredicate registers a custom fit predicate with the algorithm registry.
|
||||
// Returns the name, with which the predicate was registered.
|
||||
func RegisterCustomFitPredicate(policy schedulerapi.PredicatePolicy) string {
|
||||
var predicateFactory FitPredicateFactory
|
||||
var ok bool
|
||||
|
||||
validatePredicateOrDie(policy)
|
||||
|
||||
// generate the predicate function, if a custom type is requested
|
||||
if policy.Argument != nil {
|
||||
if policy.Argument.ServiceAffinity != nil {
|
||||
predicateFactory = func(args PluginFactoryArgs) algorithm.FitPredicate {
|
||||
predicate, precomputationFunction := predicates.NewServiceAffinityPredicate(
|
||||
args.PodLister,
|
||||
args.ServiceLister,
|
||||
args.NodeInfo,
|
||||
policy.Argument.ServiceAffinity.Labels,
|
||||
)
|
||||
|
||||
// Once we generate the predicate we should also Register the Precomputation
|
||||
predicates.RegisterPredicateMetadataProducer(policy.Name, precomputationFunction)
|
||||
return predicate
|
||||
}
|
||||
} else if policy.Argument.LabelsPresence != nil {
|
||||
predicateFactory = func(args PluginFactoryArgs) algorithm.FitPredicate {
|
||||
return predicates.NewNodeLabelPredicate(
|
||||
policy.Argument.LabelsPresence.Labels,
|
||||
policy.Argument.LabelsPresence.Presence,
|
||||
)
|
||||
}
|
||||
}
|
||||
} else if predicateFactory, ok = fitPredicateMap[policy.Name]; ok {
|
||||
// checking to see if a pre-defined predicate is requested
|
||||
glog.V(2).Infof("Predicate type %s already registered, reusing.", policy.Name)
|
||||
return policy.Name
|
||||
}
|
||||
|
||||
if predicateFactory == nil {
|
||||
glog.Fatalf("Invalid configuration: Predicate type not found for %s", policy.Name)
|
||||
}
|
||||
|
||||
return RegisterFitPredicateFactory(policy.Name, predicateFactory)
|
||||
}
|
||||
|
||||
// IsFitPredicateRegistered is useful for testing providers.
|
||||
func IsFitPredicateRegistered(name string) bool {
|
||||
schedulerFactoryMutex.Lock()
|
||||
defer schedulerFactoryMutex.Unlock()
|
||||
_, ok := fitPredicateMap[name]
|
||||
return ok
|
||||
}
|
||||
|
||||
// RegisterPriorityMetadataProducerFactory registers a PriorityMetadataProducerFactory.
|
||||
func RegisterPriorityMetadataProducerFactory(factory PriorityMetadataProducerFactory) {
|
||||
schedulerFactoryMutex.Lock()
|
||||
defer schedulerFactoryMutex.Unlock()
|
||||
priorityMetadataProducer = factory
|
||||
}
|
||||
|
||||
// RegisterPredicateMetadataProducerFactory registers a PredicateMetadataProducerFactory.
|
||||
func RegisterPredicateMetadataProducerFactory(factory PredicateMetadataProducerFactory) {
|
||||
schedulerFactoryMutex.Lock()
|
||||
defer schedulerFactoryMutex.Unlock()
|
||||
predicateMetadataProducer = factory
|
||||
}
|
||||
|
||||
// RegisterPriorityFunction registers a priority function with the algorithm registry. Returns the name,
|
||||
// with which the function was registered.
|
||||
// DEPRECATED
|
||||
// Use Map-Reduce pattern for priority functions.
|
||||
func RegisterPriorityFunction(name string, function algorithm.PriorityFunction, weight int) string {
|
||||
return RegisterPriorityConfigFactory(name, PriorityConfigFactory{
|
||||
Function: func(PluginFactoryArgs) algorithm.PriorityFunction {
|
||||
return function
|
||||
},
|
||||
Weight: weight,
|
||||
})
|
||||
}
|
||||
|
||||
// RegisterPriorityFunction2 registers a priority function with the algorithm registry. Returns the name,
|
||||
// with which the function was registered.
|
||||
// FIXME: Rename to PriorityFunctionFactory.
|
||||
func RegisterPriorityFunction2(
|
||||
name string,
|
||||
mapFunction algorithm.PriorityMapFunction,
|
||||
reduceFunction algorithm.PriorityReduceFunction,
|
||||
weight int) string {
|
||||
return RegisterPriorityConfigFactory(name, PriorityConfigFactory{
|
||||
MapReduceFunction: func(PluginFactoryArgs) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction) {
|
||||
return mapFunction, reduceFunction
|
||||
},
|
||||
Weight: weight,
|
||||
})
|
||||
}
|
||||
|
||||
// RegisterPriorityConfigFactory registers a priority config factory with its name.
|
||||
func RegisterPriorityConfigFactory(name string, pcf PriorityConfigFactory) string {
|
||||
schedulerFactoryMutex.Lock()
|
||||
defer schedulerFactoryMutex.Unlock()
|
||||
validateAlgorithmNameOrDie(name)
|
||||
priorityFunctionMap[name] = pcf
|
||||
return name
|
||||
}
|
||||
|
||||
// RegisterCustomPriorityFunction registers a custom priority function with the algorithm registry.
// Returns the name with which the priority function was registered.
func RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy) string {
	var pcf *PriorityConfigFactory

	validatePriorityOrDie(policy)

	// Generate the priority function if a custom priority is requested.
	if policy.Argument != nil {
		if policy.Argument.ServiceAntiAffinity != nil {
			pcf = &PriorityConfigFactory{
				MapReduceFunction: func(args PluginFactoryArgs) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction) {
					return priorities.NewServiceAntiAffinityPriority(
						args.PodLister,
						args.ServiceLister,
						policy.Argument.ServiceAntiAffinity.Label,
					)
				},
				Weight: policy.Weight,
			}
		} else if policy.Argument.LabelPreference != nil {
			pcf = &PriorityConfigFactory{
				MapReduceFunction: func(args PluginFactoryArgs) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction) {
					return priorities.NewNodeLabelPriority(
						policy.Argument.LabelPreference.Label,
						policy.Argument.LabelPreference.Presence,
					)
				},
				Weight: policy.Weight,
			}
		} else if policy.Argument.RequestedToCapacityRatioArguments != nil {
			pcf = &PriorityConfigFactory{
				MapReduceFunction: func(args PluginFactoryArgs) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction) {
					scoringFunctionShape := buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(policy.Argument.RequestedToCapacityRatioArguments)
					p := priorities.RequestedToCapacityRatioResourceAllocationPriority(scoringFunctionShape)
					return p.PriorityMap, nil
				},
				Weight: policy.Weight,
			}
		}
	} else if existingPcf, ok := priorityFunctionMap[policy.Name]; ok {
		glog.V(2).Infof("Priority type %s already registered, reusing.", policy.Name)
		// Set/update the weight based on the policy.
		pcf = &PriorityConfigFactory{
			Function:          existingPcf.Function,
			MapReduceFunction: existingPcf.MapReduceFunction,
			Weight:            policy.Weight,
		}
	}

	if pcf == nil {
		glog.Fatalf("Invalid configuration: Priority type not found for %s", policy.Name)
	}

	return RegisterPriorityConfigFactory(policy.Name, *pcf)
}

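// Editorial sketch, not part of the upstream file: the kind of policy a caller
// hands to RegisterCustomPriorityFunction. The policy name, label, and weight are
// made up, and the PriorityArgument/ServiceAntiAffinity type names are assumed to
// be the ones defined in pkg/scheduler/api (aliased schedulerapi here).
func exampleRegisterCustomPriority() string {
	policy := schedulerapi.PriorityPolicy{
		Name:   "ExampleZoneSpreadingPriority",
		Weight: 2,
		Argument: &schedulerapi.PriorityArgument{
			// Exactly one argument may be set; validatePriorityOrDie enforces this.
			ServiceAntiAffinity: &schedulerapi.ServiceAntiAffinity{Label: "zone"},
		},
	}
	return RegisterCustomPriorityFunction(policy)
}
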
func buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(arguments *schedulerapi.RequestedToCapacityRatioArguments) priorities.FunctionShape {
	n := len(arguments.UtilizationShape)
	points := make([]priorities.FunctionShapePoint, 0, n)
	for _, point := range arguments.UtilizationShape {
		points = append(points, priorities.FunctionShapePoint{Utilization: int64(point.Utilization), Score: int64(point.Score)})
	}
	shape, err := priorities.NewFunctionShape(points)
	if err != nil {
		glog.Fatalf("invalid RequestedToCapacityRatioPriority arguments: %s", err.Error())
	}
	return shape
}

// IsPriorityFunctionRegistered is useful for testing providers.
func IsPriorityFunctionRegistered(name string) bool {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()
	_, ok := priorityFunctionMap[name]
	return ok
}

// RegisterAlgorithmProvider registers a new algorithm provider with the algorithm registry. This should
// be called from the init function in a provider plugin.
func RegisterAlgorithmProvider(name string, predicateKeys, priorityKeys sets.String) string {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()
	validateAlgorithmNameOrDie(name)
	algorithmProviderMap[name] = AlgorithmProviderConfig{
		FitPredicateKeys:     predicateKeys,
		PriorityFunctionKeys: priorityKeys,
	}
	return name
}

// GetAlgorithmProvider should not be used to modify providers. It is publicly visible for testing.
func GetAlgorithmProvider(name string) (*AlgorithmProviderConfig, error) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()

	provider, ok := algorithmProviderMap[name]
	if !ok {
		return nil, fmt.Errorf("plugin %q has not been registered", name)
	}

	return &provider, nil
}

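// Editorial sketch, not part of the upstream file: how a provider plugin's init
// function would group previously registered predicate and priority names into a
// named provider, and how tests can read the result back. All names are hypothetical.
func exampleRegisterAndLookupProvider() (sets.String, error) {
	RegisterAlgorithmProvider("ExampleProvider",
		sets.NewString("ExamplePredicate"),
		sets.NewString("ExampleRequestedToCapacityRatio"))
	provider, err := GetAlgorithmProvider("ExampleProvider")
	if err != nil {
		return nil, err
	}
	return provider.PriorityFunctionKeys, nil
}
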
func getFitPredicateFunctions(names sets.String, args PluginFactoryArgs) (map[string]algorithm.FitPredicate, error) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()

	predicates := map[string]algorithm.FitPredicate{}
	for _, name := range names.List() {
		factory, ok := fitPredicateMap[name]
		if !ok {
			return nil, fmt.Errorf("Invalid predicate name %q specified - no corresponding function found", name)
		}
		predicates[name] = factory(args)
	}

	// Always include mandatory fit predicates.
	for name := range mandatoryFitPredicates {
		if factory, found := fitPredicateMap[name]; found {
			predicates[name] = factory(args)
		}
	}

	return predicates, nil
}

func getPriorityMetadataProducer(args PluginFactoryArgs) (algorithm.PriorityMetadataProducer, error) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()

	if priorityMetadataProducer == nil {
		return algorithm.EmptyPriorityMetadataProducer, nil
	}
	return priorityMetadataProducer(args), nil
}

func getPredicateMetadataProducer(args PluginFactoryArgs) (algorithm.PredicateMetadataProducer, error) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()

	if predicateMetadataProducer == nil {
		return algorithm.EmptyPredicateMetadataProducer, nil
	}
	return predicateMetadataProducer(args), nil
}

func getPriorityFunctionConfigs(names sets.String, args PluginFactoryArgs) ([]algorithm.PriorityConfig, error) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()

	configs := []algorithm.PriorityConfig{}
	for _, name := range names.List() {
		factory, ok := priorityFunctionMap[name]
		if !ok {
			return nil, fmt.Errorf("Invalid priority name %s specified - no corresponding function found", name)
		}
		if factory.Function != nil {
			configs = append(configs, algorithm.PriorityConfig{
				Name:     name,
				Function: factory.Function(args),
				Weight:   factory.Weight,
			})
		} else {
			mapFunction, reduceFunction := factory.MapReduceFunction(args)
			configs = append(configs, algorithm.PriorityConfig{
				Name:   name,
				Map:    mapFunction,
				Reduce: reduceFunction,
				Weight: factory.Weight,
			})
		}
	}
	if err := validateSelectedConfigs(configs); err != nil {
		return nil, err
	}
	return configs, nil
}

// validateSelectedConfigs validates the config weights to avoid weight overflow.
func validateSelectedConfigs(configs []algorithm.PriorityConfig) error {
	var totalPriority int
	for _, config := range configs {
		// Checks totalPriority against MaxTotalPriority to avoid overflow.
		if config.Weight*schedulerapi.MaxPriority > schedulerapi.MaxTotalPriority-totalPriority {
			return fmt.Errorf("Total priority of priority functions has overflown")
		}
		totalPriority += config.Weight * schedulerapi.MaxPriority
	}
	return nil
}

var validName = regexp.MustCompile("^[a-zA-Z0-9]([-a-zA-Z0-9]*[a-zA-Z0-9])$")

func validateAlgorithmNameOrDie(name string) {
	if !validName.MatchString(name) {
		glog.Fatalf("Algorithm name %v does not match the name validation regexp \"%v\".", name, validName)
	}
}

func validatePredicateOrDie(predicate schedulerapi.PredicatePolicy) {
	if predicate.Argument != nil {
		numArgs := 0
		if predicate.Argument.ServiceAffinity != nil {
			numArgs++
		}
		if predicate.Argument.LabelsPresence != nil {
			numArgs++
		}
		if numArgs != 1 {
			glog.Fatalf("Exactly 1 predicate argument is required, numArgs: %v, Predicate: %s", numArgs, predicate.Name)
		}
	}
}

func validatePriorityOrDie(priority schedulerapi.PriorityPolicy) {
	if priority.Argument != nil {
		numArgs := 0
		if priority.Argument.ServiceAntiAffinity != nil {
			numArgs++
		}
		if priority.Argument.LabelPreference != nil {
			numArgs++
		}
		if priority.Argument.RequestedToCapacityRatioArguments != nil {
			numArgs++
		}
		if numArgs != 1 {
			glog.Fatalf("Exactly 1 priority argument is required, numArgs: %v, Priority: %s", numArgs, priority.Name)
		}
	}
}

// ListRegisteredFitPredicates returns the registered fit predicates.
func ListRegisteredFitPredicates() []string {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()

	names := []string{}
	for name := range fitPredicateMap {
		names = append(names, name)
	}
	return names
}

// ListRegisteredPriorityFunctions returns the registered priority functions.
func ListRegisteredPriorityFunctions() []string {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()

	names := []string{}
	for name := range priorityFunctionMap {
		names = append(names, name)
	}
	return names
}

// ListAlgorithmProviders is called when listing all available algorithm providers in `kube-scheduler --help`.
func ListAlgorithmProviders() string {
	var availableAlgorithmProviders []string
	for name := range algorithmProviderMap {
		availableAlgorithmProviders = append(availableAlgorithmProviders, name)
	}
	sort.Strings(availableAlgorithmProviders)
	return strings.Join(availableAlgorithmProviders, " | ")
}

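// Editorial sketch, not part of the upstream file: the --help listing simply joins
// every registered provider name, sorted, with " | ". Provider names here are hypothetical.
func exampleListProviders() string {
	RegisterAlgorithmProvider("ExampleProviderB", sets.NewString(), sets.NewString())
	RegisterAlgorithmProvider("ExampleProviderA", sets.NewString(), sets.NewString())
	// The returned string contains "ExampleProviderA" and "ExampleProviderB"
	// (plus any other registered providers) in sorted order, e.g.
	// "ExampleProviderA | ExampleProviderB".
	return ListAlgorithmProviders()
}
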
100
vendor/k8s.io/kubernetes/pkg/scheduler/factory/plugins_test.go
generated
vendored
Normal file
@@ -0,0 +1,100 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package factory

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"k8s.io/kubernetes/pkg/scheduler/algorithm"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
	"k8s.io/kubernetes/pkg/scheduler/api"
)

func TestAlgorithmNameValidation(t *testing.T) {
	algorithmNamesShouldValidate := []string{
		"1SomeAlgo1rithm",
		"someAlgor-ithm1",
	}
	algorithmNamesShouldNotValidate := []string{
		"-SomeAlgorithm",
		"SomeAlgorithm-",
		"Some,Alg:orithm",
	}
	for _, name := range algorithmNamesShouldValidate {
		if !validName.MatchString(name) {
			t.Errorf("%v should be a valid algorithm name but is not valid.", name)
		}
	}
	for _, name := range algorithmNamesShouldNotValidate {
		if validName.MatchString(name) {
			t.Errorf("%v should be an invalid algorithm name but is valid.", name)
		}
	}
}

func TestValidatePriorityConfigOverFlow(t *testing.T) {
	tests := []struct {
		description string
		configs     []algorithm.PriorityConfig
		expected    bool
	}{
		{
			description: "one of the weights is MaxInt",
			configs:     []algorithm.PriorityConfig{{Weight: api.MaxInt}, {Weight: 5}},
			expected:    true,
		},
		{
			description: "after multiplication with MaxPriority the weight is larger than MaxWeight",
			configs:     []algorithm.PriorityConfig{{Weight: api.MaxInt/api.MaxPriority + api.MaxPriority}, {Weight: 5}},
			expected:    true,
		},
		{
			description: "normal weights",
			configs:     []algorithm.PriorityConfig{{Weight: 10000}, {Weight: 5}},
			expected:    false,
		},
	}
	for _, test := range tests {
		err := validateSelectedConfigs(test.configs)
		if test.expected {
			if err == nil {
				t.Errorf("Expected overflow for %s", test.description)
			}
		} else {
			if err != nil {
				t.Errorf("Did not expect an overflow for %s", test.description)
			}
		}
	}
}

func TestBuildScoringFunctionShapeFromRequestedToCapacityRatioArguments(t *testing.T) {
	arguments := api.RequestedToCapacityRatioArguments{
		UtilizationShape: []api.UtilizationShapePoint{
			{Utilization: 10, Score: 1},
			{Utilization: 30, Score: 5},
			{Utilization: 70, Score: 2},
		}}
	builtShape := buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(&arguments)
	expectedShape, _ := priorities.NewFunctionShape([]priorities.FunctionShapePoint{
		{Utilization: 10, Score: 1},
		{Utilization: 30, Score: 5},
		{Utilization: 70, Score: 2},
	})
	assert.Equal(t, expectedShape, builtShape)
}
25
vendor/k8s.io/kubernetes/pkg/scheduler/factory/signal.go
generated
vendored
Normal file
@@ -0,0 +1,25 @@
// +build !windows

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package factory

import "syscall"

// compareSignal is the signal to trigger cache compare. For non-Windows
// environments it's SIGUSR2.
var compareSignal = syscall.SIGUSR2
23
vendor/k8s.io/kubernetes/pkg/scheduler/factory/signal_windows.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package factory

import "os"

// compareSignal is the signal to trigger cache compare. For Windows,
// it's SIGINT.
var compareSignal = os.Interrupt