Bumping k8s dependencies to 1.13
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/BUILD (generated, vendored, 42 lines changed)
@@ -9,6 +9,7 @@ load(
go_library(
    name = "go_default_library",
    srcs = [
        "csi_volume_predicate.go",
        "error.go",
        "metadata.go",
        "predicates.go",
@@ -27,25 +28,26 @@ go_library(
        "//pkg/scheduler/util:go_default_library",
        "//pkg/scheduler/volumebinder:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/storage/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/listers/storage/v1:go_default_library",
        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "csi_volume_predicate_test.go",
        "max_attachable_volume_predicate_test.go",
        "metadata_test.go",
        "predicates_test.go",
@@ -60,14 +62,14 @@ go_test(
        "//pkg/scheduler/cache:go_default_library",
        "//pkg/scheduler/testing:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/storage/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
    ],
)
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/csi_volume_predicate.go (new file, generated, vendored, 157 lines)
@@ -0,0 +1,157 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package predicates

import (
    "fmt"

    "github.com/golang/glog"
    "k8s.io/api/core/v1"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/scheduler/algorithm"
    schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
    volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

// CSIMaxVolumeLimitChecker defines predicate needed for counting CSI volumes
type CSIMaxVolumeLimitChecker struct {
    pvInfo PersistentVolumeInfo
    pvcInfo PersistentVolumeClaimInfo
}

// NewCSIMaxVolumeLimitPredicate returns a predicate for counting CSI volumes
func NewCSIMaxVolumeLimitPredicate(
    pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate {
    c := &CSIMaxVolumeLimitChecker{
        pvInfo: pvInfo,
        pvcInfo: pvcInfo,
    }
    return c.attachableLimitPredicate
}

func (c *CSIMaxVolumeLimitChecker) attachableLimitPredicate(
    pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {

    // if feature gate is disable we return
    if !utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
        return true, nil, nil
    }
    // If a pod doesn't have any volume attached to it, the predicate will always be true.
    // Thus we make a fast path for it, to avoid unnecessary computations in this case.
    if len(pod.Spec.Volumes) == 0 {
        return true, nil, nil
    }

    nodeVolumeLimits := nodeInfo.VolumeLimits()

    // if node does not have volume limits this predicate should exit
    if len(nodeVolumeLimits) == 0 {
        return true, nil, nil
    }

    // a map of unique volume name/csi volume handle and volume limit key
    newVolumes := make(map[string]string)
    if err := c.filterAttachableVolumes(pod.Spec.Volumes, pod.Namespace, newVolumes); err != nil {
        return false, nil, err
    }

    if len(newVolumes) == 0 {
        return true, nil, nil
    }

    // a map of unique volume name/csi volume handle and volume limit key
    attachedVolumes := make(map[string]string)
    for _, existingPod := range nodeInfo.Pods() {
        if err := c.filterAttachableVolumes(existingPod.Spec.Volumes, existingPod.Namespace, attachedVolumes); err != nil {
            return false, nil, err
        }
    }

    newVolumeCount := map[string]int{}
    attachedVolumeCount := map[string]int{}

    for volumeName, volumeLimitKey := range attachedVolumes {
        if _, ok := newVolumes[volumeName]; ok {
            delete(newVolumes, volumeName)
        }
        attachedVolumeCount[volumeLimitKey]++
    }

    for _, volumeLimitKey := range newVolumes {
        newVolumeCount[volumeLimitKey]++
    }

    for volumeLimitKey, count := range newVolumeCount {
        maxVolumeLimit, ok := nodeVolumeLimits[v1.ResourceName(volumeLimitKey)]
        if ok {
            currentVolumeCount := attachedVolumeCount[volumeLimitKey]
            if currentVolumeCount+count > int(maxVolumeLimit) {
                return false, []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}, nil
            }
        }
    }

    return true, nil, nil
}

func (c *CSIMaxVolumeLimitChecker) filterAttachableVolumes(
    volumes []v1.Volume, namespace string, result map[string]string) error {

    for _, vol := range volumes {
        // CSI volumes can only be used as persistent volumes
        if vol.PersistentVolumeClaim == nil {
            continue
        }
        pvcName := vol.PersistentVolumeClaim.ClaimName

        if pvcName == "" {
            return fmt.Errorf("PersistentVolumeClaim had no name")
        }

        pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName)

        if err != nil {
            glog.V(4).Infof("Unable to look up PVC info for %s/%s", namespace, pvcName)
            continue
        }

        pvName := pvc.Spec.VolumeName
        // TODO - the actual handling of unbound PVCs will be fixed by late binding design.
        if pvName == "" {
            glog.V(4).Infof("Persistent volume had no name for claim %s/%s", namespace, pvcName)
            continue
        }
        pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName)

        if err != nil {
            glog.V(4).Infof("Unable to look up PV info for PVC %s/%s and PV %s", namespace, pvcName, pvName)
            continue
        }

        csiSource := pv.Spec.PersistentVolumeSource.CSI
        if csiSource == nil {
            glog.V(4).Infof("Not considering non-CSI volume %s/%s", namespace, pvcName)
            continue
        }
        driverName := csiSource.Driver
        volumeLimitKey := volumeutil.GetCSIAttachLimitKey(driverName)
        result[csiSource.VolumeHandle] = volumeLimitKey

    }
    return nil
}
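The predicate above is, at its core, plain map bookkeeping: volumes requested by the incoming pod and volumes already attached on the node are keyed by CSI volume handle and grouped under a per-driver limit key, and the pod fits only if every per-driver sum stays within the node's advertised limit. A minimal self-contained sketch of just that counting step (plain maps only; the function and key names here are illustrative, not part of the Kubernetes API):

package main

import "fmt"

// fitsCSILimits mirrors the counting step of attachableLimitPredicate:
// newVolumes and attachedVolumes map a CSI volume handle to its per-driver
// limit key (e.g. a key of the form "attachable-volumes-csi-<driver>"),
// and limits maps that key to the node's advertised maximum.
func fitsCSILimits(newVolumes, attachedVolumes map[string]string, limits map[string]int) bool {
    attachedCount := map[string]int{}
    for handle, key := range attachedVolumes {
        // A volume already attached to the node must not be counted twice.
        delete(newVolumes, handle)
        attachedCount[key]++
    }
    newCount := map[string]int{}
    for _, key := range newVolumes {
        newCount[key]++
    }
    for key, count := range newCount {
        if limit, ok := limits[key]; ok && attachedCount[key]+count > limit {
            return false
        }
    }
    return true
}

func main() {
    limits := map[string]int{"attachable-volumes-csi-ebs.csi.aws.com": 2}
    attached := map[string]string{"vol-1": "attachable-volumes-csi-ebs.csi.aws.com"}
    wanted := map[string]string{
        "vol-2": "attachable-volumes-csi-ebs.csi.aws.com",
        "vol-3": "attachable-volumes-csi-ebs.csi.aws.com",
    }
    fmt.Println(fitsCSILimits(wanted, attached, limits)) // false: 1 attached + 2 new > 2
}

Deleting already-attached handles from the requested set is what keeps a volume shared by several pods from being counted twice, which is exactly what attachableLimitPredicate does before comparing against nodeVolumeLimits.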
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/csi_volume_predicate_test.go (new file, generated, vendored, 179 lines)
@@ -0,0 +1,179 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package predicates

import (
    "reflect"
    "testing"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
    "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/scheduler/algorithm"
)

func TestCSIVolumeCountPredicate(t *testing.T) {
    // for pods with CSI pvcs
    oneVolPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "csi-ebs",
                        },
                    },
                },
            },
        },
    }
    twoVolPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "cs-ebs-1",
                        },
                    },
                },
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "csi-ebs-2",
                        },
                    },
                },
            },
        },
    }

    runningPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "csi-ebs-3",
                        },
                    },
                },
            },
        },
    }

    tests := []struct {
        newPod *v1.Pod
        existingPods []*v1.Pod
        filterName string
        maxVols int
        fits bool
        test string
    }{
        {
            newPod: oneVolPod,
            existingPods: []*v1.Pod{runningPod, twoVolPod},
            filterName: "csi-ebs",
            maxVols: 4,
            fits: true,
            test: "fits when node capacity >= new pods CSI volume",
        },
        {
            newPod: oneVolPod,
            existingPods: []*v1.Pod{runningPod, twoVolPod},
            filterName: "csi-ebs",
            maxVols: 2,
            fits: false,
            test: "doesn't when node capacity <= pods CSI volume",
        },
    }

    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
    expectedFailureReasons := []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}
    // running attachable predicate tests with feature gate and limit present on nodes
    for _, test := range tests {
        node := getNodeWithPodAndVolumeLimits(test.existingPods, int64(test.maxVols), test.filterName)
        pred := NewCSIMaxVolumeLimitPredicate(getFakeCSIPVInfo("csi-ebs", "csi-ebs"), getFakeCSIPVCInfo("csi-ebs"))
        fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), node)
        if err != nil {
            t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
        }
        if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
            t.Errorf("Using allocatable [%s]%s: unexpected failure reasons: %v, want: %v", test.filterName, test.test, reasons, expectedFailureReasons)
        }
        if fits != test.fits {
            t.Errorf("Using allocatable [%s]%s: expected %v, got %v", test.filterName, test.test, test.fits, fits)
        }
    }
}

func getFakeCSIPVInfo(volumeName, driverName string) FakePersistentVolumeInfo {
    return FakePersistentVolumeInfo{
        {
            ObjectMeta: metav1.ObjectMeta{Name: volumeName},
            Spec: v1.PersistentVolumeSpec{
                PersistentVolumeSource: v1.PersistentVolumeSource{
                    CSI: &v1.CSIPersistentVolumeSource{
                        Driver: driverName,
                        VolumeHandle: volumeName,
                    },
                },
            },
        },
        {
            ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-2"},
            Spec: v1.PersistentVolumeSpec{
                PersistentVolumeSource: v1.PersistentVolumeSource{
                    CSI: &v1.CSIPersistentVolumeSource{
                        Driver: driverName,
                        VolumeHandle: volumeName + "-2",
                    },
                },
            },
        },
        {
            ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-3"},
            Spec: v1.PersistentVolumeSpec{
                PersistentVolumeSource: v1.PersistentVolumeSource{
                    CSI: &v1.CSIPersistentVolumeSource{
                        Driver: driverName,
                        VolumeHandle: volumeName + "-3",
                    },
                },
            },
        },
    }
}

func getFakeCSIPVCInfo(volumeName string) FakePersistentVolumeClaimInfo {
    return FakePersistentVolumeClaimInfo{
        {
            ObjectMeta: metav1.ObjectMeta{Name: volumeName},
            Spec: v1.PersistentVolumeClaimSpec{VolumeName: volumeName},
        },
        {
            ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-2"},
            Spec: v1.PersistentVolumeClaimSpec{VolumeName: volumeName + "-2"},
        },
        {
            ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-3"},
            Spec: v1.PersistentVolumeClaimSpec{VolumeName: volumeName + "-3"},
        },
    }
}
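These tests build their nodes through getNodeWithPodAndVolumeLimits, which advertises the per-driver limit in the node's allocatable resources; nodeInfo.VolumeLimits() is what surfaces those entries to the predicate. A short sketch of what such a node object looks like, using only the core v1 and resource APIs; the literal limit key below assumes the attachable-volumes-csi-<driver> form produced by volumeutil.GetCSIAttachLimitKey, so treat it as an illustrative assumption rather than a guaranteed value:

package main

import (
    "fmt"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    // Assumed key format; in the scheduler it comes from
    // volumeutil.GetCSIAttachLimitKey(driverName).
    limitKey := v1.ResourceName("attachable-volumes-csi-csi-ebs")
    node := &v1.Node{
        ObjectMeta: metav1.ObjectMeta{Name: "node-with-csi-limit"},
        Status: v1.NodeStatus{
            Allocatable: v1.ResourceList{
                limitKey: *resource.NewQuantity(2, resource.DecimalSI),
            },
        },
    }
    q := node.Status.Allocatable[limitKey]
    fmt.Println(q.String()) // "2": the predicate compares its per-driver volume count against this value
}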
@@ -29,6 +29,7 @@ import (
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
volumeutil "k8s.io/kubernetes/pkg/volume/util"
|
||||
@@ -741,60 +742,12 @@ func TestVolumeCountConflicts(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
pvInfo := func(filterName string) FakePersistentVolumeInfo {
|
||||
return FakePersistentVolumeInfo{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: strings.ToLower(filterName) + "Vol"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "someNon" + filterName + "Vol"},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pvcInfo := func(filterName string) FakePersistentVolumeClaimInfo {
|
||||
return FakePersistentVolumeClaimInfo{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"},
|
||||
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "some" + filterName + "Vol"},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "someNon" + filterName + "Vol"},
|
||||
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "someNon" + filterName + "Vol"},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pvcWithDeletedPV"},
|
||||
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "pvcWithDeletedPV"},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "anotherPVCWithDeletedPV"},
|
||||
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "anotherPVCWithDeletedPV"},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "unboundPVC"},
|
||||
Spec: v1.PersistentVolumeClaimSpec{VolumeName: ""},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "anotherUnboundPVC"},
|
||||
Spec: v1.PersistentVolumeClaimSpec{VolumeName: ""},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}
|
||||
|
||||
// running attachable predicate tests without feature gate and no limit present on nodes
|
||||
for _, test := range tests {
|
||||
os.Setenv(KubeMaxPDVols, strconv.Itoa(test.maxVols))
|
||||
pred := NewMaxPDVolumeCountPredicate(test.filterName, pvInfo(test.filterName), pvcInfo(test.filterName))
|
||||
pred := NewMaxPDVolumeCountPredicate(test.filterName, getFakePVInfo(test.filterName), getFakePVCInfo(test.filterName))
|
||||
fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), schedulercache.NewNodeInfo(test.existingPods...))
|
||||
if err != nil {
|
||||
t.Errorf("[%s]%s: unexpected error: %v", test.filterName, test.test, err)
|
||||
@@ -812,7 +765,7 @@ func TestVolumeCountConflicts(t *testing.T) {
|
||||
// running attachable predicate tests with feature gate and limit present on nodes
|
||||
for _, test := range tests {
|
||||
node := getNodeWithPodAndVolumeLimits(test.existingPods, int64(test.maxVols), test.filterName)
|
||||
pred := NewMaxPDVolumeCountPredicate(test.filterName, pvInfo(test.filterName), pvcInfo(test.filterName))
|
||||
pred := NewMaxPDVolumeCountPredicate(test.filterName, getFakePVInfo(test.filterName), getFakePVCInfo(test.filterName))
|
||||
fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), node)
|
||||
if err != nil {
|
||||
t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
|
||||
@@ -826,6 +779,122 @@ func TestVolumeCountConflicts(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func getFakePVInfo(filterName string) FakePersistentVolumeInfo {
|
||||
return FakePersistentVolumeInfo{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: strings.ToLower(filterName) + "Vol"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "someNon" + filterName + "Vol"},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func getFakePVCInfo(filterName string) FakePersistentVolumeClaimInfo {
|
||||
return FakePersistentVolumeClaimInfo{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"},
|
||||
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "some" + filterName + "Vol"},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "someNon" + filterName + "Vol"},
|
||||
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "someNon" + filterName + "Vol"},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pvcWithDeletedPV"},
|
||||
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "pvcWithDeletedPV"},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "anotherPVCWithDeletedPV"},
|
||||
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "anotherPVCWithDeletedPV"},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "unboundPVC"},
|
||||
Spec: v1.PersistentVolumeClaimSpec{VolumeName: ""},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "anotherUnboundPVC"},
|
||||
Spec: v1.PersistentVolumeClaimSpec{VolumeName: ""},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestMaxVolumeFuncM5(t *testing.T) {
|
||||
node := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node-for-m5-instance",
|
||||
Labels: map[string]string{
|
||||
kubeletapis.LabelInstanceType: "m5.large",
|
||||
},
|
||||
},
|
||||
}
|
||||
os.Unsetenv(KubeMaxPDVols)
|
||||
maxVolumeFunc := getMaxVolumeFunc(EBSVolumeFilterType)
|
||||
maxVolume := maxVolumeFunc(node)
|
||||
if maxVolume != volumeutil.DefaultMaxEBSNitroVolumeLimit {
|
||||
t.Errorf("Expected max volume to be %d got %d", volumeutil.DefaultMaxEBSNitroVolumeLimit, maxVolume)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMaxVolumeFuncT3(t *testing.T) {
|
||||
node := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node-for-t3-instance",
|
||||
Labels: map[string]string{
|
||||
kubeletapis.LabelInstanceType: "t3.medium",
|
||||
},
|
||||
},
|
||||
}
|
||||
os.Unsetenv(KubeMaxPDVols)
|
||||
maxVolumeFunc := getMaxVolumeFunc(EBSVolumeFilterType)
|
||||
maxVolume := maxVolumeFunc(node)
|
||||
if maxVolume != volumeutil.DefaultMaxEBSNitroVolumeLimit {
|
||||
t.Errorf("Expected max volume to be %d got %d", volumeutil.DefaultMaxEBSNitroVolumeLimit, maxVolume)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMaxVolumeFuncR5(t *testing.T) {
|
||||
node := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node-for-r5-instance",
|
||||
Labels: map[string]string{
|
||||
kubeletapis.LabelInstanceType: "r5d.xlarge",
|
||||
},
|
||||
},
|
||||
}
|
||||
os.Unsetenv(KubeMaxPDVols)
|
||||
maxVolumeFunc := getMaxVolumeFunc(EBSVolumeFilterType)
|
||||
maxVolume := maxVolumeFunc(node)
|
||||
if maxVolume != volumeutil.DefaultMaxEBSNitroVolumeLimit {
|
||||
t.Errorf("Expected max volume to be %d got %d", volumeutil.DefaultMaxEBSNitroVolumeLimit, maxVolume)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMaxVolumeFuncM4(t *testing.T) {
|
||||
node := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node-for-m4-instance",
|
||||
Labels: map[string]string{
|
||||
kubeletapis.LabelInstanceType: "m4.2xlarge",
|
||||
},
|
||||
},
|
||||
}
|
||||
os.Unsetenv(KubeMaxPDVols)
|
||||
maxVolumeFunc := getMaxVolumeFunc(EBSVolumeFilterType)
|
||||
maxVolume := maxVolumeFunc(node)
|
||||
if maxVolume != volumeutil.DefaultMaxEBSVolumes {
|
||||
t.Errorf("Expected max volume to be %d got %d", volumeutil.DefaultMaxEBSVolumes, maxVolume)
|
||||
}
|
||||
}
|
||||
|
||||
func getNodeWithPodAndVolumeLimits(pods []*v1.Pod, limit int64, filter string) *schedulercache.NodeInfo {
|
||||
nodeInfo := schedulercache.NewNodeInfo(pods...)
|
||||
node := &v1.Node{
|
||||
@@ -849,6 +918,6 @@ func getVolumeLimitKey(filterType string) v1.ResourceName {
|
||||
case AzureDiskVolumeFilterType:
|
||||
return v1.ResourceName(volumeutil.AzureVolumeLimitKey)
|
||||
default:
|
||||
return ""
|
||||
return v1.ResourceName(volumeutil.GetCSIAttachLimitKey(filterType))
|
||||
}
|
||||
}
|
||||
|
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/metadata.go (generated, vendored, 333 lines changed)
@@ -38,6 +38,12 @@ type PredicateMetadataFactory struct {
|
||||
podLister algorithm.PodLister
|
||||
}
|
||||
|
||||
// AntiAffinityTerm's topology key value used in predicate metadata
|
||||
type topologyPair struct {
|
||||
key string
|
||||
value string
|
||||
}
|
||||
|
||||
// Note that predicateMetadata and matchingPodAntiAffinityTerm need to be declared in the same file
|
||||
// due to the way declarations are processed in predicate declaration unit tests.
|
||||
type matchingPodAntiAffinityTerm struct {
|
||||
@@ -45,6 +51,17 @@ type matchingPodAntiAffinityTerm struct {
|
||||
node *v1.Node
|
||||
}
|
||||
|
||||
type podSet map[*v1.Pod]struct{}
|
||||
|
||||
type topologyPairSet map[topologyPair]struct{}
|
||||
|
||||
// topologyPairsMaps keeps topologyPairToAntiAffinityPods and antiAffinityPodToTopologyPairs in sync
|
||||
// as they are the inverse of each others.
|
||||
type topologyPairsMaps struct {
|
||||
topologyPairToPods map[topologyPair]podSet
|
||||
podToTopologyPairs map[string]topologyPairSet
|
||||
}
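topologyPairToPods and podToTopologyPairs are deliberately kept as inverses of each other: the first answers "which pods sit under this topology key/value pair", while the second lets a pod removal find every pair that pod contributed to without scanning the whole structure. A small self-contained sketch of that invariant with simplified types (string pod names instead of *v1.Pod; every name below is illustrative and not part of the scheduler's code):

package main

import "fmt"

type pair struct{ key, value string }

// twoWay keeps a pair->pods index and its inverse pod->pairs index in sync,
// mirroring the bookkeeping done by topologyPairsMaps.
type twoWay struct {
    pairToPods map[pair]map[string]struct{}
    podToPairs map[string]map[pair]struct{}
}

func newTwoWay() *twoWay {
    return &twoWay{
        pairToPods: map[pair]map[string]struct{}{},
        podToPairs: map[string]map[pair]struct{}{},
    }
}

func (m *twoWay) add(p pair, pod string) {
    if m.pairToPods[p] == nil {
        m.pairToPods[p] = map[string]struct{}{}
    }
    m.pairToPods[p][pod] = struct{}{}
    if m.podToPairs[pod] == nil {
        m.podToPairs[pod] = map[pair]struct{}{}
    }
    m.podToPairs[pod][p] = struct{}{}
}

func (m *twoWay) removePod(pod string) {
    // The inverse index tells us exactly which pairs to clean up.
    for p := range m.podToPairs[pod] {
        delete(m.pairToPods[p], pod)
        if len(m.pairToPods[p]) == 0 {
            delete(m.pairToPods, p)
        }
    }
    delete(m.podToPairs, pod)
}

func main() {
    m := newTwoWay()
    m.add(pair{"kubernetes.io/hostname", "node-1"}, "ns/web-0")
    m.add(pair{"failure-domain.beta.kubernetes.io/zone", "us-east-1a"}, "ns/web-0")
    m.removePod("ns/web-0")
    fmt.Println(len(m.pairToPods), len(m.podToPairs)) // 0 0: both indexes stay consistent
}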
|
||||
|
||||
// NOTE: When new fields are added/removed or logic is changed, please make sure that
|
||||
// RemovePod, AddPod, and ShallowCopy functions are updated to work with the new changes.
|
||||
type predicateMetadata struct {
|
||||
@@ -52,17 +69,17 @@ type predicateMetadata struct {
|
||||
podBestEffort bool
|
||||
podRequest *schedulercache.Resource
|
||||
podPorts []*v1.ContainerPort
|
||||
//key is a pod full name with the anti-affinity rules.
|
||||
matchingAntiAffinityTerms map[string][]matchingPodAntiAffinityTerm
|
||||
// A map of node name to a list of Pods on the node that can potentially match
|
||||
// the affinity rules of the "pod".
|
||||
nodeNameToMatchingAffinityPods map[string][]*v1.Pod
|
||||
// A map of node name to a list of Pods on the node that can potentially match
|
||||
// the anti-affinity rules of the "pod".
|
||||
nodeNameToMatchingAntiAffinityPods map[string][]*v1.Pod
|
||||
serviceAffinityInUse bool
|
||||
serviceAffinityMatchingPodList []*v1.Pod
|
||||
serviceAffinityMatchingPodServices []*v1.Service
|
||||
|
||||
topologyPairsAntiAffinityPodsMap *topologyPairsMaps
|
||||
// A map of topology pairs to a list of Pods that can potentially match
|
||||
// the affinity terms of the "pod" and its inverse.
|
||||
topologyPairsPotentialAffinityPods *topologyPairsMaps
|
||||
// A map of topology pairs to a list of Pods that can potentially match
|
||||
// the anti-affinity terms of the "pod" and its inverse.
|
||||
topologyPairsPotentialAntiAffinityPods *topologyPairsMaps
|
||||
serviceAffinityInUse bool
|
||||
serviceAffinityMatchingPodList []*v1.Pod
|
||||
serviceAffinityMatchingPodServices []*v1.Service
|
||||
// ignoredExtendedResources is a set of extended resource names that will
|
||||
// be ignored in the PodFitsResources predicate.
|
||||
//
|
||||
@@ -113,23 +130,26 @@ func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInf
|
||||
if pod == nil {
|
||||
return nil
|
||||
}
|
||||
matchingTerms, err := getMatchingAntiAffinityTerms(pod, nodeNameToInfoMap)
|
||||
// existingPodAntiAffinityMap will be used later for efficient check on existing pods' anti-affinity
|
||||
existingPodAntiAffinityMap, err := getTPMapMatchingExistingAntiAffinity(pod, nodeNameToInfoMap)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
affinityPods, antiAffinityPods, err := getPodsMatchingAffinity(pod, nodeNameToInfoMap)
|
||||
// incomingPodAffinityMap will be used later for efficient check on incoming pod's affinity
|
||||
// incomingPodAntiAffinityMap will be used later for efficient check on incoming pod's anti-affinity
|
||||
incomingPodAffinityMap, incomingPodAntiAffinityMap, err := getTPMapMatchingIncomingAffinityAntiAffinity(pod, nodeNameToInfoMap)
|
||||
if err != nil {
|
||||
glog.Errorf("[predicate meta data generation] error finding pods that match affinity terms: %v", err)
|
||||
return nil
|
||||
}
|
||||
predicateMetadata := &predicateMetadata{
|
||||
pod: pod,
|
||||
podBestEffort: isPodBestEffort(pod),
|
||||
podRequest: GetResourceRequest(pod),
|
||||
podPorts: schedutil.GetContainerPorts(pod),
|
||||
matchingAntiAffinityTerms: matchingTerms,
|
||||
nodeNameToMatchingAffinityPods: affinityPods,
|
||||
nodeNameToMatchingAntiAffinityPods: antiAffinityPods,
|
||||
pod: pod,
|
||||
podBestEffort: isPodBestEffort(pod),
|
||||
podRequest: GetResourceRequest(pod),
|
||||
podPorts: schedutil.GetContainerPorts(pod),
|
||||
topologyPairsPotentialAffinityPods: incomingPodAffinityMap,
|
||||
topologyPairsPotentialAntiAffinityPods: incomingPodAntiAffinityMap,
|
||||
topologyPairsAntiAffinityPodsMap: existingPodAntiAffinityMap,
|
||||
}
|
||||
for predicateName, precomputeFunc := range predicateMetadataProducers {
|
||||
glog.V(10).Infof("Precompute: %v", predicateName)
|
||||
@@ -138,6 +158,46 @@ func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInf
|
||||
return predicateMetadata
|
||||
}
|
||||
|
||||
// returns a pointer to a new topologyPairsMaps
func newTopologyPairsMaps() *topologyPairsMaps {
    return &topologyPairsMaps{topologyPairToPods: make(map[topologyPair]podSet),
        podToTopologyPairs: make(map[string]topologyPairSet)}
}

func (topologyPairsMaps *topologyPairsMaps) addTopologyPair(pair topologyPair, pod *v1.Pod) {
    podFullName := schedutil.GetPodFullName(pod)
    if topologyPairsMaps.topologyPairToPods[pair] == nil {
        topologyPairsMaps.topologyPairToPods[pair] = make(map[*v1.Pod]struct{})
    }
    topologyPairsMaps.topologyPairToPods[pair][pod] = struct{}{}
    if topologyPairsMaps.podToTopologyPairs[podFullName] == nil {
        topologyPairsMaps.podToTopologyPairs[podFullName] = make(map[topologyPair]struct{})
    }
    topologyPairsMaps.podToTopologyPairs[podFullName][pair] = struct{}{}
}

func (topologyPairsMaps *topologyPairsMaps) removePod(deletedPod *v1.Pod) {
    deletedPodFullName := schedutil.GetPodFullName(deletedPod)
    for pair := range topologyPairsMaps.podToTopologyPairs[deletedPodFullName] {
        delete(topologyPairsMaps.topologyPairToPods[pair], deletedPod)
        if len(topologyPairsMaps.topologyPairToPods[pair]) == 0 {
            delete(topologyPairsMaps.topologyPairToPods, pair)
        }
    }
    delete(topologyPairsMaps.podToTopologyPairs, deletedPodFullName)
}

func (topologyPairsMaps *topologyPairsMaps) appendMaps(toAppend *topologyPairsMaps) {
    if toAppend == nil {
        return
    }
    for pair := range toAppend.topologyPairToPods {
        for pod := range toAppend.topologyPairToPods[pair] {
            topologyPairsMaps.addTopologyPair(pair, pod)
        }
    }
}
|
||||
// RemovePod changes predicateMetadata assuming that the given `deletedPod` is
|
||||
// deleted from the system.
|
||||
func (meta *predicateMetadata) RemovePod(deletedPod *v1.Pod) error {
|
||||
@@ -145,35 +205,10 @@ func (meta *predicateMetadata) RemovePod(deletedPod *v1.Pod) error {
|
||||
if deletedPodFullName == schedutil.GetPodFullName(meta.pod) {
|
||||
return fmt.Errorf("deletedPod and meta.pod must not be the same")
|
||||
}
|
||||
// Delete any anti-affinity rule from the deletedPod.
|
||||
delete(meta.matchingAntiAffinityTerms, deletedPodFullName)
|
||||
// Delete pod from the matching affinity or anti-affinity pods if exists.
|
||||
affinity := meta.pod.Spec.Affinity
|
||||
podNodeName := deletedPod.Spec.NodeName
|
||||
if affinity != nil && len(podNodeName) > 0 {
|
||||
if affinity.PodAffinity != nil {
|
||||
for i, p := range meta.nodeNameToMatchingAffinityPods[podNodeName] {
|
||||
if p == deletedPod {
|
||||
s := meta.nodeNameToMatchingAffinityPods[podNodeName]
|
||||
s[i] = s[len(s)-1]
|
||||
s = s[:len(s)-1]
|
||||
meta.nodeNameToMatchingAffinityPods[podNodeName] = s
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if affinity.PodAntiAffinity != nil {
|
||||
for i, p := range meta.nodeNameToMatchingAntiAffinityPods[podNodeName] {
|
||||
if p == deletedPod {
|
||||
s := meta.nodeNameToMatchingAntiAffinityPods[podNodeName]
|
||||
s[i] = s[len(s)-1]
|
||||
s = s[:len(s)-1]
|
||||
meta.nodeNameToMatchingAntiAffinityPods[podNodeName] = s
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
meta.topologyPairsAntiAffinityPodsMap.removePod(deletedPod)
|
||||
// Delete pod from the matching affinity or anti-affinity topology pairs maps.
|
||||
meta.topologyPairsPotentialAffinityPods.removePod(deletedPod)
|
||||
meta.topologyPairsPotentialAntiAffinityPods.removePod(deletedPod)
|
||||
// All pods in the serviceAffinityMatchingPodList are in the same namespace.
|
||||
// So, if the namespace of the first one is not the same as the namespace of the
|
||||
// deletedPod, we don't need to check the list, as deletedPod isn't in the list.
|
||||
@@ -203,46 +238,36 @@ func (meta *predicateMetadata) AddPod(addedPod *v1.Pod, nodeInfo *schedulercache
|
||||
return fmt.Errorf("invalid node in nodeInfo")
|
||||
}
|
||||
// Add matching anti-affinity terms of the addedPod to the map.
|
||||
podMatchingTerms, err := getMatchingAntiAffinityTermsOfExistingPod(meta.pod, addedPod, nodeInfo.Node())
|
||||
topologyPairsMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(meta.pod, addedPod, nodeInfo.Node())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(podMatchingTerms) > 0 {
|
||||
existingTerms, found := meta.matchingAntiAffinityTerms[addedPodFullName]
|
||||
if found {
|
||||
meta.matchingAntiAffinityTerms[addedPodFullName] = append(existingTerms,
|
||||
podMatchingTerms...)
|
||||
} else {
|
||||
meta.matchingAntiAffinityTerms[addedPodFullName] = podMatchingTerms
|
||||
}
|
||||
}
|
||||
meta.topologyPairsAntiAffinityPodsMap.appendMaps(topologyPairsMaps)
|
||||
// Add the pod to nodeNameToMatchingAffinityPods and nodeNameToMatchingAntiAffinityPods if needed.
|
||||
affinity := meta.pod.Spec.Affinity
|
||||
podNodeName := addedPod.Spec.NodeName
|
||||
if affinity != nil && len(podNodeName) > 0 {
|
||||
podNode := nodeInfo.Node()
|
||||
// It is assumed that when the added pod matches affinity of the meta.pod, all the terms must match,
|
||||
// this should be changed when the implementation of targetPodMatchesAffinityOfPod/podMatchesAffinityTermProperties
|
||||
// is changed
|
||||
if targetPodMatchesAffinityOfPod(meta.pod, addedPod) {
|
||||
found := false
|
||||
for _, p := range meta.nodeNameToMatchingAffinityPods[podNodeName] {
|
||||
if p == addedPod {
|
||||
found = true
|
||||
break
|
||||
affinityTerms := GetPodAffinityTerms(affinity.PodAffinity)
|
||||
for _, term := range affinityTerms {
|
||||
if topologyValue, ok := podNode.Labels[term.TopologyKey]; ok {
|
||||
pair := topologyPair{key: term.TopologyKey, value: topologyValue}
|
||||
meta.topologyPairsPotentialAffinityPods.addTopologyPair(pair, addedPod)
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
meta.nodeNameToMatchingAffinityPods[podNodeName] = append(meta.nodeNameToMatchingAffinityPods[podNodeName], addedPod)
|
||||
}
|
||||
}
|
||||
if targetPodMatchesAntiAffinityOfPod(meta.pod, addedPod) {
|
||||
found := false
|
||||
for _, p := range meta.nodeNameToMatchingAntiAffinityPods[podNodeName] {
|
||||
if p == addedPod {
|
||||
found = true
|
||||
break
|
||||
antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity)
|
||||
for _, term := range antiAffinityTerms {
|
||||
if topologyValue, ok := podNode.Labels[term.TopologyKey]; ok {
|
||||
pair := topologyPair{key: term.TopologyKey, value: topologyValue}
|
||||
meta.topologyPairsPotentialAntiAffinityPods.addTopologyPair(pair, addedPod)
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
meta.nodeNameToMatchingAntiAffinityPods[podNodeName] = append(meta.nodeNameToMatchingAntiAffinityPods[podNodeName], addedPod)
|
||||
}
|
||||
}
|
||||
}
|
||||
// If addedPod is in the same namespace as the meta.pod, update the list
|
||||
@@ -268,18 +293,12 @@ func (meta *predicateMetadata) ShallowCopy() algorithm.PredicateMetadata {
|
||||
ignoredExtendedResources: meta.ignoredExtendedResources,
|
||||
}
|
||||
newPredMeta.podPorts = append([]*v1.ContainerPort(nil), meta.podPorts...)
|
||||
newPredMeta.matchingAntiAffinityTerms = map[string][]matchingPodAntiAffinityTerm{}
|
||||
for k, v := range meta.matchingAntiAffinityTerms {
|
||||
newPredMeta.matchingAntiAffinityTerms[k] = append([]matchingPodAntiAffinityTerm(nil), v...)
|
||||
}
|
||||
newPredMeta.nodeNameToMatchingAffinityPods = make(map[string][]*v1.Pod)
|
||||
for k, v := range meta.nodeNameToMatchingAffinityPods {
|
||||
newPredMeta.nodeNameToMatchingAffinityPods[k] = append([]*v1.Pod(nil), v...)
|
||||
}
|
||||
newPredMeta.nodeNameToMatchingAntiAffinityPods = make(map[string][]*v1.Pod)
|
||||
for k, v := range meta.nodeNameToMatchingAntiAffinityPods {
|
||||
newPredMeta.nodeNameToMatchingAntiAffinityPods[k] = append([]*v1.Pod(nil), v...)
|
||||
}
|
||||
newPredMeta.topologyPairsPotentialAffinityPods = newTopologyPairsMaps()
|
||||
newPredMeta.topologyPairsPotentialAffinityPods.appendMaps(meta.topologyPairsPotentialAffinityPods)
|
||||
newPredMeta.topologyPairsPotentialAntiAffinityPods = newTopologyPairsMaps()
|
||||
newPredMeta.topologyPairsPotentialAntiAffinityPods.appendMaps(meta.topologyPairsPotentialAntiAffinityPods)
|
||||
newPredMeta.topologyPairsAntiAffinityPodsMap = newTopologyPairsMaps()
|
||||
newPredMeta.topologyPairsAntiAffinityPodsMap.appendMaps(meta.topologyPairsAntiAffinityPodsMap)
|
||||
newPredMeta.serviceAffinityMatchingPodServices = append([]*v1.Service(nil),
|
||||
meta.serviceAffinityMatchingPodServices...)
|
||||
newPredMeta.serviceAffinityMatchingPodList = append([]*v1.Pod(nil),
|
||||
@@ -310,8 +329,8 @@ func getAffinityTermProperties(pod *v1.Pod, terms []v1.PodAffinityTerm) (propert
|
||||
return properties, nil
|
||||
}
|
||||
|
||||
// podMatchesAffinityTermProperties return true IFF the given pod matches all the given properties.
|
||||
func podMatchesAffinityTermProperties(pod *v1.Pod, properties []*affinityTermProperties) bool {
|
||||
// podMatchesAllAffinityTermProperties returns true IFF the given pod matches all the given properties.
|
||||
func podMatchesAllAffinityTermProperties(pod *v1.Pod, properties []*affinityTermProperties) bool {
|
||||
if len(properties) == 0 {
|
||||
return false
|
||||
}
|
||||
@@ -323,16 +342,76 @@ func podMatchesAffinityTermProperties(pod *v1.Pod, properties []*affinityTermPro
|
||||
return true
|
||||
}
|
||||
|
||||
// getPodsMatchingAffinity finds existing Pods that match affinity terms of the given "pod".
|
||||
// It ignores topology. It returns a set of Pods that are checked later by the affinity
|
||||
// predicate. With this set of pods available, the affinity predicate does not
|
||||
// podMatchesAnyAffinityTermProperties returns true if the given pod matches any given property.
|
||||
func podMatchesAnyAffinityTermProperties(pod *v1.Pod, properties []*affinityTermProperties) bool {
|
||||
if len(properties) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, property := range properties {
|
||||
if priorityutil.PodMatchesTermsNamespaceAndSelector(pod, property.namespaces, property.selector) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// getTPMapMatchingExistingAntiAffinity calculates the following for each existing pod on each node:
|
||||
// (1) Whether it has PodAntiAffinity
|
||||
// (2) Whether any AffinityTerm matches the incoming pod
|
||||
func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) (*topologyPairsMaps, error) {
|
||||
allNodeNames := make([]string, 0, len(nodeInfoMap))
|
||||
for name := range nodeInfoMap {
|
||||
allNodeNames = append(allNodeNames, name)
|
||||
}
|
||||
|
||||
var lock sync.Mutex
|
||||
var firstError error
|
||||
|
||||
topologyMaps := newTopologyPairsMaps()
|
||||
|
||||
appendTopologyPairsMaps := func(toAppend *topologyPairsMaps) {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
topologyMaps.appendMaps(toAppend)
|
||||
}
|
||||
catchError := func(err error) {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
if firstError == nil {
|
||||
firstError = err
|
||||
}
|
||||
}
|
||||
|
||||
processNode := func(i int) {
|
||||
nodeInfo := nodeInfoMap[allNodeNames[i]]
|
||||
node := nodeInfo.Node()
|
||||
if node == nil {
|
||||
catchError(fmt.Errorf("node not found"))
|
||||
return
|
||||
}
|
||||
for _, existingPod := range nodeInfo.PodsWithAffinity() {
|
||||
existingPodTopologyMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(pod, existingPod, node)
|
||||
if err != nil {
|
||||
catchError(err)
|
||||
return
|
||||
}
|
||||
appendTopologyPairsMaps(existingPodTopologyMaps)
|
||||
}
|
||||
}
|
||||
workqueue.Parallelize(16, len(allNodeNames), processNode)
|
||||
return topologyMaps, firstError
|
||||
}
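getTPMapMatchingExistingAntiAffinity fans processNode out across all nodes with workqueue.Parallelize and funnels both the merged result and the first error back through a shared mutex. A minimal standard-library sketch of that fan-out pattern, bounded workers plus a mutex-guarded merge and first-error capture, with purely illustrative names rather than anything from the Kubernetes tree:

package main

import (
    "fmt"
    "sync"
)

// parallelize runs work(i) for i in [0, pieces) on up to workers goroutines,
// roughly the shape of what workqueue.Parallelize provides to the scheduler.
func parallelize(workers, pieces int, work func(i int)) {
    toProcess := make(chan int, pieces)
    for i := 0; i < pieces; i++ {
        toProcess <- i
    }
    close(toProcess)
    var wg sync.WaitGroup
    wg.Add(workers)
    for w := 0; w < workers; w++ {
        go func() {
            defer wg.Done()
            for i := range toProcess {
                work(i)
            }
        }()
    }
    wg.Wait()
}

func main() {
    nodes := []string{"node-1", "node-2", "node-3"}
    var (
        lock       sync.Mutex
        firstError error
        merged     = map[string]bool{}
    )
    processNode := func(i int) {
        // Per-node work would happen here without the lock; only the merge
        // and the first-error capture are serialized.
        lock.Lock()
        defer lock.Unlock()
        merged[nodes[i]] = true
        if firstError == nil && nodes[i] == "" {
            firstError = fmt.Errorf("node not found")
        }
    }
    parallelize(16, len(nodes), processNode)
    fmt.Println(len(merged), firstError) // 3 <nil>
}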
|
||||
|
||||
// getTPMapMatchingIncomingAffinityAntiAffinity finds existing Pods that match affinity terms of the given "pod".
|
||||
// It returns a topologyPairsMaps that are checked later by the affinity
|
||||
// predicate. With this topologyPairsMaps available, the affinity predicate does not
|
||||
// need to check all the pods in the cluster.
|
||||
func getPodsMatchingAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) (affinityPods map[string][]*v1.Pod, antiAffinityPods map[string][]*v1.Pod, err error) {
|
||||
func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) (topologyPairsAffinityPodsMaps *topologyPairsMaps, topologyPairsAntiAffinityPodsMaps *topologyPairsMaps, err error) {
|
||||
allNodeNames := make([]string, 0, len(nodeInfoMap))
|
||||
|
||||
affinity := pod.Spec.Affinity
|
||||
if affinity == nil || (affinity.PodAffinity == nil && affinity.PodAntiAffinity == nil) {
|
||||
return nil, nil, nil
|
||||
return newTopologyPairsMaps(), newTopologyPairsMaps(), nil
|
||||
}
|
||||
|
||||
for name := range nodeInfoMap {
|
||||
@@ -341,16 +420,16 @@ func getPodsMatchingAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache
|
||||
|
||||
var lock sync.Mutex
|
||||
var firstError error
|
||||
affinityPods = make(map[string][]*v1.Pod)
|
||||
antiAffinityPods = make(map[string][]*v1.Pod)
|
||||
appendResult := func(nodeName string, affPods, antiAffPods []*v1.Pod) {
|
||||
topologyPairsAffinityPodsMaps = newTopologyPairsMaps()
|
||||
topologyPairsAntiAffinityPodsMaps = newTopologyPairsMaps()
|
||||
appendResult := func(nodeName string, nodeTopologyPairsAffinityPodsMaps, nodeTopologyPairsAntiAffinityPodsMaps *topologyPairsMaps) {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
if len(affPods) > 0 {
|
||||
affinityPods[nodeName] = affPods
|
||||
if len(nodeTopologyPairsAffinityPodsMaps.topologyPairToPods) > 0 {
|
||||
topologyPairsAffinityPodsMaps.appendMaps(nodeTopologyPairsAffinityPodsMaps)
|
||||
}
|
||||
if len(antiAffPods) > 0 {
|
||||
antiAffinityPods[nodeName] = antiAffPods
|
||||
if len(nodeTopologyPairsAntiAffinityPodsMaps.topologyPairToPods) > 0 {
|
||||
topologyPairsAntiAffinityPodsMaps.appendMaps(nodeTopologyPairsAntiAffinityPodsMaps)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -362,14 +441,12 @@ func getPodsMatchingAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache
|
||||
}
|
||||
}
|
||||
|
||||
affinityProperties, err := getAffinityTermProperties(pod, GetPodAffinityTerms(affinity.PodAffinity))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
antiAffinityProperties, err := getAffinityTermProperties(pod, GetPodAntiAffinityTerms(affinity.PodAntiAffinity))
|
||||
affinityTerms := GetPodAffinityTerms(affinity.PodAffinity)
|
||||
affinityProperties, err := getAffinityTermProperties(pod, affinityTerms)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity)
|
||||
|
||||
processNode := func(i int) {
|
||||
nodeInfo := nodeInfoMap[allNodeNames[i]]
|
||||
@@ -378,27 +455,43 @@ func getPodsMatchingAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache
|
||||
catchError(fmt.Errorf("nodeInfo.Node is nil"))
|
||||
return
|
||||
}
|
||||
affPods := make([]*v1.Pod, 0, len(nodeInfo.Pods()))
|
||||
antiAffPods := make([]*v1.Pod, 0, len(nodeInfo.Pods()))
|
||||
nodeTopologyPairsAffinityPodsMaps := newTopologyPairsMaps()
|
||||
nodeTopologyPairsAntiAffinityPodsMaps := newTopologyPairsMaps()
|
||||
for _, existingPod := range nodeInfo.Pods() {
|
||||
// Check affinity properties.
|
||||
if podMatchesAffinityTermProperties(existingPod, affinityProperties) {
|
||||
affPods = append(affPods, existingPod)
|
||||
if podMatchesAllAffinityTermProperties(existingPod, affinityProperties) {
|
||||
for _, term := range affinityTerms {
|
||||
if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
|
||||
pair := topologyPair{key: term.TopologyKey, value: topologyValue}
|
||||
nodeTopologyPairsAffinityPodsMaps.addTopologyPair(pair, existingPod)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Check anti-affinity properties.
|
||||
if podMatchesAffinityTermProperties(existingPod, antiAffinityProperties) {
|
||||
antiAffPods = append(antiAffPods, existingPod)
|
||||
for _, term := range antiAffinityTerms {
|
||||
namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(pod, &term)
|
||||
selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
|
||||
if err != nil {
|
||||
catchError(err)
|
||||
return
|
||||
}
|
||||
if priorityutil.PodMatchesTermsNamespaceAndSelector(existingPod, namespaces, selector) {
|
||||
if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
|
||||
pair := topologyPair{key: term.TopologyKey, value: topologyValue}
|
||||
nodeTopologyPairsAntiAffinityPodsMaps.addTopologyPair(pair, existingPod)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(antiAffPods) > 0 || len(affPods) > 0 {
|
||||
appendResult(node.Name, affPods, antiAffPods)
|
||||
if len(nodeTopologyPairsAffinityPodsMaps.topologyPairToPods) > 0 || len(nodeTopologyPairsAntiAffinityPodsMaps.topologyPairToPods) > 0 {
|
||||
appendResult(node.Name, nodeTopologyPairsAffinityPodsMaps, nodeTopologyPairsAntiAffinityPodsMaps)
|
||||
}
|
||||
}
|
||||
workqueue.Parallelize(16, len(allNodeNames), processNode)
|
||||
return affinityPods, antiAffinityPods, firstError
|
||||
return topologyPairsAffinityPodsMaps, topologyPairsAntiAffinityPodsMaps, firstError
|
||||
}
|
||||
|
||||
// podMatchesAffinity returns true if "targetPod" matches any affinity rule of
|
||||
// targetPodMatchesAffinityOfPod returns true if "targetPod" matches ALL affinity terms of
|
||||
// "pod". Similar to getPodsMatchingAffinity, this function does not check topology.
|
||||
// So, whether the targetPod actually matches or not needs further checks for a specific
|
||||
// node.
|
||||
@@ -412,11 +505,11 @@ func targetPodMatchesAffinityOfPod(pod, targetPod *v1.Pod) bool {
|
||||
glog.Errorf("error in getting affinity properties of Pod %v", pod.Name)
|
||||
return false
|
||||
}
|
||||
return podMatchesAffinityTermProperties(targetPod, affinityProperties)
|
||||
return podMatchesAllAffinityTermProperties(targetPod, affinityProperties)
|
||||
}
|
||||
|
||||
// targetPodMatchesAntiAffinityOfPod returns true if "targetPod" matches any anti-affinity
|
||||
// rule of "pod". Similar to getPodsMatchingAffinity, this function does not check topology.
|
||||
// targetPodMatchesAntiAffinityOfPod returns true if "targetPod" matches ANY anti-affinity
|
||||
// term of "pod". Similar to getPodsMatchingAffinity, this function does not check topology.
|
||||
// So, whether the targetPod actually matches or not needs further checks for a specific
|
||||
// node.
|
||||
func targetPodMatchesAntiAffinityOfPod(pod, targetPod *v1.Pod) bool {
|
||||
@@ -429,5 +522,5 @@ func targetPodMatchesAntiAffinityOfPod(pod, targetPod *v1.Pod) bool {
|
||||
glog.Errorf("error in getting anti-affinity properties of Pod %v", pod.Name)
|
||||
return false
|
||||
}
|
||||
return podMatchesAffinityTermProperties(targetPod, properties)
|
||||
return podMatchesAnyAffinityTermProperties(targetPod, properties)
|
||||
}
|
||||
|
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/metadata_test.go (generated, vendored, 527 lines changed)
@@ -28,42 +28,6 @@ import (
|
||||
schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
|
||||
)
|
||||
|
||||
// sortableAntiAffinityTerms lets us to sort anti-affinity terms.
|
||||
type sortableAntiAffinityTerms []matchingPodAntiAffinityTerm
|
||||
|
||||
// Less establishes some ordering between two matchingPodAntiAffinityTerms for
|
||||
// sorting.
|
||||
func (s sortableAntiAffinityTerms) Less(i, j int) bool {
|
||||
t1, t2 := s[i], s[j]
|
||||
if t1.node.Name != t2.node.Name {
|
||||
return t1.node.Name < t2.node.Name
|
||||
}
|
||||
if len(t1.term.Namespaces) != len(t2.term.Namespaces) {
|
||||
return len(t1.term.Namespaces) < len(t2.term.Namespaces)
|
||||
}
|
||||
if t1.term.TopologyKey != t2.term.TopologyKey {
|
||||
return t1.term.TopologyKey < t2.term.TopologyKey
|
||||
}
|
||||
if len(t1.term.LabelSelector.MatchLabels) != len(t2.term.LabelSelector.MatchLabels) {
|
||||
return len(t1.term.LabelSelector.MatchLabels) < len(t2.term.LabelSelector.MatchLabels)
|
||||
}
|
||||
return false
|
||||
}
|
||||
func (s sortableAntiAffinityTerms) Len() int { return len(s) }
|
||||
func (s sortableAntiAffinityTerms) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
var _ = sort.Interface(sortableAntiAffinityTerms{})
|
||||
|
||||
func sortAntiAffinityTerms(terms map[string][]matchingPodAntiAffinityTerm) {
|
||||
for k, v := range terms {
|
||||
sortableTerms := sortableAntiAffinityTerms(v)
|
||||
sort.Sort(sortableTerms)
|
||||
terms[k] = sortableTerms
|
||||
}
|
||||
}
|
||||
|
||||
// sortablePods lets us to sort pods.
|
||||
type sortablePods []*v1.Pod
|
||||
|
||||
@@ -88,13 +52,6 @@ func (s sortableServices) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
var _ = sort.Interface(&sortableServices{})
|
||||
|
||||
func sortNodePodMap(np map[string][]*v1.Pod) {
|
||||
for _, pl := range np {
|
||||
sortablePods := sortablePods(pl)
|
||||
sort.Sort(sortablePods)
|
||||
}
|
||||
}
|
||||
|
||||
// predicateMetadataEquivalent returns true if the two metadata are equivalent.
|
||||
// Note: this function does not compare podRequest.
|
||||
func predicateMetadataEquivalent(meta1, meta2 *predicateMetadata) error {
|
||||
@@ -113,20 +70,19 @@ func predicateMetadataEquivalent(meta1, meta2 *predicateMetadata) error {
|
||||
for !reflect.DeepEqual(meta1.podPorts, meta2.podPorts) {
|
||||
return fmt.Errorf("podPorts are not equal")
|
||||
}
|
||||
sortAntiAffinityTerms(meta1.matchingAntiAffinityTerms)
|
||||
sortAntiAffinityTerms(meta2.matchingAntiAffinityTerms)
|
||||
if !reflect.DeepEqual(meta1.matchingAntiAffinityTerms, meta2.matchingAntiAffinityTerms) {
|
||||
return fmt.Errorf("matchingAntiAffinityTerms are not euqal")
|
||||
if !reflect.DeepEqual(meta1.topologyPairsPotentialAffinityPods, meta2.topologyPairsPotentialAffinityPods) {
|
||||
return fmt.Errorf("topologyPairsPotentialAffinityPods are not equal")
|
||||
}
|
||||
sortNodePodMap(meta1.nodeNameToMatchingAffinityPods)
|
||||
sortNodePodMap(meta2.nodeNameToMatchingAffinityPods)
|
||||
if !reflect.DeepEqual(meta1.nodeNameToMatchingAffinityPods, meta2.nodeNameToMatchingAffinityPods) {
|
||||
return fmt.Errorf("nodeNameToMatchingAffinityPods are not euqal")
|
||||
if !reflect.DeepEqual(meta1.topologyPairsPotentialAntiAffinityPods, meta2.topologyPairsPotentialAntiAffinityPods) {
|
||||
return fmt.Errorf("topologyPairsPotentialAntiAffinityPods are not equal")
|
||||
}
|
||||
sortNodePodMap(meta1.nodeNameToMatchingAntiAffinityPods)
|
||||
sortNodePodMap(meta2.nodeNameToMatchingAntiAffinityPods)
|
||||
if !reflect.DeepEqual(meta1.nodeNameToMatchingAntiAffinityPods, meta2.nodeNameToMatchingAntiAffinityPods) {
|
||||
return fmt.Errorf("nodeNameToMatchingAntiAffinityPods are not euqal")
|
||||
if !reflect.DeepEqual(meta1.topologyPairsAntiAffinityPodsMap.podToTopologyPairs,
|
||||
meta2.topologyPairsAntiAffinityPodsMap.podToTopologyPairs) {
|
||||
return fmt.Errorf("topologyPairsAntiAffinityPodsMap.podToTopologyPairs are not equal")
|
||||
}
|
||||
if !reflect.DeepEqual(meta1.topologyPairsAntiAffinityPodsMap.topologyPairToPods,
|
||||
meta2.topologyPairsAntiAffinityPodsMap.topologyPairToPods) {
|
||||
return fmt.Errorf("topologyPairsAntiAffinityPodsMap.topologyPairToPods are not equal")
|
||||
}
|
||||
if meta1.serviceAffinityInUse {
|
||||
sortablePods1 := sortablePods(meta1.serviceAffinityMatchingPodList)
|
||||
@@ -236,7 +192,7 @@ func TestPredicateMetadata_AddRemovePod(t *testing.T) {
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
name string
|
||||
pendingPod *v1.Pod
|
||||
addedPod *v1.Pod
|
||||
existingPods []*v1.Pod
|
||||
@@ -244,7 +200,7 @@ func TestPredicateMetadata_AddRemovePod(t *testing.T) {
|
||||
services []*v1.Service
|
||||
}{
|
||||
{
|
||||
description: "no anti-affinity or service affinity exist",
|
||||
name: "no anti-affinity or service affinity exist",
|
||||
pendingPod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pending", Labels: selector1},
|
||||
},
|
||||
@@ -267,7 +223,7 @@ func TestPredicateMetadata_AddRemovePod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "metadata anti-affinity terms are updated correctly after adding and removing a pod",
|
||||
name: "metadata anti-affinity terms are updated correctly after adding and removing a pod",
|
||||
pendingPod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pending", Labels: selector1},
|
||||
},
|
||||
@@ -300,7 +256,7 @@ func TestPredicateMetadata_AddRemovePod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "metadata service-affinity data are updated correctly after adding and removing a pod",
|
||||
name: "metadata service-affinity data are updated correctly after adding and removing a pod",
|
||||
pendingPod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pending", Labels: selector1},
|
||||
},
|
||||
@@ -324,7 +280,7 @@ func TestPredicateMetadata_AddRemovePod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "metadata anti-affinity terms and service affinity data are updated correctly after adding and removing a pod",
|
||||
name: "metadata anti-affinity terms and service affinity data are updated correctly after adding and removing a pod",
|
||||
pendingPod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pending", Labels: selector1},
|
||||
},
|
||||
@@ -358,7 +314,7 @@ func TestPredicateMetadata_AddRemovePod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "metadata matching pod affinity and anti-affinity are updated correctly after adding and removing a pod",
|
||||
name: "metadata matching pod affinity and anti-affinity are updated correctly after adding and removing a pod",
|
||||
pendingPod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pending", Labels: selector1},
|
||||
},
|
||||
@@ -395,44 +351,46 @@ func TestPredicateMetadata_AddRemovePod(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
allPodLister := schedulertesting.FakePodLister(append(test.existingPods, test.addedPod))
// getMeta creates predicate meta data given the list of pods.
getMeta := func(lister schedulertesting.FakePodLister) (*predicateMetadata, map[string]*schedulercache.NodeInfo) {
nodeInfoMap := schedulercache.CreateNodeNameToInfoMap(lister, test.nodes)
// nodeList is a list of non-pointer nodes to feed to FakeNodeListInfo.
nodeList := []v1.Node{}
for _, n := range test.nodes {
nodeList = append(nodeList, *n)
t.Run(test.name, func(t *testing.T) {
allPodLister := schedulertesting.FakePodLister(append(test.existingPods, test.addedPod))
// getMeta creates predicate meta data given the list of pods.
getMeta := func(lister schedulertesting.FakePodLister) (*predicateMetadata, map[string]*schedulercache.NodeInfo) {
nodeInfoMap := schedulercache.CreateNodeNameToInfoMap(lister, test.nodes)
// nodeList is a list of non-pointer nodes to feed to FakeNodeListInfo.
nodeList := []v1.Node{}
for _, n := range test.nodes {
nodeList = append(nodeList, *n)
}
_, precompute := NewServiceAffinityPredicate(lister, schedulertesting.FakeServiceLister(test.services), FakeNodeListInfo(nodeList), nil)
RegisterPredicateMetadataProducer("ServiceAffinityMetaProducer", precompute)
pmf := PredicateMetadataFactory{lister}
meta := pmf.GetMetadata(test.pendingPod, nodeInfoMap)
return meta.(*predicateMetadata), nodeInfoMap
}
_, precompute := NewServiceAffinityPredicate(lister, schedulertesting.FakeServiceLister(test.services), FakeNodeListInfo(nodeList), nil)
RegisterPredicateMetadataProducer("ServiceAffinityMetaProducer", precompute)
pmf := PredicateMetadataFactory{lister}
meta := pmf.GetMetadata(test.pendingPod, nodeInfoMap)
return meta.(*predicateMetadata), nodeInfoMap
}

// allPodsMeta is meta data produced when all pods, including test.addedPod
// are given to the metadata producer.
allPodsMeta, _ := getMeta(allPodLister)
// existingPodsMeta1 is meta data produced for test.existingPods (without test.addedPod).
existingPodsMeta1, nodeInfoMap := getMeta(schedulertesting.FakePodLister(test.existingPods))
// Add test.addedPod to existingPodsMeta1 and make sure meta is equal to allPodsMeta
nodeInfo := nodeInfoMap[test.addedPod.Spec.NodeName]
if err := existingPodsMeta1.AddPod(test.addedPod, nodeInfo); err != nil {
t.Errorf("test [%v]: error adding pod to meta: %v", test.description, err)
}
if err := predicateMetadataEquivalent(allPodsMeta, existingPodsMeta1); err != nil {
t.Errorf("test [%v]: meta data are not equivalent: %v", test.description, err)
}
// Remove the added pod from existingPodsMeta1 and make sure it is equal
// to meta generated for existing pods.
existingPodsMeta2, _ := getMeta(schedulertesting.FakePodLister(test.existingPods))
if err := existingPodsMeta1.RemovePod(test.addedPod); err != nil {
t.Errorf("test [%v]: error removing pod from meta: %v", test.description, err)
}
if err := predicateMetadataEquivalent(existingPodsMeta1, existingPodsMeta2); err != nil {
t.Errorf("test [%v]: meta data are not equivalent: %v", test.description, err)
}
// allPodsMeta is meta data produced when all pods, including test.addedPod
// are given to the metadata producer.
allPodsMeta, _ := getMeta(allPodLister)
// existingPodsMeta1 is meta data produced for test.existingPods (without test.addedPod).
existingPodsMeta1, nodeInfoMap := getMeta(schedulertesting.FakePodLister(test.existingPods))
// Add test.addedPod to existingPodsMeta1 and make sure meta is equal to allPodsMeta
nodeInfo := nodeInfoMap[test.addedPod.Spec.NodeName]
if err := existingPodsMeta1.AddPod(test.addedPod, nodeInfo); err != nil {
t.Errorf("error adding pod to meta: %v", err)
}
if err := predicateMetadataEquivalent(allPodsMeta, existingPodsMeta1); err != nil {
t.Errorf("meta data are not equivalent: %v", err)
}
// Remove the added pod from existingPodsMeta1 and make sure it is equal
// to meta generated for existing pods.
existingPodsMeta2, _ := getMeta(schedulertesting.FakePodLister(test.existingPods))
if err := existingPodsMeta1.RemovePod(test.addedPod); err != nil {
t.Errorf("error removing pod from meta: %v", err)
}
if err := predicateMetadataEquivalent(existingPodsMeta1, existingPodsMeta2); err != nil {
t.Errorf("meta data are not equivalent: %v", err)
}
})
}
}
@@ -463,52 +421,93 @@ func TestPredicateMetadata_ShallowCopy(t *testing.T) {
HostIP: "1.2.3.4",
},
},
matchingAntiAffinityTerms: map[string][]matchingPodAntiAffinityTerm{
"term1": {
{
term: &v1.PodAffinityTerm{TopologyKey: "node"},
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
},
topologyPairsAntiAffinityPodsMap: &topologyPairsMaps{
topologyPairToPods: map[topologyPair]podSet{
{key: "name", value: "machine1"}: {
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p2", Labels: selector1},
Spec: v1.PodSpec{NodeName: "nodeC"},
}: struct{}{},
},
{key: "name", value: "machine2"}: {
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Labels: selector1},
Spec: v1.PodSpec{NodeName: "nodeA"},
}: struct{}{},
},
},
podToTopologyPairs: map[string]topologyPairSet{
"p2_": {
topologyPair{key: "name", value: "machine1"}: struct{}{},
},
"p1_": {
topologyPair{key: "name", value: "machine2"}: struct{}{},
},
},
},
nodeNameToMatchingAffinityPods: map[string][]*v1.Pod{
"nodeA": {
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Labels: selector1},
Spec: v1.PodSpec{NodeName: "nodeA"},
topologyPairsPotentialAffinityPods: &topologyPairsMaps{
topologyPairToPods: map[topologyPair]podSet{
{key: "name", value: "nodeA"}: {
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Labels: selector1},
Spec: v1.PodSpec{NodeName: "nodeA"},
}: struct{}{},
},
{key: "name", value: "nodeC"}: {
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p2"},
Spec: v1.PodSpec{
NodeName: "nodeC",
},
}: struct{}{},
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p6", Labels: selector1},
Spec: v1.PodSpec{NodeName: "nodeC"},
}: struct{}{},
},
},
"nodeC": {
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p2"},
Spec: v1.PodSpec{
NodeName: "nodeC",
},
podToTopologyPairs: map[string]topologyPairSet{
"p1_": {
topologyPair{key: "name", value: "nodeA"}: struct{}{},
},
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p6", Labels: selector1},
Spec: v1.PodSpec{NodeName: "nodeC"},
"p2_": {
topologyPair{key: "name", value: "nodeC"}: struct{}{},
},
"p6_": {
topologyPair{key: "name", value: "nodeC"}: struct{}{},
},
},
},
nodeNameToMatchingAntiAffinityPods: map[string][]*v1.Pod{
"nodeN": {
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Labels: selector1},
Spec: v1.PodSpec{NodeName: "nodeN"},
topologyPairsPotentialAntiAffinityPods: &topologyPairsMaps{
topologyPairToPods: map[topologyPair]podSet{
{key: "name", value: "nodeN"}: {
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Labels: selector1},
Spec: v1.PodSpec{NodeName: "nodeN"},
}: struct{}{},
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p2"},
Spec: v1.PodSpec{
NodeName: "nodeM",
},
}: struct{}{},
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p3"},
Spec: v1.PodSpec{
NodeName: "nodeM",
},
}: struct{}{},
},
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p2"},
Spec: v1.PodSpec{
NodeName: "nodeM",
},
},
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p3"},
Spec: v1.PodSpec{
NodeName: "nodeM",
},
{key: "name", value: "nodeM"}: {
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p6", Labels: selector1},
Spec: v1.PodSpec{NodeName: "nodeM"},
}: struct{}{},
},
},
"nodeM": {
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p6", Labels: selector1},
Spec: v1.PodSpec{NodeName: "nodeM"},
podToTopologyPairs: map[string]topologyPairSet{
"p1_": {
topologyPair{key: "name", value: "nodeN"}: struct{}{},
},
"p2_": {
topologyPair{key: "name", value: "nodeN"}: struct{}{},
},
"p3_": {
topologyPair{key: "name", value: "nodeN"}: struct{}{},
},
"p6_": {
topologyPair{key: "name", value: "nodeM"}: struct{}{},
},
},
},
@@ -526,3 +525,269 @@ func TestPredicateMetadata_ShallowCopy(t *testing.T) {
t.Errorf("Copy is not equal to source!")
}
}

// TestGetTPMapMatchingIncomingAffinityAntiAffinity tests against method getTPMapMatchingIncomingAffinityAntiAffinity
// on Anti Affinity cases
func TestGetTPMapMatchingIncomingAffinityAntiAffinity(t *testing.T) {
newPodAffinityTerms := func(keys ...string) []v1.PodAffinityTerm {
var terms []v1.PodAffinityTerm
for _, key := range keys {
terms = append(terms, v1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: key,
Operator: metav1.LabelSelectorOpExists,
},
},
},
TopologyKey: "hostname",
})
}
return terms
}
newPod := func(labels ...string) *v1.Pod {
labelMap := make(map[string]string)
for _, l := range labels {
labelMap[l] = ""
}
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "normal", Labels: labelMap},
Spec: v1.PodSpec{NodeName: "nodeA"},
}
}
normalPodA := newPod("aaa")
normalPodB := newPod("bbb")
normalPodAB := newPod("aaa", "bbb")
nodeA := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"hostname": "nodeA"}}}

tests := []struct {
name string
existingPods []*v1.Pod
nodes []*v1.Node
pod *v1.Pod
wantAffinityPodsMaps *topologyPairsMaps
wantAntiAffinityPodsMaps *topologyPairsMaps
wantErr bool
}{
{
name: "nil test",
nodes: []*v1.Node{nodeA},
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "aaa-normal"},
},
wantAffinityPodsMaps: newTopologyPairsMaps(),
wantAntiAffinityPodsMaps: newTopologyPairsMaps(),
},
{
name: "incoming pod without affinity/anti-affinity causes a no-op",
existingPods: []*v1.Pod{normalPodA},
nodes: []*v1.Node{nodeA},
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "aaa-normal"},
},
wantAffinityPodsMaps: newTopologyPairsMaps(),
wantAntiAffinityPodsMaps: newTopologyPairsMaps(),
},
{
name: "no pod has label that violates incoming pod's affinity and anti-affinity",
existingPods: []*v1.Pod{normalPodB},
nodes: []*v1.Node{nodeA},
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "aaa-anti"},
Spec: v1.PodSpec{
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa"),
},
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa"),
},
},
},
},
wantAffinityPodsMaps: newTopologyPairsMaps(),
wantAntiAffinityPodsMaps: newTopologyPairsMaps(),
},
{
name: "existing pod matches incoming pod's affinity and anti-affinity - single term case",
existingPods: []*v1.Pod{normalPodA},
nodes: []*v1.Node{nodeA},
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "affi-antiaffi"},
Spec: v1.PodSpec{
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa"),
},
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa"),
},
},
},
},
wantAffinityPodsMaps: &topologyPairsMaps{
topologyPairToPods: map[topologyPair]podSet{
{key: "hostname", value: "nodeA"}: {normalPodA: struct{}{}},
},
podToTopologyPairs: map[string]topologyPairSet{
"normal_": {
topologyPair{key: "hostname", value: "nodeA"}: struct{}{},
},
},
},
wantAntiAffinityPodsMaps: &topologyPairsMaps{
topologyPairToPods: map[topologyPair]podSet{
{key: "hostname", value: "nodeA"}: {normalPodA: struct{}{}},
},
podToTopologyPairs: map[string]topologyPairSet{
"normal_": {
topologyPair{key: "hostname", value: "nodeA"}: struct{}{},
},
},
},
},
{
name: "existing pod matches incoming pod's affinity and anti-affinity - mutiple terms case",
existingPods: []*v1.Pod{normalPodAB},
nodes: []*v1.Node{nodeA},
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "affi-antiaffi"},
Spec: v1.PodSpec{
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa", "bbb"),
},
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa"),
},
},
},
},
wantAffinityPodsMaps: &topologyPairsMaps{
topologyPairToPods: map[topologyPair]podSet{
{key: "hostname", value: "nodeA"}: {normalPodAB: struct{}{}},
},
podToTopologyPairs: map[string]topologyPairSet{
"normal_": {
topologyPair{key: "hostname", value: "nodeA"}: struct{}{},
},
},
},
wantAntiAffinityPodsMaps: &topologyPairsMaps{
topologyPairToPods: map[topologyPair]podSet{
{key: "hostname", value: "nodeA"}: {normalPodAB: struct{}{}},
},
podToTopologyPairs: map[string]topologyPairSet{
"normal_": {
topologyPair{key: "hostname", value: "nodeA"}: struct{}{},
},
},
},
},
{
name: "existing pod not match incoming pod's affinity but matches anti-affinity",
existingPods: []*v1.Pod{normalPodA},
nodes: []*v1.Node{nodeA},
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "affi-antiaffi"},
Spec: v1.PodSpec{
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa", "bbb"),
},
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa", "bbb"),
},
},
},
},
wantAffinityPodsMaps: newTopologyPairsMaps(),
wantAntiAffinityPodsMaps: &topologyPairsMaps{
topologyPairToPods: map[topologyPair]podSet{
{key: "hostname", value: "nodeA"}: {normalPodA: struct{}{}},
},
podToTopologyPairs: map[string]topologyPairSet{
"normal_": {
topologyPair{key: "hostname", value: "nodeA"}: struct{}{},
},
},
},
},
{
name: "incoming pod's anti-affinity has more than one term - existing pod violates partial term - case 1",
existingPods: []*v1.Pod{normalPodAB},
nodes: []*v1.Node{nodeA},
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "anaffi-antiaffiti"},
Spec: v1.PodSpec{
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa", "ccc"),
},
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa", "ccc"),
},
},
},
},
wantAffinityPodsMaps: newTopologyPairsMaps(),
wantAntiAffinityPodsMaps: &topologyPairsMaps{
topologyPairToPods: map[topologyPair]podSet{
{key: "hostname", value: "nodeA"}: {normalPodAB: struct{}{}},
},
podToTopologyPairs: map[string]topologyPairSet{
"normal_": {
topologyPair{key: "hostname", value: "nodeA"}: struct{}{},
},
},
},
},
{
name: "incoming pod's anti-affinity has more than one term - existing pod violates partial term - case 2",
existingPods: []*v1.Pod{normalPodB},
nodes: []*v1.Node{nodeA},
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "affi-antiaffi"},
Spec: v1.PodSpec{
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa", "bbb"),
},
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa", "bbb"),
},
},
},
},
wantAffinityPodsMaps: newTopologyPairsMaps(),
wantAntiAffinityPodsMaps: &topologyPairsMaps{
topologyPairToPods: map[topologyPair]podSet{
{key: "hostname", value: "nodeA"}: {normalPodB: struct{}{}},
},
podToTopologyPairs: map[string]topologyPairSet{
"normal_": {
topologyPair{key: "hostname", value: "nodeA"}: struct{}{},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
nodeInfoMap := schedulercache.CreateNodeNameToInfoMap(tt.existingPods, tt.nodes)

gotAffinityPodsMaps, gotAntiAffinityPodsMaps, err := getTPMapMatchingIncomingAffinityAntiAffinity(tt.pod, nodeInfoMap)
if (err != nil) != tt.wantErr {
t.Errorf("getTPMapMatchingIncomingAffinityAntiAffinity() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(gotAffinityPodsMaps, tt.wantAffinityPodsMaps) {
t.Errorf("getTPMapMatchingIncomingAffinityAntiAffinity() gotAffinityPodsMaps = %#v, want %#v", gotAffinityPodsMaps, tt.wantAffinityPodsMaps)
}
if !reflect.DeepEqual(gotAntiAffinityPodsMaps, tt.wantAntiAffinityPodsMaps) {
t.Errorf("getTPMapMatchingIncomingAffinityAntiAffinity() gotAntiAffinityPodsMaps = %#v, want %#v", gotAntiAffinityPodsMaps, tt.wantAntiAffinityPodsMaps)
}
})
}
}

307
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/predicates.go
generated
vendored
307
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/predicates.go
generated
vendored
@@ -20,8 +20,8 @@ import (
"errors"
"fmt"
"os"
"regexp"
"strconv"
"sync"

"github.com/golang/glog"
@@ -36,7 +36,6 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
corelisters "k8s.io/client-go/listers/core/v1"
storagelisters "k8s.io/client-go/listers/storage/v1"
"k8s.io/client-go/util/workqueue"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/features"
@@ -84,6 +83,8 @@ const (
MaxGCEPDVolumeCountPred = "MaxGCEPDVolumeCount"
// MaxAzureDiskVolumeCountPred defines the name of predicate MaxAzureDiskVolumeCount.
MaxAzureDiskVolumeCountPred = "MaxAzureDiskVolumeCount"
// MaxCSIVolumeCountPred defines the predicate that decides how many CSI volumes should be attached
MaxCSIVolumeCountPred = "MaxCSIVolumeCountPred"
// NoVolumeZoneConflictPred defines the name of predicate NoVolumeZoneConflict.
NoVolumeZoneConflictPred = "NoVolumeZoneConflict"
// CheckNodeMemoryPressurePred defines the name of predicate CheckNodeMemoryPressure.
@@ -93,10 +94,6 @@ const (
// CheckNodePIDPressurePred defines the name of predicate CheckNodePIDPressure.
CheckNodePIDPressurePred = "CheckNodePIDPressure"

// DefaultMaxEBSVolumes is the limit for volumes attached to an instance.
// Amazon recommends no more than 40; the system root volume uses at least one.
// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#linux-specific-volume-limits
DefaultMaxEBSVolumes = 39
// DefaultMaxGCEPDVolumes defines the maximum number of PD Volumes for GCE
// GCE instances can have up to 16 PD volumes attached.
DefaultMaxGCEPDVolumes = 16
@@ -134,7 +131,7 @@ var (
GeneralPred, HostNamePred, PodFitsHostPortsPred,
MatchNodeSelectorPred, PodFitsResourcesPred, NoDiskConflictPred,
PodToleratesNodeTaintsPred, PodToleratesNodeNoExecuteTaintsPred, CheckNodeLabelPresencePred,
CheckServiceAffinityPred, MaxEBSVolumeCountPred, MaxGCEPDVolumeCountPred,
CheckServiceAffinityPred, MaxEBSVolumeCountPred, MaxGCEPDVolumeCountPred, MaxCSIVolumeCountPred,
MaxAzureDiskVolumeCountPred, CheckVolumeBindingPred, NoVolumeZoneConflictPred,
CheckNodeMemoryPressurePred, CheckNodePIDPressurePred, CheckNodeDiskPressurePred, MatchInterPodAffinityPred}
)
@@ -291,7 +288,7 @@ func NoDiskConflict(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *sch
type MaxPDVolumeCountChecker struct {
filter VolumeFilter
volumeLimitKey v1.ResourceName
maxVolumes int
maxVolumeFunc func(node *v1.Node) int
pvInfo PersistentVolumeInfo
pvcInfo PersistentVolumeClaimInfo
@@ -317,7 +314,6 @@ type VolumeFilter struct {
func NewMaxPDVolumeCountPredicate(
filterName string, pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate {
var filter VolumeFilter
var maxVolumes int
var volumeLimitKey v1.ResourceName

switch filterName {
@@ -325,15 +321,12 @@ func NewMaxPDVolumeCountPredicate(
case EBSVolumeFilterType:
filter = EBSVolumeFilter
volumeLimitKey = v1.ResourceName(volumeutil.EBSVolumeLimitKey)
maxVolumes = getMaxVols(DefaultMaxEBSVolumes)
case GCEPDVolumeFilterType:
filter = GCEPDVolumeFilter
volumeLimitKey = v1.ResourceName(volumeutil.GCEVolumeLimitKey)
maxVolumes = getMaxVols(DefaultMaxGCEPDVolumes)
case AzureDiskVolumeFilterType:
filter = AzureDiskVolumeFilter
volumeLimitKey = v1.ResourceName(volumeutil.AzureVolumeLimitKey)
maxVolumes = getMaxVols(DefaultMaxAzureDiskVolumes)
default:
glog.Fatalf("Wrong filterName, Only Support %v %v %v ", EBSVolumeFilterType,
GCEPDVolumeFilterType, AzureDiskVolumeFilterType)
@@ -343,7 +336,7 @@ func NewMaxPDVolumeCountPredicate(
c := &MaxPDVolumeCountChecker{
filter: filter,
volumeLimitKey: volumeLimitKey,
maxVolumes: maxVolumes,
maxVolumeFunc: getMaxVolumeFunc(filterName),
pvInfo: pvInfo,
pvcInfo: pvcInfo,
randomVolumeIDPrefix: rand.String(32),
@@ -352,19 +345,52 @@ func NewMaxPDVolumeCountPredicate(
return c.predicate
}

// getMaxVols checks the max PD volumes environment variable, otherwise returning a default value
func getMaxVols(defaultVal int) int {
func getMaxVolumeFunc(filterName string) func(node *v1.Node) int {
return func(node *v1.Node) int {
maxVolumesFromEnv := getMaxVolLimitFromEnv()
if maxVolumesFromEnv > 0 {
return maxVolumesFromEnv
}

var nodeInstanceType string
for k, v := range node.ObjectMeta.Labels {
if k == kubeletapis.LabelInstanceType {
nodeInstanceType = v
}
}
switch filterName {
case EBSVolumeFilterType:
return getMaxEBSVolume(nodeInstanceType)
case GCEPDVolumeFilterType:
return DefaultMaxGCEPDVolumes
case AzureDiskVolumeFilterType:
return DefaultMaxAzureDiskVolumes
default:
return -1
}
}
}

func getMaxEBSVolume(nodeInstanceType string) int {
if ok, _ := regexp.MatchString(volumeutil.EBSNitroLimitRegex, nodeInstanceType); ok {
return volumeutil.DefaultMaxEBSNitroVolumeLimit
}
return volumeutil.DefaultMaxEBSVolumes
}

// getMaxVolLimitFromEnv checks the max PD volumes environment variable, otherwise returning a default value
func getMaxVolLimitFromEnv() int {
if rawMaxVols := os.Getenv(KubeMaxPDVols); rawMaxVols != "" {
if parsedMaxVols, err := strconv.Atoi(rawMaxVols); err != nil {
glog.Errorf("Unable to parse maximum PD volumes value, using default of %v: %v", defaultVal, err)
glog.Errorf("Unable to parse maximum PD volumes value, using default: %v", err)
} else if parsedMaxVols <= 0 {
glog.Errorf("Maximum PD volumes must be a positive value, using default of %v", defaultVal)
glog.Errorf("Maximum PD volumes must be a positive value, using default ")
} else {
return parsedMaxVols
}
}

return defaultVal
return -1
}

func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace string, filteredVolumes map[string]bool) error {
@@ -454,7 +480,7 @@ func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta algorithm.Predicat
}

numNewVolumes := len(newVolumes)
maxAttachLimit := c.maxVolumes
maxAttachLimit := c.maxVolumeFunc(nodeInfo.Node())

if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
volumeLimits := nodeInfo.VolumeLimits()
@@ -1159,7 +1185,7 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm
// targetPod matches all the terms and their topologies, 2) whether targetPod
// matches all the terms label selector and namespaces (AKA term properties),
// 3) any error.
func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod *v1.Pod, targetPod *v1.Pod, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) (bool, bool, error) {
func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod, targetPod *v1.Pod, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) (bool, bool, error) {
if len(terms) == 0 {
return false, false, fmt.Errorf("terms array is empty")
}
@@ -1167,7 +1193,7 @@ func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod *v1.Pod, targetPod *
if err != nil {
return false, false, err
}
if !podMatchesAffinityTermProperties(targetPod, props) {
if !podMatchesAllAffinityTermProperties(targetPod, props) {
return false, false, nil
}
// Namespace and selector of the terms have matched. Now we check topology of the terms.
@@ -1214,120 +1240,63 @@ func GetPodAntiAffinityTerms(podAntiAffinity *v1.PodAntiAffinity) (terms []v1.Po
return terms
}

func getMatchingAntiAffinityTerms(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) (map[string][]matchingPodAntiAffinityTerm, error) {
allNodeNames := make([]string, 0, len(nodeInfoMap))
for name := range nodeInfoMap {
allNodeNames = append(allNodeNames, name)
// getMatchingAntiAffinityTopologyPairs calculates the following for "existingPod" on given node:
// (1) Whether it has PodAntiAffinity
// (2) Whether ANY AffinityTerm matches the incoming pod
func getMatchingAntiAffinityTopologyPairsOfPod(newPod *v1.Pod, existingPod *v1.Pod, node *v1.Node) (*topologyPairsMaps, error) {
affinity := existingPod.Spec.Affinity
if affinity == nil || affinity.PodAntiAffinity == nil {
return nil, nil
}

var lock sync.Mutex
var firstError error
result := make(map[string][]matchingPodAntiAffinityTerm)
appendResult := func(toAppend map[string][]matchingPodAntiAffinityTerm) {
lock.Lock()
defer lock.Unlock()
for uid, terms := range toAppend {
result[uid] = append(result[uid], terms...)
}
}
catchError := func(err error) {
lock.Lock()
defer lock.Unlock()
if firstError == nil {
firstError = err
topologyMaps := newTopologyPairsMaps()
for _, term := range GetPodAntiAffinityTerms(affinity.PodAntiAffinity) {
namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term)
selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
if err != nil {
return nil, err
}
if priorityutil.PodMatchesTermsNamespaceAndSelector(newPod, namespaces, selector) {
if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
pair := topologyPair{key: term.TopologyKey, value: topologyValue}
topologyMaps.addTopologyPair(pair, existingPod)
}
}
}
return topologyMaps, nil
}

processNode := func(i int) {
nodeInfo := nodeInfoMap[allNodeNames[i]]
node := nodeInfo.Node()
if node == nil {
catchError(fmt.Errorf("node not found"))
return
}
nodeResult := make(map[string][]matchingPodAntiAffinityTerm)
for _, existingPod := range nodeInfo.PodsWithAffinity() {
affinity := existingPod.Spec.Affinity
if affinity == nil {
func (c *PodAffinityChecker) getMatchingAntiAffinityTopologyPairsOfPods(pod *v1.Pod, existingPods []*v1.Pod) (*topologyPairsMaps, error) {
topologyMaps := newTopologyPairsMaps()

for _, existingPod := range existingPods {
existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName)
if err != nil {
if apierrors.IsNotFound(err) {
glog.Errorf("Node not found, %v", existingPod.Spec.NodeName)
continue
}
for _, term := range GetPodAntiAffinityTerms(affinity.PodAntiAffinity) {
namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term)
selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
if err != nil {
catchError(err)
return
}
if priorityutil.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) {
existingPodFullName := schedutil.GetPodFullName(existingPod)
nodeResult[existingPodFullName] = append(
nodeResult[existingPodFullName],
matchingPodAntiAffinityTerm{term: &term, node: node})
}
}
return nil, err
}
if len(nodeResult) > 0 {
appendResult(nodeResult)
existingPodTopologyMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(pod, existingPod, existingPodNode)
if err != nil {
return nil, err
}
topologyMaps.appendMaps(existingPodTopologyMaps)
}
workqueue.Parallelize(16, len(allNodeNames), processNode)
return result, firstError
}

func getMatchingAntiAffinityTermsOfExistingPod(newPod *v1.Pod, existingPod *v1.Pod, node *v1.Node) ([]matchingPodAntiAffinityTerm, error) {
var result []matchingPodAntiAffinityTerm
affinity := existingPod.Spec.Affinity
if affinity != nil && affinity.PodAntiAffinity != nil {
for _, term := range GetPodAntiAffinityTerms(affinity.PodAntiAffinity) {
namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term)
selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
if err != nil {
return nil, err
}
if priorityutil.PodMatchesTermsNamespaceAndSelector(newPod, namespaces, selector) {
result = append(result, matchingPodAntiAffinityTerm{term: &term, node: node})
}
}
}
return result, nil
}

func (c *PodAffinityChecker) getMatchingAntiAffinityTerms(pod *v1.Pod, allPods []*v1.Pod) (map[string][]matchingPodAntiAffinityTerm, error) {
result := make(map[string][]matchingPodAntiAffinityTerm)
for _, existingPod := range allPods {
affinity := existingPod.Spec.Affinity
if affinity != nil && affinity.PodAntiAffinity != nil {
existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName)
if err != nil {
if apierrors.IsNotFound(err) {
glog.Errorf("Node not found, %v", existingPod.Spec.NodeName)
continue
}
return nil, err
}
existingPodMatchingTerms, err := getMatchingAntiAffinityTermsOfExistingPod(pod, existingPod, existingPodNode)
if err != nil {
return nil, err
}
if len(existingPodMatchingTerms) > 0 {
existingPodFullName := schedutil.GetPodFullName(existingPod)
result[existingPodFullName] = existingPodMatchingTerms
}
}
}
return result, nil
return topologyMaps, nil
}

// Checks if scheduling the pod onto this node would break any anti-affinity
// rules indicated by the existing pods.
// terms indicated by the existing pods.
func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (algorithm.PredicateFailureReason, error) {
node := nodeInfo.Node()
if node == nil {
return ErrExistingPodsAntiAffinityRulesNotMatch, fmt.Errorf("Node is nil")
}
var matchingTerms map[string][]matchingPodAntiAffinityTerm
var topologyMaps *topologyPairsMaps
if predicateMeta, ok := meta.(*predicateMetadata); ok {
matchingTerms = predicateMeta.matchingAntiAffinityTerms
topologyMaps = predicateMeta.topologyPairsAntiAffinityPodsMap
} else {
// Filter out pods whose nodeName is equal to nodeInfo.node.Name, but are not
// present in nodeInfo. Pods on other nodes pass the filter.
@@ -1337,63 +1306,63 @@ func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta
glog.Error(errMessage)
return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage)
}
if matchingTerms, err = c.getMatchingAntiAffinityTerms(pod, filteredPods); err != nil {
if topologyMaps, err = c.getMatchingAntiAffinityTopologyPairsOfPods(pod, filteredPods); err != nil {
errMessage := fmt.Sprintf("Failed to get all terms that pod %+v matches, err: %+v", podName(pod), err)
glog.Error(errMessage)
return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage)
}
}
for _, terms := range matchingTerms {
for i := range terms {
term := &terms[i]
if len(term.term.TopologyKey) == 0 {
errMessage := fmt.Sprintf("Empty topologyKey is not allowed except for PreferredDuringScheduling pod anti-affinity")
glog.Error(errMessage)
return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage)
}
if priorityutil.NodesHaveSameTopologyKey(node, term.node, term.term.TopologyKey) {
glog.V(10).Infof("Cannot schedule pod %+v onto node %v,because of PodAntiAffinityTerm %v",
podName(pod), node.Name, term.term)
return ErrExistingPodsAntiAffinityRulesNotMatch, nil
}

// Iterate over topology pairs to get any of the pods being affected by
// the scheduled pod anti-affinity terms
for topologyKey, topologyValue := range node.Labels {
if topologyMaps.topologyPairToPods[topologyPair{key: topologyKey, value: topologyValue}] != nil {
glog.V(10).Infof("Cannot schedule pod %+v onto node %v", podName(pod), node.Name)
return ErrExistingPodsAntiAffinityRulesNotMatch, nil
}
}
if glog.V(10) {
// We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
// not logged. There is visible performance gain from it.
glog.Infof("Schedule Pod %+v on Node %+v is allowed, existing pods anti-affinity rules satisfied.",
glog.Infof("Schedule Pod %+v on Node %+v is allowed, existing pods anti-affinity terms satisfied.",
podName(pod), node.Name)
}
return nil, nil
}

// anyPodsMatchingTopologyTerms checks whether any of the nodes given via
// "targetPods" matches topology of all the "terms" for the given "pod" and "nodeInfo".
func (c *PodAffinityChecker) anyPodsMatchingTopologyTerms(pod *v1.Pod, targetPods map[string][]*v1.Pod, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) (bool, error) {
for nodeName, targetPods := range targetPods {
targetPodNodeInfo, err := c.info.GetNodeInfo(nodeName)
if err != nil {
return false, err
}
if len(targetPods) > 0 {
allTermsMatched := true
for _, term := range terms {
if !priorityutil.NodesHaveSameTopologyKey(nodeInfo.Node(), targetPodNodeInfo, term.TopologyKey) {
allTermsMatched = false
break
}
// nodeMatchesAllTopologyTerms checks whether "nodeInfo" matches
// topology of all the "terms" for the given "pod".
func (c *PodAffinityChecker) nodeMatchesAllTopologyTerms(pod *v1.Pod, topologyPairs *topologyPairsMaps, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) bool {
node := nodeInfo.Node()
for _, term := range terms {
if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
pair := topologyPair{key: term.TopologyKey, value: topologyValue}
if _, ok := topologyPairs.topologyPairToPods[pair]; !ok {
return false
}
if allTermsMatched {
// We have 1 or more pods on the target node that have already matched namespace and selector
// and all of the terms topologies matched the target node. So, there is at least 1 matching pod on the node.
return true, nil
} else {
return false
}
}
return true
}

// nodeMatchesAnyTopologyTerm checks whether "nodeInfo" matches
// topology of any "term" for the given "pod".
func (c *PodAffinityChecker) nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPairs *topologyPairsMaps, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) bool {
node := nodeInfo.Node()
for _, term := range terms {
if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
pair := topologyPair{key: term.TopologyKey, value: topologyValue}
if _, ok := topologyPairs.topologyPairToPods[pair]; ok {
return true
}
}
}
return false, nil
return false
}

// Checks if scheduling the pod onto this node would break any rules of this pod.
// Checks if scheduling the pod onto this node would break any term of this pod.
func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo,
affinity *v1.Affinity) (algorithm.PredicateFailureReason, error) {
@@ -1403,20 +1372,15 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
}
if predicateMeta, ok := meta.(*predicateMetadata); ok {
// Check all affinity terms.
matchingPods := predicateMeta.nodeNameToMatchingAffinityPods
topologyPairsPotentialAffinityPods := predicateMeta.topologyPairsPotentialAffinityPods
if affinityTerms := GetPodAffinityTerms(affinity.PodAffinity); len(affinityTerms) > 0 {
matchExists, err := c.anyPodsMatchingTopologyTerms(pod, matchingPods, nodeInfo, affinityTerms)
if err != nil {
errMessage := fmt.Sprintf("Cannot schedule pod %+v onto node %v, because of PodAffinity, err: %v", podName(pod), node.Name, err)
glog.Errorf(errMessage)
return ErrPodAffinityRulesNotMatch, errors.New(errMessage)
}
matchExists := c.nodeMatchesAllTopologyTerms(pod, topologyPairsPotentialAffinityPods, nodeInfo, affinityTerms)
if !matchExists {
// This pod may be the first pod in a series that have affinity to themselves. In order
// to not leave such pods in pending state forever, we check that if no other pod
// in the cluster matches the namespace and selector of this pod and the pod matches
// its own terms, then we allow the pod to pass the affinity check.
if !(len(matchingPods) == 0 && targetPodMatchesAffinityOfPod(pod, pod)) {
if !(len(topologyPairsPotentialAffinityPods.topologyPairToPods) == 0 && targetPodMatchesAffinityOfPod(pod, pod)) {
glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
podName(pod), node.Name)
return ErrPodAffinityRulesNotMatch, nil
@@ -1425,16 +1389,16 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
}

// Check all anti-affinity terms.
matchingPods = predicateMeta.nodeNameToMatchingAntiAffinityPods
topologyPairsPotentialAntiAffinityPods := predicateMeta.topologyPairsPotentialAntiAffinityPods
if antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity); len(antiAffinityTerms) > 0 {
matchExists, err := c.anyPodsMatchingTopologyTerms(pod, matchingPods, nodeInfo, antiAffinityTerms)
if err != nil || matchExists {
glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinity, err: %v",
podName(pod), node.Name, err)
matchExists := c.nodeMatchesAnyTopologyTerm(pod, topologyPairsPotentialAntiAffinityPods, nodeInfo, antiAffinityTerms)
if matchExists {
glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinity",
podName(pod), node.Name)
return ErrPodAntiAffinityRulesNotMatch, nil
}
}
} else { // We don't have precomputed metadata. We have to follow a slow path to check affinity rules.
} else { // We don't have precomputed metadata. We have to follow a slow path to check affinity terms.
filteredPods, err := c.podLister.FilteredList(nodeInfo.Filter, labels.Everything())
if err != nil {
return ErrPodAffinityRulesNotMatch, err
@@ -1472,7 +1436,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
}

if !matchFound && len(affinityTerms) > 0 {
// We have not been able to find any matches for the pod's affinity rules.
// We have not been able to find any matches for the pod's affinity terms.
// This pod may be the first pod in a series that have affinity to themselves. In order
// to not leave such pods in pending state forever, we check that if no other pod
// in the cluster matches the namespace and selector of this pod and the pod matches
@@ -1506,7 +1470,14 @@ func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta algorithm.PredicateMetada
return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil
}

if nodeInfo.Node().Spec.Unschedulable {
// If the pod tolerates the unschedulable taint, it also tolerates `node.Spec.Unschedulable`.
podToleratesUnschedulable := v1helper.TolerationsTolerateTaint(pod.Spec.Tolerations, &v1.Taint{
Key: algorithm.TaintNodeUnschedulable,
Effect: v1.TaintEffectNoSchedule,
})

// TODO (k82cn): deprecates `node.Spec.Unschedulable` in 1.13.
if nodeInfo.Node().Spec.Unschedulable && !podToleratesUnschedulable {
return false, []algorithm.PredicateFailureReason{ErrNodeUnschedulable}, nil
}

2165
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/predicates_test.go
generated
vendored
2165
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/predicates_test.go
generated
vendored
File diff suppressed because it is too large