Add generated file
This PR adds generated files under pkg/client and vendor folder.
This commit is contained in:
110
vendor/k8s.io/kubernetes/pkg/controller/daemon/BUILD
generated
vendored
Normal file
110
vendor/k8s.io/kubernetes/pkg/controller/daemon/BUILD
generated
vendored
Normal file
@@ -0,0 +1,110 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"daemon_controller.go",
|
||||
"doc.go",
|
||||
"update.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/daemon",
|
||||
deps = [
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/apis/core/v1/helper:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/controller/daemon/util:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/kubelet/types:go_default_library",
|
||||
"//pkg/scheduler/algorithm:go_default_library",
|
||||
"//pkg/scheduler/algorithm/predicates:go_default_library",
|
||||
"//pkg/scheduler/cache:go_default_library",
|
||||
"//pkg/util/labels:go_default_library",
|
||||
"//pkg/util/metrics:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/api/apps/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/apps/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/apps/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/integer:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"daemon_controller_test.go",
|
||||
"update_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/api/legacyscheme:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/kubelet/apis:go_default_library",
|
||||
"//pkg/kubelet/types:go_default_library",
|
||||
"//pkg/scheduler/algorithm:go_default_library",
|
||||
"//pkg/securitycontext:go_default_library",
|
||||
"//pkg/util/labels:go_default_library",
|
||||
"//vendor/k8s.io/api/apps/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
|
||||
"//vendor/k8s.io/client-go/testing:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//pkg/controller/daemon/util:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
)
|
8
vendor/k8s.io/kubernetes/pkg/controller/daemon/OWNERS
generated
vendored
Executable file
8
vendor/k8s.io/kubernetes/pkg/controller/daemon/OWNERS
generated
vendored
Executable file
@@ -0,0 +1,8 @@
|
||||
approvers:
|
||||
- mikedanese
|
||||
reviewers:
|
||||
- janetkuo
|
||||
- lukaszo
|
||||
- mikedanese
|
||||
- tnozicka
|
||||
- k82cn
|
1537
vendor/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller.go
generated
vendored
Normal file
1537
vendor/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
2617
vendor/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller_test.go
generated
vendored
Normal file
2617
vendor/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
19
vendor/k8s.io/kubernetes/pkg/controller/daemon/doc.go
generated
vendored
Normal file
19
vendor/k8s.io/kubernetes/pkg/controller/daemon/doc.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package daemon contains logic for watching and synchronizing
|
||||
// daemons.
|
||||
package daemon // import "k8s.io/kubernetes/pkg/controller/daemon"
|
441
vendor/k8s.io/kubernetes/pkg/controller/daemon/update.go
generated
vendored
Normal file
441
vendor/k8s.io/kubernetes/pkg/controller/daemon/update.go
generated
vendored
Normal file
@@ -0,0 +1,441 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/json"
|
||||
"k8s.io/apimachinery/pkg/util/rand"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/daemon/util"
|
||||
labelsutil "k8s.io/kubernetes/pkg/util/labels"
|
||||
)
|
||||
|
||||
// rollingUpdate deletes old daemon set pods making sure that no more than
|
||||
// ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable pods are unavailable
|
||||
func (dsc *DaemonSetsController) rollingUpdate(ds *apps.DaemonSet, hash string) error {
|
||||
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
|
||||
}
|
||||
|
||||
_, oldPods := dsc.getAllDaemonSetPods(ds, nodeToDaemonPods, hash)
|
||||
maxUnavailable, numUnavailable, err := dsc.getUnavailableNumbers(ds, nodeToDaemonPods)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Couldn't get unavailable numbers: %v", err)
|
||||
}
|
||||
oldAvailablePods, oldUnavailablePods := util.SplitByAvailablePods(ds.Spec.MinReadySeconds, oldPods)
|
||||
|
||||
// for oldPods delete all not running pods
|
||||
var oldPodsToDelete []string
|
||||
glog.V(4).Infof("Marking all unavailable old pods for deletion")
|
||||
for _, pod := range oldUnavailablePods {
|
||||
// Skip terminating pods. We won't delete them again
|
||||
if pod.DeletionTimestamp != nil {
|
||||
continue
|
||||
}
|
||||
glog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
|
||||
oldPodsToDelete = append(oldPodsToDelete, pod.Name)
|
||||
}
|
||||
|
||||
glog.V(4).Infof("Marking old pods for deletion")
|
||||
for _, pod := range oldAvailablePods {
|
||||
if numUnavailable >= maxUnavailable {
|
||||
glog.V(4).Infof("Number of unavailable DaemonSet pods: %d, is equal to or exceeds allowed maximum: %d", numUnavailable, maxUnavailable)
|
||||
break
|
||||
}
|
||||
glog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
|
||||
oldPodsToDelete = append(oldPodsToDelete, pod.Name)
|
||||
numUnavailable++
|
||||
}
|
||||
return dsc.syncNodes(ds, oldPodsToDelete, []string{}, hash)
|
||||
}
|
||||
|
||||
// constructHistory finds all histories controlled by the given DaemonSet, and
|
||||
// update current history revision number, or create current history if need to.
|
||||
// It also deduplicates current history, and adds missing unique labels to existing histories.
|
||||
func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps.ControllerRevision, old []*apps.ControllerRevision, err error) {
|
||||
var histories []*apps.ControllerRevision
|
||||
var currentHistories []*apps.ControllerRevision
|
||||
histories, err = dsc.controlledHistories(ds)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
for _, history := range histories {
|
||||
// Add the unique label if it's not already added to the history
|
||||
// We use history name instead of computing hash, so that we don't need to worry about hash collision
|
||||
if _, ok := history.Labels[apps.DefaultDaemonSetUniqueLabelKey]; !ok {
|
||||
toUpdate := history.DeepCopy()
|
||||
toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = toUpdate.Name
|
||||
history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(toUpdate)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
// Compare histories with ds to separate cur and old history
|
||||
found := false
|
||||
found, err = Match(ds, history)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if found {
|
||||
currentHistories = append(currentHistories, history)
|
||||
} else {
|
||||
old = append(old, history)
|
||||
}
|
||||
}
|
||||
|
||||
currRevision := maxRevision(old) + 1
|
||||
switch len(currentHistories) {
|
||||
case 0:
|
||||
// Create a new history if the current one isn't found
|
||||
cur, err = dsc.snapshot(ds, currRevision)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
default:
|
||||
cur, err = dsc.dedupCurHistories(ds, currentHistories)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// Update revision number if necessary
|
||||
if cur.Revision < currRevision {
|
||||
toUpdate := cur.DeepCopy()
|
||||
toUpdate.Revision = currRevision
|
||||
_, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(toUpdate)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return cur, old, err
|
||||
}
|
||||
|
||||
func (dsc *DaemonSetsController) cleanupHistory(ds *apps.DaemonSet, old []*apps.ControllerRevision) error {
|
||||
nodesToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
|
||||
}
|
||||
|
||||
toKeep := int(*ds.Spec.RevisionHistoryLimit)
|
||||
toKill := len(old) - toKeep
|
||||
if toKill <= 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Find all hashes of live pods
|
||||
liveHashes := make(map[string]bool)
|
||||
for _, pods := range nodesToDaemonPods {
|
||||
for _, pod := range pods {
|
||||
if hash := pod.Labels[apps.DefaultDaemonSetUniqueLabelKey]; len(hash) > 0 {
|
||||
liveHashes[hash] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Find all live history with the above hashes
|
||||
liveHistory := make(map[string]bool)
|
||||
for _, history := range old {
|
||||
if hash := history.Labels[apps.DefaultDaemonSetUniqueLabelKey]; liveHashes[hash] {
|
||||
liveHistory[history.Name] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up old history from smallest to highest revision (from oldest to newest)
|
||||
sort.Sort(historiesByRevision(old))
|
||||
for _, history := range old {
|
||||
if toKill <= 0 {
|
||||
break
|
||||
}
|
||||
if liveHistory[history.Name] {
|
||||
continue
|
||||
}
|
||||
// Clean up
|
||||
err := dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Delete(history.Name, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
toKill--
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// maxRevision returns the max revision number of the given list of histories
|
||||
func maxRevision(histories []*apps.ControllerRevision) int64 {
|
||||
max := int64(0)
|
||||
for _, history := range histories {
|
||||
if history.Revision > max {
|
||||
max = history.Revision
|
||||
}
|
||||
}
|
||||
return max
|
||||
}
|
||||
|
||||
func (dsc *DaemonSetsController) dedupCurHistories(ds *apps.DaemonSet, curHistories []*apps.ControllerRevision) (*apps.ControllerRevision, error) {
|
||||
if len(curHistories) == 1 {
|
||||
return curHistories[0], nil
|
||||
}
|
||||
var maxRevision int64
|
||||
var keepCur *apps.ControllerRevision
|
||||
for _, cur := range curHistories {
|
||||
if cur.Revision >= maxRevision {
|
||||
keepCur = cur
|
||||
maxRevision = cur.Revision
|
||||
}
|
||||
}
|
||||
// Clean up duplicates and relabel pods
|
||||
for _, cur := range curHistories {
|
||||
if cur.Name == keepCur.Name {
|
||||
continue
|
||||
}
|
||||
// Relabel pods before dedup
|
||||
pods, err := dsc.getDaemonPods(ds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, pod := range pods {
|
||||
if pod.Labels[apps.DefaultDaemonSetUniqueLabelKey] != keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey] {
|
||||
toUpdate := pod.DeepCopy()
|
||||
if toUpdate.Labels == nil {
|
||||
toUpdate.Labels = make(map[string]string)
|
||||
}
|
||||
toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
|
||||
_, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(toUpdate)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
// Remove duplicates
|
||||
err = dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Delete(cur.Name, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return keepCur, nil
|
||||
}
|
||||
|
||||
// controlledHistories returns all ControllerRevisions controlled by the given DaemonSet.
|
||||
// This also reconciles ControllerRef by adopting/orphaning.
|
||||
// Note that returned histories are pointers to objects in the cache.
|
||||
// If you want to modify one, you need to deep-copy it first.
|
||||
func (dsc *DaemonSetsController) controlledHistories(ds *apps.DaemonSet) ([]*apps.ControllerRevision, error) {
|
||||
selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// List all histories to include those that don't match the selector anymore
|
||||
// but have a ControllerRef pointing to the controller.
|
||||
histories, err := dsc.historyLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// If any adoptions are attempted, we should first recheck for deletion with
|
||||
// an uncached quorum read sometime after listing Pods (see #42639).
|
||||
canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
|
||||
fresh, err := dsc.kubeClient.ExtensionsV1beta1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if fresh.UID != ds.UID {
|
||||
return nil, fmt.Errorf("original DaemonSet %v/%v is gone: got uid %v, wanted %v", ds.Namespace, ds.Name, fresh.UID, ds.UID)
|
||||
}
|
||||
return fresh, nil
|
||||
})
|
||||
// Use ControllerRefManager to adopt/orphan as needed.
|
||||
cm := controller.NewControllerRevisionControllerRefManager(dsc.crControl, ds, selector, controllerKind, canAdoptFunc)
|
||||
return cm.ClaimControllerRevisions(histories)
|
||||
}
|
||||
|
||||
// Match check if the given DaemonSet's template matches the template stored in the given history.
|
||||
func Match(ds *apps.DaemonSet, history *apps.ControllerRevision) (bool, error) {
|
||||
patch, err := getPatch(ds)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return bytes.Equal(patch, history.Data.Raw), nil
|
||||
}
|
||||
|
||||
// getPatch returns a strategic merge patch that can be applied to restore a Daemonset to a
|
||||
// previous version. If the returned error is nil the patch is valid. The current state that we save is just the
|
||||
// PodSpecTemplate. We can modify this later to encompass more state (or less) and remain compatible with previously
|
||||
// recorded patches.
|
||||
func getPatch(ds *apps.DaemonSet) ([]byte, error) {
|
||||
dsBytes, err := json.Marshal(ds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var raw map[string]interface{}
|
||||
err = json.Unmarshal(dsBytes, &raw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
objCopy := make(map[string]interface{})
|
||||
specCopy := make(map[string]interface{})
|
||||
|
||||
// Create a patch of the DaemonSet that replaces spec.template
|
||||
spec := raw["spec"].(map[string]interface{})
|
||||
template := spec["template"].(map[string]interface{})
|
||||
specCopy["template"] = template
|
||||
template["$patch"] = "replace"
|
||||
objCopy["spec"] = specCopy
|
||||
patch, err := json.Marshal(objCopy)
|
||||
return patch, err
|
||||
}
|
||||
|
||||
func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*apps.ControllerRevision, error) {
|
||||
patch, err := getPatch(ds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hash := fmt.Sprint(controller.ComputeHash(&ds.Spec.Template, ds.Status.CollisionCount))
|
||||
name := ds.Name + "-" + rand.SafeEncodeString(hash)
|
||||
history := &apps.ControllerRevision{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: ds.Namespace,
|
||||
Labels: labelsutil.CloneAndAddLabel(ds.Spec.Template.Labels, apps.DefaultDaemonSetUniqueLabelKey, hash),
|
||||
Annotations: ds.Annotations,
|
||||
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(ds, controllerKind)},
|
||||
},
|
||||
Data: runtime.RawExtension{Raw: patch},
|
||||
Revision: revision,
|
||||
}
|
||||
|
||||
history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Create(history)
|
||||
if errors.IsAlreadyExists(err) {
|
||||
// TODO: Is it okay to get from historyLister?
|
||||
existedHistory, getErr := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Get(name, metav1.GetOptions{})
|
||||
if getErr != nil {
|
||||
return nil, getErr
|
||||
}
|
||||
// Check if we already created it
|
||||
done, err := Match(ds, existedHistory)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if done {
|
||||
return existedHistory, nil
|
||||
}
|
||||
|
||||
// Handle name collisions between different history
|
||||
// TODO: Is it okay to get from dsLister?
|
||||
currDS, getErr := dsc.kubeClient.ExtensionsV1beta1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
|
||||
if getErr != nil {
|
||||
return nil, getErr
|
||||
}
|
||||
if currDS.Status.CollisionCount == nil {
|
||||
currDS.Status.CollisionCount = new(int32)
|
||||
}
|
||||
*currDS.Status.CollisionCount++
|
||||
_, updateErr := dsc.kubeClient.ExtensionsV1beta1().DaemonSets(ds.Namespace).UpdateStatus(currDS)
|
||||
if updateErr != nil {
|
||||
return nil, updateErr
|
||||
}
|
||||
glog.V(2).Infof("Found a hash collision for DaemonSet %q - bumping collisionCount to %d to resolve it", ds.Name, *currDS.Status.CollisionCount)
|
||||
return nil, err
|
||||
}
|
||||
return history, err
|
||||
}
|
||||
|
||||
func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *apps.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod, hash string) ([]*v1.Pod, []*v1.Pod) {
|
||||
var newPods []*v1.Pod
|
||||
var oldPods []*v1.Pod
|
||||
|
||||
for _, pods := range nodeToDaemonPods {
|
||||
for _, pod := range pods {
|
||||
// If the returned error is not nil we have a parse error.
|
||||
// The controller handles this via the hash.
|
||||
generation, err := util.GetTemplateGeneration(ds)
|
||||
if err != nil {
|
||||
generation = nil
|
||||
}
|
||||
if util.IsPodUpdated(pod, hash, generation) {
|
||||
newPods = append(newPods, pod)
|
||||
} else {
|
||||
oldPods = append(oldPods, pod)
|
||||
}
|
||||
}
|
||||
}
|
||||
return newPods, oldPods
|
||||
}
|
||||
|
||||
func (dsc *DaemonSetsController) getUnavailableNumbers(ds *apps.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) (int, int, error) {
|
||||
glog.V(4).Infof("Getting unavailable numbers")
|
||||
// TODO: get nodeList once in syncDaemonSet and pass it to other functions
|
||||
nodeList, err := dsc.nodeLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return -1, -1, fmt.Errorf("couldn't get list of nodes during rolling update of daemon set %#v: %v", ds, err)
|
||||
}
|
||||
|
||||
var numUnavailable, desiredNumberScheduled int
|
||||
for i := range nodeList {
|
||||
node := nodeList[i]
|
||||
wantToRun, _, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
|
||||
if err != nil {
|
||||
return -1, -1, err
|
||||
}
|
||||
if !wantToRun {
|
||||
continue
|
||||
}
|
||||
desiredNumberScheduled++
|
||||
daemonPods, exists := nodeToDaemonPods[node.Name]
|
||||
if !exists {
|
||||
numUnavailable++
|
||||
continue
|
||||
}
|
||||
available := false
|
||||
for _, pod := range daemonPods {
|
||||
//for the purposes of update we ensure that the Pod is both available and not terminating
|
||||
if podutil.IsPodAvailable(pod, ds.Spec.MinReadySeconds, metav1.Now()) && pod.DeletionTimestamp == nil {
|
||||
available = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !available {
|
||||
numUnavailable++
|
||||
}
|
||||
}
|
||||
maxUnavailable, err := intstrutil.GetValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, desiredNumberScheduled, true)
|
||||
if err != nil {
|
||||
return -1, -1, fmt.Errorf("Invalid value for MaxUnavailable: %v", err)
|
||||
}
|
||||
glog.V(4).Infof(" DaemonSet %s/%s, maxUnavailable: %d, numUnavailable: %d", ds.Namespace, ds.Name, maxUnavailable, numUnavailable)
|
||||
return maxUnavailable, numUnavailable, nil
|
||||
}
|
||||
|
||||
type historiesByRevision []*apps.ControllerRevision
|
||||
|
||||
func (h historiesByRevision) Len() int { return len(h) }
|
||||
func (h historiesByRevision) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
|
||||
func (h historiesByRevision) Less(i, j int) bool {
|
||||
return h[i].Revision < h[j].Revision
|
||||
}
|
307
vendor/k8s.io/kubernetes/pkg/controller/daemon/update_test.go
generated
vendored
Normal file
307
vendor/k8s.io/kubernetes/pkg/controller/daemon/update_test.go
generated
vendored
Normal file
@@ -0,0 +1,307 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
)
|
||||
|
||||
func TestDaemonSetUpdatesPods(t *testing.T) {
|
||||
ds := newDaemonSet("foo")
|
||||
manager, podControl, _, err := newTestController(ds)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating DaemonSets controller: %v", err)
|
||||
}
|
||||
maxUnavailable := 2
|
||||
addNodes(manager.nodeStore, 0, 5, nil)
|
||||
manager.dsStore.Add(ds)
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
|
||||
markPodsReady(podControl.podStore)
|
||||
|
||||
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
|
||||
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
|
||||
intStr := intstr.FromInt(maxUnavailable)
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
manager.dsStore.Update(ds)
|
||||
|
||||
clearExpectations(t, manager, ds, podControl)
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
|
||||
clearExpectations(t, manager, ds, podControl)
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
|
||||
markPodsReady(podControl.podStore)
|
||||
|
||||
clearExpectations(t, manager, ds, podControl)
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
|
||||
clearExpectations(t, manager, ds, podControl)
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
|
||||
markPodsReady(podControl.podStore)
|
||||
|
||||
clearExpectations(t, manager, ds, podControl)
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1, 0)
|
||||
clearExpectations(t, manager, ds, podControl)
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
|
||||
markPodsReady(podControl.podStore)
|
||||
|
||||
clearExpectations(t, manager, ds, podControl)
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
|
||||
clearExpectations(t, manager, ds, podControl)
|
||||
}
|
||||
|
||||
func TestDaemonSetUpdatesWhenNewPosIsNotReady(t *testing.T) {
|
||||
ds := newDaemonSet("foo")
|
||||
manager, podControl, _, err := newTestController(ds)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating DaemonSets controller: %v", err)
|
||||
}
|
||||
maxUnavailable := 3
|
||||
addNodes(manager.nodeStore, 0, 5, nil)
|
||||
manager.dsStore.Add(ds)
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
|
||||
markPodsReady(podControl.podStore)
|
||||
|
||||
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
|
||||
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
|
||||
intStr := intstr.FromInt(maxUnavailable)
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
manager.dsStore.Update(ds)
|
||||
|
||||
// new pods are not ready numUnavailable == maxUnavailable
|
||||
clearExpectations(t, manager, ds, podControl)
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
|
||||
clearExpectations(t, manager, ds, podControl)
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
|
||||
|
||||
clearExpectations(t, manager, ds, podControl)
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
|
||||
clearExpectations(t, manager, ds, podControl)
|
||||
}
|
||||
|
||||
func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) {
|
||||
ds := newDaemonSet("foo")
|
||||
manager, podControl, _, err := newTestController(ds)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating DaemonSets controller: %v", err)
|
||||
}
|
||||
maxUnavailable := 3
|
||||
addNodes(manager.nodeStore, 0, 5, nil)
|
||||
manager.dsStore.Add(ds)
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
|
||||
|
||||
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
|
||||
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
|
||||
intStr := intstr.FromInt(maxUnavailable)
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
manager.dsStore.Update(ds)
|
||||
|
||||
// all old pods are unavailable so should be removed
|
||||
clearExpectations(t, manager, ds, podControl)
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 5, 0)
|
||||
clearExpectations(t, manager, ds, podControl)
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
|
||||
|
||||
clearExpectations(t, manager, ds, podControl)
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
|
||||
clearExpectations(t, manager, ds, podControl)
|
||||
}
|
||||
|
||||
func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) {
|
||||
ds := newDaemonSet("foo")
|
||||
manager, podControl, _, err := newTestController(ds)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating DaemonSets controller: %v", err)
|
||||
}
|
||||
maxUnavailable := 3
|
||||
addNodes(manager.nodeStore, 0, 5, nil)
|
||||
manager.dsStore.Add(ds)
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
|
||||
|
||||
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
|
||||
intStr := intstr.FromInt(maxUnavailable)
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
manager.dsStore.Update(ds)
|
||||
|
||||
// template is not changed no pod should be removed
|
||||
clearExpectations(t, manager, ds, podControl)
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
|
||||
clearExpectations(t, manager, ds, podControl)
|
||||
}
|
||||
|
||||
func TestGetUnavailableNumbers(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
Manager *daemonSetsController
|
||||
ds *apps.DaemonSet
|
||||
nodeToPods map[string][]*v1.Pod
|
||||
maxUnavailable int
|
||||
numUnavailable int
|
||||
Err error
|
||||
}{
|
||||
{
|
||||
name: "No nodes",
|
||||
Manager: func() *daemonSetsController {
|
||||
manager, _, _, err := newTestController()
|
||||
if err != nil {
|
||||
t.Fatalf("error creating DaemonSets controller: %v", err)
|
||||
}
|
||||
return manager
|
||||
}(),
|
||||
ds: func() *apps.DaemonSet {
|
||||
ds := newDaemonSet("x")
|
||||
intStr := intstr.FromInt(0)
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
return ds
|
||||
}(),
|
||||
nodeToPods: make(map[string][]*v1.Pod),
|
||||
maxUnavailable: 0,
|
||||
numUnavailable: 0,
|
||||
},
|
||||
{
|
||||
name: "Two nodes with ready pods",
|
||||
Manager: func() *daemonSetsController {
|
||||
manager, _, _, err := newTestController()
|
||||
if err != nil {
|
||||
t.Fatalf("error creating DaemonSets controller: %v", err)
|
||||
}
|
||||
addNodes(manager.nodeStore, 0, 2, nil)
|
||||
return manager
|
||||
}(),
|
||||
ds: func() *apps.DaemonSet {
|
||||
ds := newDaemonSet("x")
|
||||
intStr := intstr.FromInt(1)
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
return ds
|
||||
}(),
|
||||
nodeToPods: func() map[string][]*v1.Pod {
|
||||
mapping := make(map[string][]*v1.Pod)
|
||||
pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
|
||||
pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
|
||||
markPodReady(pod0)
|
||||
markPodReady(pod1)
|
||||
mapping["node-0"] = []*v1.Pod{pod0}
|
||||
mapping["node-1"] = []*v1.Pod{pod1}
|
||||
return mapping
|
||||
}(),
|
||||
maxUnavailable: 1,
|
||||
numUnavailable: 0,
|
||||
},
|
||||
{
|
||||
name: "Two nodes, one node without pods",
|
||||
Manager: func() *daemonSetsController {
|
||||
manager, _, _, err := newTestController()
|
||||
if err != nil {
|
||||
t.Fatalf("error creating DaemonSets controller: %v", err)
|
||||
}
|
||||
addNodes(manager.nodeStore, 0, 2, nil)
|
||||
return manager
|
||||
}(),
|
||||
ds: func() *apps.DaemonSet {
|
||||
ds := newDaemonSet("x")
|
||||
intStr := intstr.FromInt(0)
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
return ds
|
||||
}(),
|
||||
nodeToPods: func() map[string][]*v1.Pod {
|
||||
mapping := make(map[string][]*v1.Pod)
|
||||
pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
|
||||
markPodReady(pod0)
|
||||
mapping["node-0"] = []*v1.Pod{pod0}
|
||||
return mapping
|
||||
}(),
|
||||
maxUnavailable: 0,
|
||||
numUnavailable: 1,
|
||||
},
|
||||
{
|
||||
name: "Two nodes with pods, MaxUnavailable in percents",
|
||||
Manager: func() *daemonSetsController {
|
||||
manager, _, _, err := newTestController()
|
||||
if err != nil {
|
||||
t.Fatalf("error creating DaemonSets controller: %v", err)
|
||||
}
|
||||
addNodes(manager.nodeStore, 0, 2, nil)
|
||||
return manager
|
||||
}(),
|
||||
ds: func() *apps.DaemonSet {
|
||||
ds := newDaemonSet("x")
|
||||
intStr := intstr.FromString("50%")
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
return ds
|
||||
}(),
|
||||
nodeToPods: func() map[string][]*v1.Pod {
|
||||
mapping := make(map[string][]*v1.Pod)
|
||||
pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
|
||||
pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
|
||||
markPodReady(pod0)
|
||||
markPodReady(pod1)
|
||||
mapping["node-0"] = []*v1.Pod{pod0}
|
||||
mapping["node-1"] = []*v1.Pod{pod1}
|
||||
return mapping
|
||||
}(),
|
||||
maxUnavailable: 1,
|
||||
numUnavailable: 0,
|
||||
},
|
||||
{
|
||||
name: "Two nodes with pods, MaxUnavailable in percents, pod terminating",
|
||||
Manager: func() *daemonSetsController {
|
||||
manager, _, _, err := newTestController()
|
||||
if err != nil {
|
||||
t.Fatalf("error creating DaemonSets controller: %v", err)
|
||||
}
|
||||
addNodes(manager.nodeStore, 0, 2, nil)
|
||||
return manager
|
||||
}(),
|
||||
ds: func() *apps.DaemonSet {
|
||||
ds := newDaemonSet("x")
|
||||
intStr := intstr.FromString("50%")
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
return ds
|
||||
}(),
|
||||
nodeToPods: func() map[string][]*v1.Pod {
|
||||
mapping := make(map[string][]*v1.Pod)
|
||||
pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
|
||||
pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
|
||||
now := metav1.Now()
|
||||
markPodReady(pod0)
|
||||
markPodReady(pod1)
|
||||
pod1.DeletionTimestamp = &now
|
||||
mapping["node-0"] = []*v1.Pod{pod0}
|
||||
mapping["node-1"] = []*v1.Pod{pod1}
|
||||
return mapping
|
||||
}(),
|
||||
maxUnavailable: 1,
|
||||
numUnavailable: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
c.Manager.dsStore.Add(c.ds)
|
||||
maxUnavailable, numUnavailable, err := c.Manager.getUnavailableNumbers(c.ds, c.nodeToPods)
|
||||
if err != nil && c.Err != nil {
|
||||
if c.Err != err {
|
||||
t.Errorf("Test case: %s. Expected error: %v but got: %v", c.name, c.Err, err)
|
||||
}
|
||||
} else if err != nil {
|
||||
t.Errorf("Test case: %s. Unexpected error: %v", c.name, err)
|
||||
} else if maxUnavailable != c.maxUnavailable || numUnavailable != c.numUnavailable {
|
||||
t.Errorf("Test case: %s. Wrong values. maxUnavailable: %d, expected: %d, numUnavailable: %d. expected: %d", c.name, maxUnavailable, c.maxUnavailable, numUnavailable, c.numUnavailable)
|
||||
}
|
||||
}
|
||||
}
|
55
vendor/k8s.io/kubernetes/pkg/controller/daemon/util/BUILD
generated
vendored
Normal file
55
vendor/k8s.io/kubernetes/pkg/controller/daemon/util/BUILD
generated
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["daemonset_util.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/daemon/util",
|
||||
deps = [
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/apis/core/v1/helper:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/kubelet/types:go_default_library",
|
||||
"//pkg/scheduler/algorithm:go_default_library",
|
||||
"//vendor/k8s.io/api/apps/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["daemonset_util_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/api/testapi:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/kubelet/apis:go_default_library",
|
||||
"//pkg/scheduler/algorithm:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
],
|
||||
)
|
250
vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util.go
generated
vendored
Normal file
250
vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util.go
generated
vendored
Normal file
@@ -0,0 +1,250 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
apiequality "k8s.io/apimachinery/pkg/api/equality"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
)
|
||||
|
||||
// GetTemplateGeneration gets the template generation associated with a v1.DaemonSet by extracting it from the
|
||||
// deprecated annotation. If no annotation is found nil is returned. If the annotation is found and fails to parse
|
||||
// nil is returned with an error. If the generation can be parsed from the annotation, a pointer to the parsed int64
|
||||
// value is returned.
|
||||
func GetTemplateGeneration(ds *apps.DaemonSet) (*int64, error) {
|
||||
annotation, found := ds.Annotations[apps.DeprecatedTemplateGeneration]
|
||||
if !found {
|
||||
return nil, nil
|
||||
}
|
||||
generation, err := strconv.ParseInt(annotation, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &generation, nil
|
||||
}
|
||||
|
||||
// CreatePodTemplate returns copy of provided template with additional
|
||||
// label which contains templateGeneration (for backward compatibility),
|
||||
// hash of provided template and sets default daemon tolerations.
|
||||
func CreatePodTemplate(template v1.PodTemplateSpec, generation *int64, hash string) v1.PodTemplateSpec {
|
||||
newTemplate := *template.DeepCopy()
|
||||
// DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
|
||||
// Add infinite toleration for taint notReady:NoExecute here
|
||||
// to survive taint-based eviction enforced by NodeController
|
||||
// when node turns not ready.
|
||||
v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
|
||||
Key: algorithm.TaintNodeNotReady,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoExecute,
|
||||
})
|
||||
|
||||
// DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
|
||||
// Add infinite toleration for taint unreachable:NoExecute here
|
||||
// to survive taint-based eviction enforced by NodeController
|
||||
// when node turns unreachable.
|
||||
v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
|
||||
Key: algorithm.TaintNodeUnreachable,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoExecute,
|
||||
})
|
||||
|
||||
// According to TaintNodesByCondition feature, all DaemonSet pods should tolerate
|
||||
// MemoryPressure and DisPressure taints, and the critical pods should tolerate
|
||||
// OutOfDisk taint.
|
||||
v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
|
||||
Key: algorithm.TaintNodeDiskPressure,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
})
|
||||
|
||||
v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
|
||||
Key: algorithm.TaintNodeMemoryPressure,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
})
|
||||
|
||||
// TODO(#48843) OutOfDisk taints will be removed in 1.10
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) &&
|
||||
kubelettypes.IsCritical(newTemplate.Namespace, newTemplate.Annotations) {
|
||||
v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
|
||||
Key: algorithm.TaintNodeOutOfDisk,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoExecute,
|
||||
})
|
||||
}
|
||||
|
||||
if newTemplate.ObjectMeta.Labels == nil {
|
||||
newTemplate.ObjectMeta.Labels = make(map[string]string)
|
||||
}
|
||||
if generation != nil {
|
||||
newTemplate.ObjectMeta.Labels[extensions.DaemonSetTemplateGenerationKey] = fmt.Sprint(*generation)
|
||||
}
|
||||
// TODO: do we need to validate if the DaemonSet is RollingUpdate or not?
|
||||
if len(hash) > 0 {
|
||||
newTemplate.ObjectMeta.Labels[extensions.DefaultDaemonSetUniqueLabelKey] = hash
|
||||
}
|
||||
return newTemplate
|
||||
}
|
||||
|
||||
// IsPodUpdated checks if pod contains label value that either matches templateGeneration or hash
|
||||
func IsPodUpdated(pod *v1.Pod, hash string, dsTemplateGeneration *int64) bool {
|
||||
// Compare with hash to see if the pod is updated, need to maintain backward compatibility of templateGeneration
|
||||
templateMatches := dsTemplateGeneration != nil &&
|
||||
pod.Labels[extensions.DaemonSetTemplateGenerationKey] == fmt.Sprint(dsTemplateGeneration)
|
||||
hashMatches := len(hash) > 0 && pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey] == hash
|
||||
return hashMatches || templateMatches
|
||||
}
|
||||
|
||||
// SplitByAvailablePods splits provided daemon set pods by availability
|
||||
func SplitByAvailablePods(minReadySeconds int32, pods []*v1.Pod) ([]*v1.Pod, []*v1.Pod) {
|
||||
unavailablePods := []*v1.Pod{}
|
||||
availablePods := []*v1.Pod{}
|
||||
for _, pod := range pods {
|
||||
if podutil.IsPodAvailable(pod, minReadySeconds, metav1.Now()) {
|
||||
availablePods = append(availablePods, pod)
|
||||
} else {
|
||||
unavailablePods = append(unavailablePods, pod)
|
||||
}
|
||||
}
|
||||
return availablePods, unavailablePods
|
||||
}
|
||||
|
||||
// ReplaceDaemonSetPodNodeNameNodeAffinity replaces the RequiredDuringSchedulingIgnoredDuringExecution
|
||||
// NodeAffinity of the given affinity with a new NodeAffinity that selects the given nodeName.
|
||||
// Note that this function assumes that no NodeAffinity conflicts with the selected nodeName.
|
||||
func ReplaceDaemonSetPodNodeNameNodeAffinity(affinity *v1.Affinity, nodename string) *v1.Affinity {
|
||||
nodeSelReq := v1.NodeSelectorRequirement{
|
||||
Key: algorithm.NodeFieldSelectorKeyNodeName,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{nodename},
|
||||
}
|
||||
|
||||
nodeSelector := &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchFields: []v1.NodeSelectorRequirement{nodeSelReq},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if affinity == nil {
|
||||
return &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: nodeSelector,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if affinity.NodeAffinity == nil {
|
||||
affinity.NodeAffinity = &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: nodeSelector,
|
||||
}
|
||||
return affinity
|
||||
}
|
||||
|
||||
nodeAffinity := affinity.NodeAffinity
|
||||
|
||||
if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
|
||||
nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = nodeSelector
|
||||
return affinity
|
||||
}
|
||||
|
||||
// Replace node selector with the new one.
|
||||
nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchFields: []v1.NodeSelectorRequirement{nodeSelReq},
|
||||
},
|
||||
}
|
||||
|
||||
return affinity
|
||||
}
|
||||
|
||||
// AppendNoScheduleTolerationIfNotExist appends unschedulable toleration to `.spec` if not exist; otherwise,
|
||||
// no changes to `.spec.tolerations`.
|
||||
func AppendNoScheduleTolerationIfNotExist(tolerations []v1.Toleration) []v1.Toleration {
|
||||
unschedulableToleration := v1.Toleration{
|
||||
Key: algorithm.TaintNodeUnschedulable,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
}
|
||||
|
||||
unschedulableTaintExist := false
|
||||
|
||||
for _, t := range tolerations {
|
||||
if apiequality.Semantic.DeepEqual(t, unschedulableToleration) {
|
||||
unschedulableTaintExist = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !unschedulableTaintExist {
|
||||
tolerations = append(tolerations, unschedulableToleration)
|
||||
}
|
||||
|
||||
return tolerations
|
||||
}
|
||||
|
||||
// GetTargetNodeName get the target node name of DaemonSet pods. If `.spec.NodeName` is not empty (nil),
|
||||
// return `.spec.NodeName`; otherwise, retrieve node name of pending pods from NodeAffinity. Return error
|
||||
// if failed to retrieve node name from `.spec.NodeName` and NodeAffinity.
|
||||
func GetTargetNodeName(pod *v1.Pod) (string, error) {
|
||||
if len(pod.Spec.NodeName) != 0 {
|
||||
return pod.Spec.NodeName, nil
|
||||
}
|
||||
|
||||
// If ScheduleDaemonSetPods was enabled before, retrieve node name of unscheduled pods from NodeAffinity
|
||||
if pod.Spec.Affinity == nil ||
|
||||
pod.Spec.Affinity.NodeAffinity == nil ||
|
||||
pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
|
||||
return "", fmt.Errorf("no spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution for pod %s/%s",
|
||||
pod.Namespace, pod.Name)
|
||||
}
|
||||
|
||||
terms := pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
|
||||
if len(terms) < 1 {
|
||||
return "", fmt.Errorf("no nodeSelectorTerms in requiredDuringSchedulingIgnoredDuringExecution of pod %s/%s",
|
||||
pod.Namespace, pod.Name)
|
||||
}
|
||||
|
||||
for _, term := range terms {
|
||||
for _, exp := range term.MatchFields {
|
||||
if exp.Key == algorithm.NodeFieldSelectorKeyNodeName &&
|
||||
exp.Operator == v1.NodeSelectorOpIn {
|
||||
if len(exp.Values) != 1 {
|
||||
return "", fmt.Errorf("the matchFields value of '%s' is not unique for pod %s/%s",
|
||||
algorithm.NodeFieldSelectorKeyNodeName, pod.Namespace, pod.Name)
|
||||
}
|
||||
|
||||
return exp.Values[0], nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("no node name found for pod %s/%s", pod.Namespace, pod.Name)
|
||||
}
|
599
vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util_test.go
generated
vendored
Normal file
599
vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util_test.go
generated
vendored
Normal file
@@ -0,0 +1,599 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
)
|
||||
|
||||
func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
|
||||
pod := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: testapi.Extensions.GroupVersion().String()},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: label,
|
||||
Namespace: metav1.NamespaceDefault,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
NodeName: nodeName,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: "foo/bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
pod.Name = podName
|
||||
return pod
|
||||
}
|
||||
|
||||
func TestIsPodUpdated(t *testing.T) {
|
||||
templateGeneration := int64Ptr(12345)
|
||||
badGeneration := int64Ptr(12345)
|
||||
hash := "55555"
|
||||
labels := map[string]string{extensions.DaemonSetTemplateGenerationKey: fmt.Sprint(templateGeneration), extensions.DefaultDaemonSetUniqueLabelKey: hash}
|
||||
labelsNoHash := map[string]string{extensions.DaemonSetTemplateGenerationKey: fmt.Sprint(templateGeneration)}
|
||||
tests := []struct {
|
||||
test string
|
||||
templateGeneration *int64
|
||||
pod *v1.Pod
|
||||
hash string
|
||||
isUpdated bool
|
||||
}{
|
||||
{
|
||||
"templateGeneration and hash both match",
|
||||
templateGeneration,
|
||||
newPod("pod1", "node1", labels),
|
||||
hash,
|
||||
true,
|
||||
},
|
||||
{
|
||||
"templateGeneration matches, hash doesn't",
|
||||
templateGeneration,
|
||||
newPod("pod1", "node1", labels),
|
||||
hash + "123",
|
||||
true,
|
||||
},
|
||||
{
|
||||
"templateGeneration matches, no hash label, has hash",
|
||||
templateGeneration,
|
||||
newPod("pod1", "node1", labelsNoHash),
|
||||
hash,
|
||||
true,
|
||||
},
|
||||
{
|
||||
"templateGeneration matches, no hash label, no hash",
|
||||
templateGeneration,
|
||||
newPod("pod1", "node1", labelsNoHash),
|
||||
"",
|
||||
true,
|
||||
},
|
||||
{
|
||||
"templateGeneration matches, has hash label, no hash",
|
||||
templateGeneration,
|
||||
newPod("pod1", "node1", labels),
|
||||
"",
|
||||
true,
|
||||
},
|
||||
{
|
||||
"templateGeneration doesn't match, hash does",
|
||||
badGeneration,
|
||||
newPod("pod1", "node1", labels),
|
||||
hash,
|
||||
true,
|
||||
},
|
||||
{
|
||||
"templateGeneration and hash don't match",
|
||||
badGeneration,
|
||||
newPod("pod1", "node1", labels),
|
||||
hash + "123",
|
||||
false,
|
||||
},
|
||||
{
|
||||
"empty labels, no hash",
|
||||
templateGeneration,
|
||||
newPod("pod1", "node1", map[string]string{}),
|
||||
"",
|
||||
false,
|
||||
},
|
||||
{
|
||||
"empty labels",
|
||||
templateGeneration,
|
||||
newPod("pod1", "node1", map[string]string{}),
|
||||
hash,
|
||||
false,
|
||||
},
|
||||
{
|
||||
"no labels",
|
||||
templateGeneration,
|
||||
newPod("pod1", "node1", nil),
|
||||
hash,
|
||||
false,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
updated := IsPodUpdated(test.pod, test.hash, test.templateGeneration)
|
||||
if updated != test.isUpdated {
|
||||
t.Errorf("%s: IsPodUpdated returned wrong value. Expected %t, got %t", test.test, test.isUpdated, updated)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreatePodTemplate(t *testing.T) {
|
||||
tests := []struct {
|
||||
templateGeneration *int64
|
||||
hash string
|
||||
expectUniqueLabel bool
|
||||
}{
|
||||
{int64Ptr(1), "", false},
|
||||
{int64Ptr(2), "3242341807", true},
|
||||
}
|
||||
for _, test := range tests {
|
||||
podTemplateSpec := v1.PodTemplateSpec{}
|
||||
newPodTemplate := CreatePodTemplate(podTemplateSpec, test.templateGeneration, test.hash)
|
||||
val, exists := newPodTemplate.ObjectMeta.Labels[extensions.DaemonSetTemplateGenerationKey]
|
||||
if !exists || val != fmt.Sprint(*test.templateGeneration) {
|
||||
t.Errorf("Expected podTemplateSpec to have generation label value: %d, got: %s", *test.templateGeneration, val)
|
||||
}
|
||||
val, exists = newPodTemplate.ObjectMeta.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
|
||||
if test.expectUniqueLabel && (!exists || val != test.hash) {
|
||||
t.Errorf("Expected podTemplateSpec to have hash label value: %s, got: %s", test.hash, val)
|
||||
}
|
||||
if !test.expectUniqueLabel && exists {
|
||||
t.Errorf("Expected podTemplateSpec to have no hash label, got: %s", val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func int64Ptr(i int) *int64 {
|
||||
li := int64(i)
|
||||
return &li
|
||||
}
|
||||
|
||||
func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
	tests := []struct {
		affinity *v1.Affinity
		hostname string
		expected *v1.Affinity
	}{
		{
			affinity: nil,
			hostname: "host_1",
			expected: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										Key:      algorithm.NodeFieldSelectorKeyNodeName,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
						},
					},
				},
			},
		},
		{
			affinity: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchExpressions: []v1.NodeSelectorRequirement{
									{
										Key:      kubeletapis.LabelHostname,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
						},
					},
				},
			},
			hostname: "host_1",
			expected: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										Key:      algorithm.NodeFieldSelectorKeyNodeName,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
						},
					},
				},
			},
		},
		{
			affinity: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
						{
							Preference: v1.NodeSelectorTerm{
								MatchExpressions: []v1.NodeSelectorRequirement{
									{
										Key:      kubeletapis.LabelHostname,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
						},
					},
				},
			},
			hostname: "host_1",
			expected: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
						{
							Preference: v1.NodeSelectorTerm{
								MatchExpressions: []v1.NodeSelectorRequirement{
									{
										Key:      kubeletapis.LabelHostname,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
						},
					},
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										Key:      algorithm.NodeFieldSelectorKeyNodeName,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
						},
					},
				},
			},
		},
		{
			affinity: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										Key:      algorithm.NodeFieldSelectorKeyNodeName,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1", "host_2"},
									},
								},
							},
						},
					},
				},
			},
			hostname: "host_1",
			expected: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										Key:      algorithm.NodeFieldSelectorKeyNodeName,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
						},
					},
				},
			},
		},
		{
			affinity: nil,
			hostname: "host_1",
			expected: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										Key:      algorithm.NodeFieldSelectorKeyNodeName,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
						},
					},
				},
			},
		},
		{
			affinity: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchExpressions: []v1.NodeSelectorRequirement{
									{
										Key:      "hostname",
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										Key:      algorithm.NodeFieldSelectorKeyNodeName,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_2"},
									},
								},
							},
						},
					},
				},
			},
			hostname: "host_1",
			expected: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										Key:      algorithm.NodeFieldSelectorKeyNodeName,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
						},
					},
				},
			},
		},
		{
			affinity: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										Key:      algorithm.NodeFieldSelectorKeyNodeName,
										Operator: v1.NodeSelectorOpNotIn,
										Values:   []string{"host_2"},
									},
								},
							},
						},
					},
				},
			},
			hostname: "host_1",
			expected: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										Key:      algorithm.NodeFieldSelectorKeyNodeName,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
						},
					},
				},
			},
		},
		{
			affinity: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										// NOTE: Only `metadata.name` is a valid key in `MatchFields` in 1.11;
										// this case is added for compatibility: the feature keeps working as
										// normal when new keys are introduced.
										Key:      "metadata.foo",
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"bar"},
									},
								},
							},
						},
					},
				},
			},
			hostname: "host_1",
			expected: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchFields: []v1.NodeSelectorRequirement{
									{
										Key:      algorithm.NodeFieldSelectorKeyNodeName,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"host_1"},
									},
								},
							},
						},
					},
				},
			},
		},
	}

	for i, test := range tests {
		got := ReplaceDaemonSetPodNodeNameNodeAffinity(test.affinity, test.hostname)
		if !reflect.DeepEqual(test.expected, got) {
			t.Errorf("Failed to append NodeAffinity in case %d, got: %v, expected: %v",
				i, got, test.expected)
		}
	}
}

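// forEachFeatureGate runs the supplied test function as a subtest for each listed
// feature gate, once with the gate enabled and once disabled, restoring the gate's
// original setting afterwards.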
func forEachFeatureGate(t *testing.T, tf func(t *testing.T), gates ...utilfeature.Feature) {
	for _, fg := range gates {
		func() {
			enabled := utilfeature.DefaultFeatureGate.Enabled(fg)
			defer func() {
				utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled))
			}()

			for _, f := range []bool{true, false} {
				utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f))
				t.Run(fmt.Sprintf("%v (%t)", fg, f), tf)
			}
		}()
	}
}

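// TestGetTargetNodeName exercises GetTargetNodeName with pods that carry the target
// node in spec.nodeName or in a metadata.name node-affinity term, and expects an
// error when the target node cannot be determined unambiguously.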
func TestGetTargetNodeName(t *testing.T) {
	testFun := func(t *testing.T) {
		tests := []struct {
			pod         *v1.Pod
			nodeName    string
			expectedErr bool
		}{
			{
				pod: &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pod1",
						Namespace: "default",
					},
					Spec: v1.PodSpec{
						NodeName: "node-1",
					},
				},
				nodeName: "node-1",
			},
			{
				pod: &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pod2",
						Namespace: "default",
					},
					Spec: v1.PodSpec{
						Affinity: &v1.Affinity{
							NodeAffinity: &v1.NodeAffinity{
								RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
									NodeSelectorTerms: []v1.NodeSelectorTerm{
										{
											MatchFields: []v1.NodeSelectorRequirement{
												{
													Key:      algorithm.NodeFieldSelectorKeyNodeName,
													Operator: v1.NodeSelectorOpIn,
													Values:   []string{"node-1"},
												},
											},
										},
									},
								},
							},
						},
					},
				},
				nodeName: "node-1",
			},
			{
				pod: &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pod3",
						Namespace: "default",
					},
					Spec: v1.PodSpec{
						Affinity: &v1.Affinity{
							NodeAffinity: &v1.NodeAffinity{
								RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
									NodeSelectorTerms: []v1.NodeSelectorTerm{
										{
											MatchFields: []v1.NodeSelectorRequirement{
												{
													Key:      algorithm.NodeFieldSelectorKeyNodeName,
													Operator: v1.NodeSelectorOpIn,
													Values:   []string{"node-1", "node-2"},
												},
											},
										},
									},
								},
							},
						},
					},
				},
				expectedErr: true,
			},
			{
				pod: &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pod4",
						Namespace: "default",
					},
					Spec: v1.PodSpec{},
				},
				expectedErr: true,
			},
		}

		for _, test := range tests {
			got, err := GetTargetNodeName(test.pod)
			if test.expectedErr != (err != nil) {
				t.Errorf("Unexpected error, expectedErr: %v, err: %v", test.expectedErr, err)
			} else if !test.expectedErr {
				if test.nodeName != got {
					t.Errorf("Failed to get target node name, got: %v, expected: %v", got, test.nodeName)
				}
			}
		}
	}

	forEachFeatureGate(t, testFun, features.ScheduleDaemonSetPods)
}