Bumping k8s dependencies to 1.13

Cheng Xing
2018-11-16 14:08:25 -08:00
parent 305407125c
commit b4c0b68ec7
8002 changed files with 884099 additions and 276228 deletions


@@ -39,8 +39,11 @@ import (
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/client-go/util/retry"
"k8s.io/kubernetes/pkg/api/legacyscheme"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/daemon"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler"
@@ -48,7 +51,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
"k8s.io/kubernetes/pkg/scheduler/factory"
"k8s.io/kubernetes/pkg/util/metrics"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
"k8s.io/kubernetes/test/integration/framework"
)
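// NOTE (editor): a minimal sketch, not part of this commit, of how the
// flowcontrol.Backoff now passed to daemon.NewDaemonSetsController behaves:
// the per-key delay starts at the initial duration and doubles on each
// subsequent failure, capped at the maximum. The key below is hypothetical.
func exampleFailedPodsBackoff() {
	backoff := flowcontrol.NewBackOff(5*time.Second, 15*time.Minute)
	key := "default/ds-foo/node-1"
	backoff.Next(key, time.Now()) // first failure: delay becomes 5s
	backoff.Next(key, time.Now()) // second failure: delay doubles to 10s
	fmt.Println(backoff.Get(key)) // prints 10s
}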
@@ -65,13 +68,13 @@ func setup(t *testing.T) (*httptest.Server, framework.CloseFunc, *daemon.DaemonS
}
resyncPeriod := 12 * time.Hour
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "daemonset-informers")), resyncPeriod)
-	metrics.UnregisterMetricAndUntrackRateLimiterUsage("daemon_controller")
dc, err := daemon.NewDaemonSetsController(
informers.Apps().V1().DaemonSets(),
informers.Apps().V1().ControllerRevisions(),
informers.Core().V1().Pods(),
informers.Core().V1().Nodes(),
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "daemonset-controller")),
+		flowcontrol.NewBackOff(5*time.Second, 15*time.Minute),
)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
@@ -91,23 +94,27 @@ func setupScheduler(
return
}
-	schedulerConfigFactory := factory.NewConfigFactory(
-		v1.DefaultSchedulerName,
-		cs,
-		informerFactory.Core().V1().Nodes(),
-		informerFactory.Core().V1().Pods(),
-		informerFactory.Core().V1().PersistentVolumes(),
-		informerFactory.Core().V1().PersistentVolumeClaims(),
-		informerFactory.Core().V1().ReplicationControllers(),
-		informerFactory.Extensions().V1beta1().ReplicaSets(),
-		informerFactory.Apps().V1beta1().StatefulSets(),
-		informerFactory.Core().V1().Services(),
-		informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
-		informerFactory.Storage().V1().StorageClasses(),
-		v1.DefaultHardPodAffinitySymmetricWeight,
-		true,
-		false,
-	)
+	// Enable Features.
+	algorithmprovider.ApplyFeatureGates()
+	schedulerConfigFactory := factory.NewConfigFactory(&factory.ConfigFactoryArgs{
+		SchedulerName:                  v1.DefaultSchedulerName,
+		Client:                         cs,
+		NodeInformer:                   informerFactory.Core().V1().Nodes(),
+		PodInformer:                    informerFactory.Core().V1().Pods(),
+		PvInformer:                     informerFactory.Core().V1().PersistentVolumes(),
+		PvcInformer:                    informerFactory.Core().V1().PersistentVolumeClaims(),
+		ReplicationControllerInformer:  informerFactory.Core().V1().ReplicationControllers(),
+		ReplicaSetInformer:             informerFactory.Apps().V1().ReplicaSets(),
+		StatefulSetInformer:            informerFactory.Apps().V1().StatefulSets(),
+		ServiceInformer:                informerFactory.Core().V1().Services(),
+		PdbInformer:                    informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
+		StorageClassInformer:           informerFactory.Storage().V1().StorageClasses(),
+		HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
+		EnableEquivalenceClassCache:    true,
+		DisablePreemption:              false,
+		PercentageOfNodesToScore:       100,
+	})
schedulerConfig, err := schedulerConfigFactory.Create()
if err != nil {
@@ -271,11 +278,13 @@ func newNode(name string, label map[string]string) *v1.Node {
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: label,
-			Namespace: metav1.NamespaceDefault,
+			Namespace: metav1.NamespaceNone,
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}},
Allocatable: v1.ResourceList{v1.ResourcePods: resource.MustParse("100")},
+			// minimum version required to use matchFields
+			NodeInfo: v1.NodeSystemInfo{KubeletVersion: "v1.11.0"},
},
}
}
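// NOTE (editor): a hedged sketch, not part of this commit, of the matchFields
// node affinity that the ScheduleDaemonSetPods path relies on; kubelets older
// than v1.11 reject MatchFields terms, which is why newNode reports
// KubeletVersion v1.11.0. The node name below is hypothetical.
func exampleMatchFieldsAffinity() *v1.Affinity {
	return &v1.Affinity{
		NodeAffinity: &v1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
				NodeSelectorTerms: []v1.NodeSelectorTerm{{
					MatchFields: []v1.NodeSelectorRequirement{{
						Key:      "metadata.name", // the only field key supported for nodes
						Operator: v1.NodeSelectorOpIn,
						Values:   []string{"single-node"},
					}},
				}},
			},
		},
	}
}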
@@ -293,7 +302,8 @@ func validateDaemonSetPodsAndMarkReady(
podClient corev1typed.PodInterface,
podInformer cache.SharedIndexInformer,
numberPods int,
-	t *testing.T) {
+	t *testing.T,
+) {
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
objects := podInformer.GetIndexer().List()
if len(objects) != numberPods {
@@ -372,6 +382,52 @@ func waitForPodsCreated(podInformer cache.SharedIndexInformer, num int) error {
})
}
func waitForDaemonSetAndControllerRevisionCreated(c clientset.Interface, name string, namespace string) error {
return wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) {
ds, err := c.AppsV1().DaemonSets(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
if ds == nil {
return false, nil
}
revs, err := c.AppsV1().ControllerRevisions(namespace).List(metav1.ListOptions{})
if err != nil {
return false, err
}
		// note: revs.Size() returns the serialized byte size, not the item count
		if len(revs.Items) == 0 {
return false, nil
}
for _, rev := range revs.Items {
for _, oref := range rev.OwnerReferences {
if oref.Kind == "DaemonSet" && oref.UID == ds.UID {
return true, nil
}
}
}
return false, nil
})
}
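// NOTE (editor): a minimal sketch, not part of this commit, of the polling
// helpers used throughout this file: wait.Poll waits one interval before the
// first check, while wait.PollImmediate (used above) runs the condition once
// up front. Both stop on true, on error, or when the timeout elapses. The
// condition below is hypothetical.
func examplePollUntilTrue() error {
	started := time.Now()
	return wait.Poll(100*time.Millisecond, 10*time.Second, func() (bool, error) {
		return time.Since(started) > time.Second, nil // hypothetical condition
	})
}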
func hashAndNameForDaemonSet(ds *apps.DaemonSet) (string, string) {
hash := fmt.Sprint(controller.ComputeHash(&ds.Spec.Template, ds.Status.CollisionCount))
name := ds.Name + "-" + hash
return hash, name
}
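// NOTE (editor): a hypothetical usage sketch, not part of this commit: the
// hash doubles as the value of the controller-revision-hash label on the
// DaemonSet's pods, so the pods of the current revision can be listed with a
// plain label selector.
func exampleListPodsForCurrentRevision(ds *apps.DaemonSet, podClient corev1typed.PodInterface) {
	hash, _ := hashAndNameForDaemonSet(ds)
	pods, err := podClient.List(metav1.ListOptions{
		LabelSelector: apps.DefaultDaemonSetUniqueLabelKey + "=" + hash,
	})
	_, _ = pods, err
}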
func validateDaemonSetCollisionCount(dsClient appstyped.DaemonSetInterface, dsName string, expCount int32, t *testing.T) {
ds, err := dsClient.Get(dsName, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to look up DaemonSet: %v", err)
}
	collisionCount := ds.Status.CollisionCount
	if collisionCount == nil {
		t.Fatalf("Expected collisionCount to be %d, but found nil", expCount)
	}
	if *collisionCount != expCount {
		t.Fatalf("Expected collisionCount to be %d, but found %d", expCount, *collisionCount)
	}
}
func validateDaemonSetStatus(
dsClient appstyped.DaemonSetInterface,
dsName string,
@@ -413,16 +469,36 @@ func validateFailedPlacementEvent(eventClient corev1typed.EventInterface, t *tes
}
}
func updateDS(t *testing.T, dsClient appstyped.DaemonSetInterface, dsName string, updateFunc func(*apps.DaemonSet)) *apps.DaemonSet {
var ds *apps.DaemonSet
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
newDS, err := dsClient.Get(dsName, metav1.GetOptions{})
if err != nil {
return err
}
updateFunc(newDS)
ds, err = dsClient.Update(newDS)
return err
}); err != nil {
t.Fatalf("Failed to update DaemonSet: %v", err)
}
return ds
}
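// NOTE (editor): hypothetical usage of updateDS, not part of this commit;
// RetryOnConflict re-reads the object and reapplies the mutation if Update
// hits a resourceVersion conflict with the controller's own writes. The
// image tag is made up.
func exampleUpdateDSImage(t *testing.T, dsClient appstyped.DaemonSetInterface) {
	updateDS(t, dsClient, "foo", func(ds *apps.DaemonSet) {
		ds.Spec.Template.Spec.Containers[0].Image = "foo/bar:v2"
	})
}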
func forEachFeatureGate(t *testing.T, tf func(t *testing.T)) {
for _, fg := range featureGates() {
func() {
enabled := utilfeature.DefaultFeatureGate.Enabled(fg)
defer func() {
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled))
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled)); err != nil {
t.Fatalf("Failed to set FeatureGate %v to %t", fg, enabled)
}
}()
for _, f := range []bool{true, false} {
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f))
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f)); err != nil {
t.Fatalf("Failed to set FeatureGate %v to %t", fg, f)
}
t.Run(fmt.Sprintf("%v (%t)", fg, f), tf)
}
}()
@@ -630,7 +706,23 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
})
}
func setFeatureGate(t *testing.T, feature utilfeature.Feature, enabled bool) {
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", feature, enabled)); err != nil {
t.Fatalf("Failed to set FeatureGate %v to %t: %v", feature, enabled, err)
}
}
// When ScheduleDaemonSetPods is disabled, DaemonSets should not launch onto nodes with insufficient capacity.
// See TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled; once ScheduleDaemonSetPods is enabled by default, this test will no longer be needed.
func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
	// Restore the feature gate to its original setting.
defer func() {
if enabled {
setFeatureGate(t, features.ScheduleDaemonSetPods, true)
}
}()
setFeatureGate(t, features.ScheduleDaemonSetPods, false)
forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
server, closeFn, dc, informers, clientset := setup(t)
defer closeFn()
@@ -673,11 +765,15 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
func TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled(t *testing.T) {
enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
defer func() {
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t",
features.ScheduleDaemonSetPods, enabled))
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t",
features.ScheduleDaemonSetPods, enabled)); err != nil {
t.Fatalf("Failed to set FeatureGate %v to %t", features.ScheduleDaemonSetPods, enabled)
}
}()
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.ScheduleDaemonSetPods, true))
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.ScheduleDaemonSetPods, true)); err != nil {
t.Fatalf("Failed to set FeatureGate %v to %t", features.ScheduleDaemonSetPods, true)
}
forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
server, closeFn, dc, informers, clientset := setup(t)
@@ -740,3 +836,261 @@ func TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled(t *testing.T)
validateDaemonSetStatus(dsClient, ds.Name, 1, t)
})
}
// TestLaunchWithHashCollision tests that a DaemonSet can be updated even if there is a
// hash collision with an existing ControllerRevision
func TestLaunchWithHashCollision(t *testing.T) {
server, closeFn, dc, informers, clientset := setup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("one-node-daemonset-test", server, t)
defer framework.DeleteTestingNamespace(ns, server, t)
dsClient := clientset.AppsV1().DaemonSets(ns.Name)
podInformer := informers.Core().V1().Pods().Informer()
nodeClient := clientset.CoreV1().Nodes()
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
go dc.Run(1, stopCh)
setupScheduler(t, clientset, informers, stopCh)
// Create single node
_, err := nodeClient.Create(newNode("single-node", nil))
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}
// Create new DaemonSet with RollingUpdate strategy
orgDs := newDaemonSet("foo", ns.Name)
oneIntString := intstr.FromInt(1)
orgDs.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{
Type: apps.RollingUpdateDaemonSetStrategyType,
RollingUpdate: &apps.RollingUpdateDaemonSet{
MaxUnavailable: &oneIntString,
},
}
ds, err := dsClient.Create(orgDs)
if err != nil {
t.Fatalf("Failed to create DaemonSet: %v", err)
}
// Wait for the DaemonSet to be created before proceeding
err = waitForDaemonSetAndControllerRevisionCreated(clientset, ds.Name, ds.Namespace)
if err != nil {
t.Fatalf("Failed to create DaemonSet: %v", err)
}
ds, err = dsClient.Get(ds.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to get DaemonSet: %v", err)
}
var orgCollisionCount int32
if ds.Status.CollisionCount != nil {
orgCollisionCount = *ds.Status.CollisionCount
}
// Look up the ControllerRevision for the DaemonSet
_, name := hashAndNameForDaemonSet(ds)
revision, err := clientset.AppsV1().ControllerRevisions(ds.Namespace).Get(name, metav1.GetOptions{})
if err != nil || revision == nil {
t.Fatalf("Failed to look up ControllerRevision: %v", err)
}
// Create a "fake" ControllerRevision that we know will create a hash collision when we make
// the next update
one := int64(1)
ds.Spec.Template.Spec.TerminationGracePeriodSeconds = &one
newHash, newName := hashAndNameForDaemonSet(ds)
newRevision := &apps.ControllerRevision{
ObjectMeta: metav1.ObjectMeta{
Name: newName,
Namespace: ds.Namespace,
Labels: labelsutil.CloneAndAddLabel(ds.Spec.Template.Labels, apps.DefaultDaemonSetUniqueLabelKey, newHash),
Annotations: ds.Annotations,
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(ds, apps.SchemeGroupVersion.WithKind("DaemonSet"))},
},
Data: revision.Data,
Revision: revision.Revision + 1,
}
_, err = clientset.AppsV1().ControllerRevisions(ds.Namespace).Create(newRevision)
if err != nil {
t.Fatalf("Failed to create ControllerRevision: %v", err)
}
// Make an update of the DaemonSet which we know will create a hash collision when
// the next ControllerRevision is created.
ds = updateDS(t, dsClient, ds.Name, func(updateDS *apps.DaemonSet) {
updateDS.Spec.Template.Spec.TerminationGracePeriodSeconds = &one
})
// Wait for any pod with the latest Spec to exist
err = wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) {
objects := podInformer.GetIndexer().List()
for _, object := range objects {
pod := object.(*v1.Pod)
if *pod.Spec.TerminationGracePeriodSeconds == *ds.Spec.Template.Spec.TerminationGracePeriodSeconds {
return true, nil
}
}
return false, nil
})
if err != nil {
t.Fatalf("Failed to wait for Pods with the latest Spec to be created: %v", err)
}
validateDaemonSetCollisionCount(dsClient, ds.Name, orgCollisionCount+1, t)
}
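// NOTE (editor): a hedged sketch, not part of this commit, of why bumping
// status.collisionCount resolves a hash collision: ComputeHash folds the
// collision count into the hash, so a different count yields a different
// ControllerRevision name for the same pod template.
func exampleHashChangesWithCollisionCount(ds *apps.DaemonSet) bool {
	var count0, count1 int32 = 0, 1
	h0 := controller.ComputeHash(&ds.Spec.Template, &count0)
	h1 := controller.ComputeHash(&ds.Spec.Template, &count1)
	return h0 != h1 // true: different collision counts give different hashes
}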
// TestTaintedNode tests that, regardless of whether the ScheduleDaemonSetPods
// feature is enabled, no pod is expected to be scheduled onto a tainted node.
func TestTaintedNode(t *testing.T) {
forEachFeatureGate(t, func(t *testing.T) {
forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
server, closeFn, dc, informers, clientset := setup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("tainted-node", server, t)
defer framework.DeleteTestingNamespace(ns, server, t)
dsClient := clientset.AppsV1().DaemonSets(ns.Name)
podClient := clientset.CoreV1().Pods(ns.Name)
podInformer := informers.Core().V1().Pods().Informer()
nodeClient := clientset.CoreV1().Nodes()
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
go dc.Run(5, stopCh)
// Start Scheduler
setupScheduler(t, clientset, informers, stopCh)
ds := newDaemonSet("foo", ns.Name)
ds.Spec.UpdateStrategy = *strategy
ds, err := dsClient.Create(ds)
if err != nil {
t.Fatalf("Failed to create DaemonSet: %v", err)
}
defer cleanupDaemonSets(t, clientset, ds)
nodeWithTaint := newNode("node-with-taint", nil)
nodeWithTaint.Spec.Taints = []v1.Taint{{Key: "key1", Value: "val1", Effect: "NoSchedule"}}
_, err = nodeClient.Create(nodeWithTaint)
if err != nil {
t.Fatalf("Failed to create nodeWithTaint: %v", err)
}
nodeWithoutTaint := newNode("node-without-taint", nil)
_, err = nodeClient.Create(nodeWithoutTaint)
if err != nil {
t.Fatalf("Failed to create nodeWithoutTaint: %v", err)
}
validateDaemonSetPodsAndMarkReady(podClient, podInformer, 1, t)
validateDaemonSetStatus(dsClient, ds.Name, 1, t)
// remove taint from nodeWithTaint
nodeWithTaint, err = nodeClient.Get("node-with-taint", metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to retrieve nodeWithTaint: %v", err)
}
nodeWithTaintCopy := nodeWithTaint.DeepCopy()
nodeWithTaintCopy.Spec.Taints = []v1.Taint{}
_, err = nodeClient.Update(nodeWithTaintCopy)
if err != nil {
t.Fatalf("Failed to update nodeWithTaint: %v", err)
}
validateDaemonSetPodsAndMarkReady(podClient, podInformer, 2, t)
validateDaemonSetStatus(dsClient, ds.Name, 2, t)
})
})
}
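// NOTE (editor): a hypothetical sketch, not part of this commit: adding a
// matching toleration to the pod template would let the DaemonSet land on
// node-with-taint above without waiting for the taint to be removed.
func exampleTolerateTaint(ds *apps.DaemonSet) {
	ds.Spec.Template.Spec.Tolerations = []v1.Toleration{{
		Key:      "key1",
		Operator: v1.TolerationOpEqual,
		Value:    "val1",
		Effect:   v1.TaintEffectNoSchedule,
	}}
}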
// TestUnschedulableNodeDaemonDoesLaunchPod tests that DaemonSet pods can still be scheduled
// onto unschedulable nodes when TaintNodesByCondition is enabled.
func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {
enabledTaint := utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition)
defer func() {
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t",
features.TaintNodesByCondition, enabledTaint)); err != nil {
t.Fatalf("Failed to set FeatureGate %v to %t", features.TaintNodesByCondition, enabledTaint)
}
}()
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.TaintNodesByCondition, true)); err != nil {
t.Fatalf("Failed to set FeatureGate %v to %t", features.TaintNodesByCondition, true)
}
forEachFeatureGate(t, func(t *testing.T) {
forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
server, closeFn, dc, informers, clientset := setup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("daemonset-unschedulable-test", server, t)
defer framework.DeleteTestingNamespace(ns, server, t)
dsClient := clientset.AppsV1().DaemonSets(ns.Name)
podClient := clientset.CoreV1().Pods(ns.Name)
nodeClient := clientset.CoreV1().Nodes()
podInformer := informers.Core().V1().Pods().Informer()
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
go dc.Run(5, stopCh)
// Start Scheduler
setupScheduler(t, clientset, informers, stopCh)
ds := newDaemonSet("foo", ns.Name)
ds.Spec.UpdateStrategy = *strategy
ds.Spec.Template.Spec.HostNetwork = true
_, err := dsClient.Create(ds)
if err != nil {
t.Fatalf("Failed to create DaemonSet: %v", err)
}
defer cleanupDaemonSets(t, clientset, ds)
// Creates unschedulable node.
node := newNode("unschedulable-node", nil)
node.Spec.Unschedulable = true
node.Spec.Taints = []v1.Taint{
{
Key: algorithm.TaintNodeUnschedulable,
Effect: v1.TaintEffectNoSchedule,
},
}
_, err = nodeClient.Create(node)
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}
// Creates network-unavailable node.
nodeNU := newNode("network-unavailable-node", nil)
nodeNU.Status.Conditions = []v1.NodeCondition{
{Type: v1.NodeReady, Status: v1.ConditionFalse},
{Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue},
}
nodeNU.Spec.Taints = []v1.Taint{
{
Key: algorithm.TaintNodeNetworkUnavailable,
Effect: v1.TaintEffectNoSchedule,
},
}
_, err = nodeClient.Create(nodeNU)
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}
validateDaemonSetPodsAndMarkReady(podClient, podInformer, 2, t)
validateDaemonSetStatus(dsClient, ds.Name, 2, t)
})
})
}
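// NOTE (editor): a hedged sketch, not part of this commit, of the tolerations
// the DaemonSet controller is expected to add when TaintNodesByCondition is
// enabled, which is why the pods above can land on the tainted nodes
// (host-network pods additionally tolerate network-unavailable).
func exampleExpectedDaemonSetTolerations() []v1.Toleration {
	return []v1.Toleration{
		{Key: algorithm.TaintNodeUnschedulable, Operator: v1.TolerationOpExists, Effect: v1.TaintEffectNoSchedule},
		{Key: algorithm.TaintNodeNetworkUnavailable, Operator: v1.TolerationOpExists, Effect: v1.TaintEffectNoSchedule},
	}
}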