Bumping k8s dependencies to 1.13

Cheng Xing
2018-11-16 14:08:25 -08:00
parent 305407125c
commit b4c0b68ec7
8002 changed files with 884099 additions and 276228 deletions


@@ -37,7 +37,28 @@ go_library(
"//pkg/controller/replication:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/util/pointer:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
@@ -45,27 +66,7 @@ go_library(
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)

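The hunk above relabels the test target's Bazel dependencies from //vendor/k8s.io/... to //staging/src/k8s.io/... (and moves the pointer helpers to //vendor/k8s.io/utils/pointer). The Go import paths themselves are unaffected by the relabeling; a purely illustrative snippet:

package apps

// Illustrative only: the Go import paths stay the same whether Bazel resolves
// them through //vendor/... or //staging/src/... labels; only the BUILD file
// changes. Blank imports are used just to keep the snippet compilable.
import (
	_ "k8s.io/api/apps/v1"
	_ "k8s.io/apimachinery/pkg/apis/meta/v1"
	_ "k8s.io/client-go/kubernetes"
)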

@@ -5,3 +5,5 @@ approvers:
- mfojtik
reviewers:
- sig-apps-reviewers
labels:
- sig/apps


@@ -34,6 +34,7 @@ import (
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/controller/job"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
@@ -298,7 +299,7 @@ func newTestCronJob(name, schedule string, concurrencyPolicy batchv1beta1.Concur
Containers: []v1.Container{
{
Name: "c",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
VolumeMounts: []v1.VolumeMount{
{
MountPath: "/data",

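The cronjob test above stops hard-coding "busybox" and resolves the image through the shared e2e image helper instead. A minimal sketch of that pattern; the helper function and field values below are illustrative, not part of the commit:

package apps

import (
	"k8s.io/api/core/v1"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

// busyBoxContainer sketches the pattern adopted above: the container image is
// resolved via the e2e image registry helper rather than a bare "busybox"
// string, so registry and tag are managed in one place.
func busyBoxContainer() v1.Container {
	return v1.Container{
		Name:  "c",
		Image: imageutils.GetE2EImage(imageutils.BusyBox),
		VolumeMounts: []v1.VolumeMount{
			{Name: "data", MountPath: "/data"},
		},
	}
}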

@@ -24,8 +24,7 @@ import (
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
apierrs "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
@@ -54,6 +53,10 @@ const (
daemonsetColorLabel = daemonsetLabelPrefix + "color"
)
// The annotation key scheduler.alpha.kubernetes.io/node-selector is for assigning
// node selector labels to namespaces
var NamespaceNodeSelectors = []string{"scheduler.alpha.kubernetes.io/node-selector"}
// This test must be run in serial because it assumes the Daemon Set pods will
// always get scheduled. If we run other tests in parallel, this may not
// happen. In the future, running in parallel may work if we have an eviction
@@ -100,7 +103,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ns = f.Namespace.Name
c = f.ClientSet
err := clearDaemonSetNodeLabels(c)
updatedNS, err := updateNamespaceAnnotations(c, ns)
Expect(err).NotTo(HaveOccurred())
ns = updatedNS.Name
err = clearDaemonSetNodeLabels(c)
Expect(err).NotTo(HaveOccurred())
})
@@ -495,6 +504,26 @@ func clearDaemonSetNodeLabels(c clientset.Interface) error {
return nil
}
// updateNamespaceAnnotations sets node selector related annotations on test namespaces to empty
func updateNamespaceAnnotations(c clientset.Interface, nsName string) (*v1.Namespace, error) {
nsClient := c.CoreV1().Namespaces()
ns, err := nsClient.Get(nsName, metav1.GetOptions{})
if err != nil {
return nil, err
}
if ns.Annotations == nil {
ns.Annotations = make(map[string]string)
}
for _, n := range NamespaceNodeSelectors {
ns.Annotations[n] = ""
}
return nsClient.Update(ns)
}
func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) {
nodeClient := c.CoreV1().Nodes()
var newNode *v1.Node
@@ -520,7 +549,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
newLabels, _ = separateDaemonSetNodeLabels(newNode.Labels)
return true, err
}
if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
if se, ok := err.(*apierrors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
framework.Logf("failed to update node due to resource version conflict")
return false, nil
}
@@ -734,7 +763,7 @@ func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *a
func waitFailedDaemonPodDeleted(c clientset.Interface, pod *v1.Pod) func() (bool, error) {
return func() (bool, error) {
if _, err := c.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err != nil {
if errors.IsNotFound(err) {
if apierrors.IsNotFound(err) {
return true, nil
}
return false, fmt.Errorf("failed to get failed daemon pod %q: %v", pod.Name, err)

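daemon_set.go now imports the errors package under a single apierrors alias and keeps checking the StatusError reason when a node update races with another writer. For reference, the same package also exposes an IsConflict helper; the function below is an illustrative sketch showing both forms, not code from the commit:

package apps

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// retriableNodeUpdate reports whether a failed node update hit a resource
// version conflict and should simply be retried. The type assertion mirrors
// the check in setDaemonSetNodeLabels; apierrors.IsConflict is the shorter
// equivalent offered by the same package.
func retriableNodeUpdate(err error) bool {
	if se, ok := err.(*apierrors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
		return true
	}
	return apierrors.IsConflict(err)
}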

@@ -38,9 +38,9 @@ import (
clientset "k8s.io/client-go/kubernetes"
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
"k8s.io/kubernetes/test/e2e/framework"
testutil "k8s.io/kubernetes/test/utils"
utilpointer "k8s.io/utils/pointer"
)
const (
@@ -70,16 +70,35 @@ var _ = SIGDescribe("Deployment", func() {
It("deployment reaping should cascade to its replica sets and pods", func() {
testDeleteDeployment(f)
})
It("RollingUpdateDeployment should delete old pods and create new ones", func() {
/*
Testname: Deployment RollingUpdate
Description: A conformant Kubernetes distribution MUST support the Deployment with RollingUpdate strategy.
*/
framework.ConformanceIt("RollingUpdateDeployment should delete old pods and create new ones", func() {
testRollingUpdateDeployment(f)
})
It("RecreateDeployment should delete old pods and create new ones", func() {
/*
Testname: Deployment Recreate
Description: A conformant Kubernetes distribution MUST support the Deployment with Recreate strategy.
*/
framework.ConformanceIt("RecreateDeployment should delete old pods and create new ones", func() {
testRecreateDeployment(f)
})
It("deployment should delete old replica sets", func() {
/*
Testname: Deployment RevisionHistoryLimit
Description: A conformant Kubernetes distribution MUST clean up Deployment's ReplicaSets based on
the Deployment's `.spec.revisionHistoryLimit`.
*/
framework.ConformanceIt("deployment should delete old replica sets", func() {
testDeploymentCleanUpPolicy(f)
})
It("deployment should support rollover", func() {
/*
Testname: Deployment Rollover
Description: A conformant Kubernetes distribution MUST support Deployment rollover,
i.e. allow arbitrary number of changes to desired state during rolling update
before the rollout finishes.
*/
framework.ConformanceIt("deployment should support rollover", func() {
testRolloverDeployment(f)
})
It("deployment should support rollback", func() {
@@ -91,7 +110,13 @@ var _ = SIGDescribe("Deployment", func() {
It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func() {
testDeploymentsControllerRef(f)
})
It("deployment should support proportional scaling", func() {
/*
Testname: Deployment Proportional Scaling
Description: A conformant Kubernetes distribution MUST support Deployment
proportional scaling, i.e. proportionally scale a Deployment's ReplicaSets
when a Deployment is scaled.
*/
framework.ConformanceIt("deployment should support proportional scaling", func() {
testProportionalScalingDeployment(f)
})
// TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues

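Besides promoting several deployment tests to framework.ConformanceIt, the file switches its pointer helpers from k8s.io/kubernetes/pkg/util/pointer to the external k8s.io/utils/pointer module. The helper names are unchanged; the function below is an illustrative sketch, not part of the commit:

package apps

import (
	apps "k8s.io/api/apps/v1"
	utilpointer "k8s.io/utils/pointer"
)

// withRevisionHistoryLimit illustrates the import move: only the import path
// changes, the helper (here Int32Ptr) keeps its name and signature.
func withRevisionHistoryLimit(d *apps.Deployment, limit int32) *apps.Deployment {
	d.Spec.RevisionHistoryLimit = utilpointer.Int32Ptr(limit)
	return d
}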

@@ -44,7 +44,7 @@ var _ = SIGDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring job reaches completions")
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
Expect(err).NotTo(HaveOccurred())
})
@@ -63,7 +63,7 @@ var _ = SIGDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring job reaches completions")
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
Expect(err).NotTo(HaveOccurred())
})
@@ -84,7 +84,7 @@ var _ = SIGDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring job reaches completions")
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
Expect(err).NotTo(HaveOccurred())
})

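The job tests now call framework.WaitForJobComplete instead of WaitForJobFinish. Roughly, such a helper polls the Job until .status.succeeded reaches the requested number of completions; the sketch below is an approximation with illustrative interval and timeout values, not the framework's actual implementation:

package apps

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForJobSucceeded approximates a "wait for job complete" helper: poll the
// Job until .status.succeeded reaches the requested number of completions.
func waitForJobSucceeded(c clientset.Interface, ns, jobName string, completions int32) error {
	return wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) {
		job, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return job.Status.Succeeded >= completions, nil
	})
}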

@@ -106,12 +106,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
f := framework.NewDefaultFramework("network-partition")
var c clientset.Interface
var ns string
ignoreLabels := framework.ImagePullerLabels
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
_, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
_, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
Expect(err).NotTo(HaveOccurred())
// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
@@ -197,7 +196,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
go controller.Run(stopCh)
By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
host := framework.GetNodeExternalIP(&node)
host, err := framework.GetNodeExternalIP(&node)
framework.ExpectNoError(err)
master := framework.GetMasterAddress(c)
defer func() {
By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
@@ -574,7 +574,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
go controller.Run(stopCh)
By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
host := framework.GetNodeExternalIP(&node)
host, err := framework.GetNodeExternalIP(&node)
framework.ExpectNoError(err)
master := framework.GetMasterAddress(c)
defer func() {
By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))


@@ -17,6 +17,7 @@ limitations under the License.
package apps
import (
"context"
"fmt"
"strings"
"time"
@@ -31,6 +32,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -248,6 +250,14 @@ var _ = SIGDescribe("StatefulSet", func() {
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
})
// This can't be Conformance yet because it depends on a default
// StorageClass and a dynamic provisioner.
It("should perform rolling updates and roll backs of template modifications with PVCs", func() {
By("Creating a new StatefulSet with PVCs")
*(ss.Spec.Replicas) = 3
rollbackTest(c, ns, ss)
})
/*
Release : v1.9
Testname: StatefulSet, Rolling Update
@@ -256,116 +266,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ConformanceIt("should perform rolling updates and roll backs of template modifications", func() {
By("Creating a new StatefulSet")
ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
sst := framework.NewStatefulSetTester(c)
sst.SetHttpProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).To(Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
ss.Namespace, ss.Name, updateRevision, currentRevision))
pods := sst.GetPodList(ss)
for i := range pods.Items {
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
currentRevision))
}
sst.SortStatefulPods(pods)
sst.BreakPodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
newImage := NewNginxImage
oldImage := ss.Spec.Template.Spec.Containers[0].Image
By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage))
Expect(oldImage).NotTo(Equal(newImage), "Incorrect test setup: should update to a different image")
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
Expect(err).NotTo(HaveOccurred())
By("Creating a new revision")
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
"Current revision should not equal update revision during rolling update")
By("Updating Pods in reverse ordinal order")
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
sst.RestorePodHttpProbe(ss, &pods.Items[1])
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
updateRevision))
for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(newImage),
fmt.Sprintf(" Pod %s/%s has image %s not have new image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
newImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(updateRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to update revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
updateRevision))
}
By("Rolling back to a previous revision")
sst.BreakPodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
priorRevision := currentRevision
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = oldImage
})
Expect(err).NotTo(HaveOccurred())
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
"Current revision should not equal update revision during roll back")
Expect(priorRevision).To(Equal(updateRevision),
"Prior revision should equal update revision during roll back")
By("Rolling back update in reverse ordinal order")
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
sst.RestorePodHttpProbe(ss, &pods.Items[1])
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(priorRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
updateRevision))
for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(oldImage),
fmt.Sprintf("Pod %s/%s has image %s not equal to previous image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
oldImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(priorRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to prior revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
priorRevision))
}
rollbackTest(c, ns, ss)
})
/*
@@ -700,7 +601,9 @@ var _ = SIGDescribe("StatefulSet", func() {
By("Verifying that stateful set " + ssName + " was scaled up in order")
expectedOrder := []string{ssName + "-0", ssName + "-1", ssName + "-2"}
_, err = watch.Until(framework.StatefulSetTimeout, watcher, func(event watch.Event) (bool, error) {
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulSetTimeout)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) {
if event.Type != watch.Added {
return false, nil
}
@@ -731,7 +634,9 @@ var _ = SIGDescribe("StatefulSet", func() {
By("Verifying that stateful set " + ssName + " was scaled down in reverse order")
expectedOrder = []string{ssName + "-2", ssName + "-1", ssName + "-0"}
_, err = watch.Until(framework.StatefulSetTimeout, watcher, func(event watch.Event) (bool, error) {
ctx, cancel = watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulSetTimeout)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) {
if event.Type != watch.Deleted {
return false, nil
}
@@ -810,7 +715,7 @@ var _ = SIGDescribe("StatefulSet", func() {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
Ports: []v1.ContainerPort{conflictingPort},
},
},
@@ -837,8 +742,10 @@ var _ = SIGDescribe("StatefulSet", func() {
By("Waiting until stateful pod " + statefulPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name)
w, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: statefulPodName}))
framework.ExpectNoError(err)
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulPodTimeout)
defer cancel()
// we need to get UID from pod in any state and wait until the stateful set controller removes the pod at least once
_, err = watch.Until(framework.StatefulPodTimeout, w, func(event watch.Event) (bool, error) {
_, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
pod := event.Object.(*v1.Pod)
switch event.Type {
case watch.Deleted:
@@ -862,7 +769,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ExpectNoError(err)
By("Waiting when stateful pod " + statefulPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state")
// we may catch delete event, thats why we are waiting for running phase like this, and not with watch.Until
// we may catch delete event, that's why we are waiting for running phase like this, and not with watchtools.UntilWithoutRetry
Eventually(func() error {
statefulPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(statefulPodName, metav1.GetOptions{})
if err != nil {
@@ -1176,3 +1083,119 @@ func pollReadWithTimeout(statefulPod statefulPodTester, statefulPodNumber int, k
}
return err
}
// This function is used by two tests to test StatefulSet rollbacks: one using
// PVCs and one using no storage.
func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
sst := framework.NewStatefulSetTester(c)
sst.SetHttpProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).To(Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
ss.Namespace, ss.Name, updateRevision, currentRevision))
pods := sst.GetPodList(ss)
for i := range pods.Items {
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
currentRevision))
}
sst.SortStatefulPods(pods)
err = sst.BreakPodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
newImage := NewNginxImage
oldImage := ss.Spec.Template.Spec.Containers[0].Image
By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage))
Expect(oldImage).NotTo(Equal(newImage), "Incorrect test setup: should update to a different image")
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
Expect(err).NotTo(HaveOccurred())
By("Creating a new revision")
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
"Current revision should not equal update revision during rolling update")
By("Updating Pods in reverse ordinal order")
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
err = sst.RestorePodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
updateRevision))
for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(newImage),
fmt.Sprintf(" Pod %s/%s has image %s not have new image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
newImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(updateRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to update revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
updateRevision))
}
By("Rolling back to a previous revision")
err = sst.BreakPodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
priorRevision := currentRevision
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = oldImage
})
Expect(err).NotTo(HaveOccurred())
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
"Current revision should not equal update revision during roll back")
Expect(priorRevision).To(Equal(updateRevision),
"Prior revision should equal update revision during roll back")
By("Rolling back update in reverse ordinal order")
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
sst.RestorePodHttpProbe(ss, &pods.Items[1])
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(priorRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
updateRevision))
for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(oldImage),
fmt.Sprintf("Pod %s/%s has image %s not equal to previous image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
oldImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(priorRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to prior revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
priorRevision))
}
}

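The statefulset tests replace watch.Until(timeout, w, cond) with a context-bounded wait: watchtools.ContextWithOptionalTimeout plus watchtools.UntilWithoutRetry from k8s.io/client-go/tools/watch. A minimal sketch of that pattern; the helper name, selector, and condition are illustrative:

package apps

import (
	"context"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	clientset "k8s.io/client-go/kubernetes"
	watchtools "k8s.io/client-go/tools/watch"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForPodAdded shows the migration pattern used above: instead of
// watch.Until(timeout, w, cond), bound the wait with a context and use
// watchtools.UntilWithoutRetry.
func waitForPodAdded(c clientset.Interface, ns, podName string) error {
	w, err := c.CoreV1().Pods(ns).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: podName}))
	if err != nil {
		return err
	}
	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulPodTimeout)
	defer cancel()
	_, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
		pod, ok := event.Object.(*v1.Pod)
		if !ok {
			return false, nil
		}
		return event.Type == watch.Added && pod.Name == podName, nil
	})
	return err
}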

@@ -31,7 +31,7 @@ var (
CronJobGroupVersionResourceBeta = schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"}
NautilusImage = imageutils.GetE2EImage(imageutils.Nautilus)
KittenImage = imageutils.GetE2EImage(imageutils.Kitten)
NginxImage = imageutils.GetE2EImage(imageutils.NginxSlim)
NewNginxImage = imageutils.GetE2EImage(imageutils.NginxSlimNew)
NginxImage = imageutils.GetE2EImage(imageutils.Nginx)
NewNginxImage = imageutils.GetE2EImage(imageutils.NginxNew)
RedisImage = imageutils.GetE2EImage(imageutils.Redis)
)