Bumping k8s dependencies to 1.13

Cheng Xing
2018-11-16 14:08:25 -08:00
parent 305407125c
commit b4c0b68ec7
8002 changed files with 884099 additions and 276228 deletions

View File

@@ -29,6 +29,45 @@ go_library(
"//pkg/apis/rbac:go_default_library",
"//pkg/printers:go_default_library",
"//pkg/util/version:go_default_library",
"//staging/src/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
"//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/authorization/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/util/cert:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
"//staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1:go_default_library",
"//test/e2e/apps:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
@@ -37,44 +76,6 @@ go_library(
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
"//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/authorization/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/util/cert:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
"//vendor/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1:go_default_library",
],
)

View File

@@ -133,7 +133,7 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
// kubectl create -f deploy.yaml
deploymentName := "sample-apiserver-deployment"
etcdImage := "quay.io/coreos/etcd:v3.2.18"
etcdImage := "quay.io/coreos/etcd:v3.2.24"
podLabels := map[string]string{"app": "sample-apiserver", "apiserver": "true"}
replicas := int32(1)
zero := int64(0)
@@ -436,7 +436,7 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
unstruct := &unstructuredv1.Unstructured{}
err = unstruct.UnmarshalJSON(jsonFlunder)
framework.ExpectNoError(err, "unmarshalling test-flunder as unstructured for create using dynamic client")
unstruct, err = dynamicClient.Create(unstruct)
unstruct, err = dynamicClient.Create(unstruct, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating test-flunder using dynamic client")
// kubectl get flunders
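The dynamic client API changed in this bump: Create now takes an explicit metav1.CreateOptions argument, as the hunk above shows. A minimal sketch of the new call shape for the wardle flunders resource follows; the GVR, namespace, and function name are illustrative assumptions, not taken from the commit.

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
)

// createFlunder creates an unstructured flunder with the 1.13-era dynamic
// client, which now requires a metav1.CreateOptions value on Create.
func createFlunder(client dynamic.Interface, ns string, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
	gvr := schema.GroupVersionResource{Group: "wardle.k8s.io", Version: "v1alpha1", Resource: "flunders"}
	return client.Resource(gvr).Namespace(ns).Create(obj, metav1.CreateOptions{})
}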

View File

@@ -19,12 +19,17 @@ package apimachinery
import (
"fmt"
"math/rand"
"reflect"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/test/e2e/framework"
)
@@ -34,11 +39,10 @@ const numberOfTotalResources = 400
var _ = SIGDescribe("Servers with support for API chunking", func() {
f := framework.NewDefaultFramework("chunking")
It("should return chunks of results for list calls", func() {
BeforeEach(func() {
ns := f.Namespace.Name
c := f.ClientSet
client := c.CoreV1().PodTemplates(ns)
By("creating a large number of resources")
workqueue.Parallelize(20, numberOfTotalResources, func(i int) {
for tries := 3; tries >= 0; tries-- {
@@ -61,7 +65,12 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
}
Fail("Unable to create template %d, exiting", i)
})
})
It("should return chunks of results for list calls", func() {
ns := f.Namespace.Name
c := f.ClientSet
client := c.CoreV1().PodTemplates(ns)
By("retrieving those results in paged fashion several times")
for i := 0; i < 3; i++ {
opts := metav1.ListOptions{}
@@ -81,9 +90,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
if len(lastRV) == 0 {
lastRV = list.ResourceVersion
}
if lastRV != list.ResourceVersion {
Expect(list.ResourceVersion).To(Equal(lastRV))
}
Expect(list.ResourceVersion).To(Equal(lastRV))
for _, item := range list.Items {
Expect(item.Name).To(Equal(fmt.Sprintf("template-%04d", found)))
found++
@@ -101,4 +108,81 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
Expect(err).ToNot(HaveOccurred())
Expect(list.Items).To(HaveLen(numberOfTotalResources))
})
It("should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent", func() {
ns := f.Namespace.Name
c := f.ClientSet
client := c.CoreV1().PodTemplates(ns)
By("retrieving the first page")
oneTenth := int64(numberOfTotalResources / 10)
opts := metav1.ListOptions{}
opts.Limit = oneTenth
list, err := client.List(opts)
// TODO: kops PR job is still using etcd2, which prevents this feature from working. Remove this check when kops is upgraded to etcd3
if len(list.Items) > int(opts.Limit) {
framework.Skipf("ERROR: This cluster does not support chunking, which means it is running etcd2 and is not supported.")
}
Expect(err).ToNot(HaveOccurred())
firstToken := list.Continue
firstRV := list.ResourceVersion
framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, firstToken)
By("retrieving the second page until the token expires")
opts.Continue = firstToken
var inconsistentToken string
wait.Poll(20*time.Second, 2*storagebackend.DefaultCompactInterval, func() (bool, error) {
_, err := client.List(opts)
if err == nil {
framework.Logf("Token %s has not expired yet", firstToken)
return false, nil
}
if err != nil && !errors.IsResourceExpired(err) {
return false, err
}
framework.Logf("got error %s", err)
status, ok := err.(errors.APIStatus)
if !ok {
return false, fmt.Errorf("expect error to implement the APIStatus interface, got %v", reflect.TypeOf(err))
}
inconsistentToken = status.Status().ListMeta.Continue
if len(inconsistentToken) == 0 {
return false, fmt.Errorf("expect non-empty continue token")
}
framework.Logf("Retrieved inconsistent continue %s", inconsistentToken)
return true, nil
})
By("retrieving the second page again with the token received with the error message")
opts.Continue = inconsistentToken
list, err = client.List(opts)
Expect(err).ToNot(HaveOccurred())
Expect(list.ResourceVersion).ToNot(Equal(firstRV))
Expect(len(list.Items)).To(BeNumerically("==", opts.Limit))
found := oneTenth
for _, item := range list.Items {
Expect(item.Name).To(Equal(fmt.Sprintf("template-%04d", found)))
found++
}
By("retrieving all remaining pages")
opts.Continue = list.Continue
lastRV := list.ResourceVersion
for {
list, err := client.List(opts)
Expect(err).ToNot(HaveOccurred())
framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
Expect(len(list.Items)).To(BeNumerically("<=", opts.Limit))
Expect(list.ResourceVersion).To(Equal(lastRV))
for _, item := range list.Items {
Expect(item.Name).To(Equal(fmt.Sprintf("template-%04d", found)))
found++
}
if len(list.Continue) == 0 {
break
}
opts.Continue = list.Continue
}
Expect(found).To(BeNumerically("==", numberOfTotalResources))
})
})
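The tests above exercise paging with Limit and Continue, plus recovery from a compacted continue token via the 410 ResourceExpired error. Below is a hedged standalone sketch of that consumption pattern, not code from the commit; the helper name, the page size of 50, and the per-item print are placeholders.

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listAllPodTemplates pages through pod templates in ns. If the continue
// token outlives etcd compaction, the 410 error carries a fresh token that
// resumes from the last key at a newer resource version, which is exactly
// the behavior the test above verifies.
func listAllPodTemplates(c kubernetes.Interface, ns string) error {
	opts := metav1.ListOptions{Limit: 50} // page size is an arbitrary choice
	for {
		list, err := c.CoreV1().PodTemplates(ns).List(opts)
		if apierrors.IsResourceExpired(err) {
			// Pull the inconsistent continue token out of the error status.
			status, ok := err.(apierrors.APIStatus)
			if ok && status.Status().ListMeta.Continue != "" {
				opts.Continue = status.Status().ListMeta.Continue
				continue
			}
			return err
		}
		if err != nil {
			return err
		}
		for i := range list.Items {
			fmt.Println(list.Items[i].Name) // placeholder per-item work
		}
		if list.Continue == "" {
			return nil
		}
		opts.Continue = list.Continue
	}
}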

View File

@@ -21,7 +21,7 @@ import (
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/testserver"
"k8s.io/apiextensions-apiserver/test/integration/fixtures"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -63,14 +63,14 @@ var _ = SIGDescribe("CustomResourceDefinition Watch", func() {
framework.Failf("failed to initialize apiExtensionClient: %v", err)
}
noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped)
noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, f.DynamicClient)
noxuDefinition := fixtures.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped)
noxuDefinition, err = fixtures.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, f.DynamicClient)
if err != nil {
framework.Failf("failed to create CustomResourceDefinition: %v", err)
}
defer func() {
err = testserver.DeleteCustomResourceDefinition(noxuDefinition, apiExtensionClient)
err = fixtures.DeleteCustomResourceDefinition(noxuDefinition, apiExtensionClient)
if err != nil {
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
}
@@ -85,8 +85,8 @@ var _ = SIGDescribe("CustomResourceDefinition Watch", func() {
watchB, err := watchCRWithName(noxuResourceClient, watchCRNameB)
Expect(err).NotTo(HaveOccurred())
testCrA := testserver.NewNoxuInstance(ns, watchCRNameA)
testCrB := testserver.NewNoxuInstance(ns, watchCRNameB)
testCrA := fixtures.NewNoxuInstance(ns, watchCRNameA)
testCrB := fixtures.NewNoxuInstance(ns, watchCRNameB)
By("Creating first CR ")
testCrA, err = instantiateCustomResource(testCrA, noxuResourceClient, noxuDefinition)
@@ -125,7 +125,7 @@ func watchCRWithName(crdResourceClient dynamic.ResourceInterface, name string) (
}
func instantiateCustomResource(instanceToCreate *unstructured.Unstructured, client dynamic.ResourceInterface, definition *apiextensionsv1beta1.CustomResourceDefinition) (*unstructured.Unstructured, error) {
createdInstance, err := client.Create(instanceToCreate)
createdInstance, err := client.Create(instanceToCreate, metav1.CreateOptions{})
if err != nil {
return nil, err
}

View File

@@ -19,7 +19,7 @@ package apimachinery
import (
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/testserver"
"k8s.io/apiextensions-apiserver/test/integration/fixtures"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
@@ -34,9 +34,9 @@ var _ = SIGDescribe("CustomResourceDefinition resources", func() {
Context("Simple CustomResourceDefinition", func() {
/*
Testname: crd-creation-test
Description: Create a random Custom Resource Definition and make sure
the API returns success.
Release : v1.9
Testname: Custom Resource Definition, create
Description: Create an API extension client, define a random custom resource definition, and create the custom resource. The API server MUST be able to create the custom resource.
*/
framework.ConformanceIt("creating/deleting custom resource definition objects works ", func() {
@@ -52,16 +52,16 @@ var _ = SIGDescribe("CustomResourceDefinition resources", func() {
framework.Failf("failed to initialize apiExtensionClient: %v", err)
}
randomDefinition := testserver.NewRandomNameCustomResourceDefinition(v1beta1.ClusterScoped)
randomDefinition := fixtures.NewRandomNameCustomResourceDefinition(v1beta1.ClusterScoped)
//create CRD and waits for the resource to be recognized and available.
randomDefinition, err = testserver.CreateNewCustomResourceDefinition(randomDefinition, apiExtensionClient, f.DynamicClient)
randomDefinition, err = fixtures.CreateNewCustomResourceDefinition(randomDefinition, apiExtensionClient, f.DynamicClient)
if err != nil {
framework.Failf("failed to create CustomResourceDefinition: %v", err)
}
defer func() {
err = testserver.DeleteCustomResourceDefinition(randomDefinition, apiExtensionClient)
err = fixtures.DeleteCustomResourceDefinition(randomDefinition, apiExtensionClient)
if err != nil {
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
}
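Both CRD suites in this commit migrate from the integration testserver package to its renamed fixtures home. The sketch below condenses the create/delete cycle with the renamed helpers; the wrapper function is hypothetical and error handling is trimmed relative to the tests.

package main

import (
	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	"k8s.io/apiextensions-apiserver/test/integration/fixtures"
	"k8s.io/client-go/dynamic"
)

// withRandomCRD drives the same cycle as the tests: build a randomly named
// CRD, register it and wait for it to be served, then tear it down.
func withRandomCRD(apiExtensionClient clientset.Interface, dynamicClient dynamic.Interface) error {
	crd := fixtures.NewRandomNameCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped)
	crd, err := fixtures.CreateNewCustomResourceDefinition(crd, apiExtensionClient, dynamicClient)
	if err != nil {
		return err
	}
	// The tests Failf on cleanup errors; this sketch simply drops them.
	defer fixtures.DeleteCustomResourceDefinition(crd, apiExtensionClient)
	// ... create and watch instances of the custom resource here ...
	return nil
}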

View File

@@ -27,7 +27,7 @@ import (
"k8s.io/api/extensions/v1beta1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apiextensionstestserver "k8s.io/apiextensions-apiserver/test/integration/testserver"
apiextensionstestserver "k8s.io/apiextensions-apiserver/test/integration/fixtures"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -104,7 +104,7 @@ func getPodTemplateSpec(labels map[string]string) v1.PodTemplateSpec {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
},
},
},
@@ -128,10 +128,6 @@ func newOwnerDeployment(f *framework.Framework, deploymentName string, labels ma
}
}
func getSelector() map[string]string {
return map[string]string{"app": "gc-test"}
}
func newOwnerRC(f *framework.Framework, name string, replicas int32, labels map[string]string) *v1.ReplicationController {
template := getPodTemplateSpec(labels)
return &v1.ReplicationController{
@@ -151,45 +147,6 @@ func newOwnerRC(f *framework.Framework, name string, replicas int32, labels map[
}
}
// verifyRemainingDeploymentsReplicaSetsPods verifies that the numbers
// of the remaining deployments, replica sets, and pods are deploymentNum,
// rsNum, and podNum. It returns an error if communication with the API
// server fails.
func verifyRemainingDeploymentsReplicaSetsPods(
f *framework.Framework,
clientSet clientset.Interface,
deployment *v1beta1.Deployment,
deploymentNum, rsNum, podNum int,
) (bool, error) {
var ret = true
rs, err := clientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list rs: %v", err)
}
if len(rs.Items) != rsNum {
ret = false
By(fmt.Sprintf("expected %d rs, got %d rs", rsNum, len(rs.Items)))
}
deployments, err := clientSet.ExtensionsV1beta1().Deployments(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list deployments: %v", err)
}
if len(deployments.Items) != deploymentNum {
ret = false
By(fmt.Sprintf("expected %d Deployments, got %d Deployments", deploymentNum, len(deployments.Items)))
}
pods, err := clientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
if len(pods.Items) != podNum {
ret = false
By(fmt.Sprintf("expected %v Pods, got %d Pods", podNum, len(pods.Items)))
}
return ret, nil
}
func newGCPod(name string) *v1.Pod {
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
@@ -204,69 +161,77 @@ func newGCPod(name string) *v1.Pod {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
},
},
},
}
}
// verifyRemainingReplicationControllersPods verifies that the numbers of the remaining replication
// controllers and pods are rcNum and podNum. It returns an error if
// communication with the API server fails.
func verifyRemainingReplicationControllersPods(f *framework.Framework, clientSet clientset.Interface, rcNum, podNum int) (bool, error) {
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
pods, err := clientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
var ret = true
if len(pods.Items) != podNum {
ret = false
By(fmt.Sprintf("expected %d pods, got %d pods", podNum, len(pods.Items)))
}
rcs, err := rcClient.List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list replication controllers: %v", err)
}
if len(rcs.Items) != rcNum {
ret = false
By(fmt.Sprintf("expected %d RCs, got %d RCs", rcNum, len(rcs.Items)))
}
return ret, nil
}
// verifyRemainingCronJobsJobsPods verifies that the numbers of remaining cronjobs,
// jobs, and pods are cjNum, jobNum, and podNum. It returns an error if communication with the API server fails.
func verifyRemainingCronJobsJobsPods(f *framework.Framework, clientSet clientset.Interface,
cjNum, jobNum, podNum int) (bool, error) {
// verifyRemainingObjects verifies that the number of remaining objects of each kind listed in objects matches the expected count.
// It returns an error if communication with the API server fails.
func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (bool, error) {
var ret = true
cronJobs, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list cronjobs: %v", err)
}
if len(cronJobs.Items) != cjNum {
ret = false
By(fmt.Sprintf("expected %d cronjobs, got %d cronjobs", cjNum, len(cronJobs.Items)))
}
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list jobs: %v", err)
}
if len(jobs.Items) != jobNum {
ret = false
By(fmt.Sprintf("expected %d jobs, got %d jobs", jobNum, len(jobs.Items)))
}
pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
if len(pods.Items) != podNum {
ret = false
By(fmt.Sprintf("expected %d pods, got %d pods", podNum, len(pods.Items)))
for object, num := range objects {
switch object {
case "Pods":
pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list pods: %v", err)
}
if len(pods.Items) != num {
ret = false
By(fmt.Sprintf("expected %d pods, got %d pods", num, len(pods.Items)))
}
case "Deployments":
deployments, err := f.ClientSet.ExtensionsV1beta1().Deployments(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list deployments: %v", err)
}
if len(deployments.Items) != num {
ret = false
By(fmt.Sprintf("expected %d Deployments, got %d Deployments", num, len(deployments.Items)))
}
case "ReplicaSets":
rs, err := f.ClientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list rs: %v", err)
}
if len(rs.Items) != num {
ret = false
By(fmt.Sprintf("expected %d rs, got %d rs", num, len(rs.Items)))
}
case "ReplicationControllers":
rcs, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list replication controllers: %v", err)
}
if len(rcs.Items) != num {
ret = false
By(fmt.Sprintf("expected %d RCs, got %d RCs", num, len(rcs.Items)))
}
case "CronJobs":
cronJobs, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list cronjobs: %v", err)
}
if len(cronJobs.Items) != num {
ret = false
By(fmt.Sprintf("expected %d cronjobs, got %d cronjobs", num, len(cronJobs.Items)))
}
case "Jobs":
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list jobs: %v", err)
}
if len(jobs.Items) != num {
ret = false
By(fmt.Sprintf("expected %d jobs, got %d jobs", num, len(jobs.Items)))
}
default:
return false, fmt.Errorf("object %s is not supported", object)
}
}
return ret, nil
@@ -312,7 +277,7 @@ func newCronJob(name, schedule string) *batchv1beta1.CronJob {
Containers: []v1.Container{
{
Name: "c",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sleep", "300"},
},
},
@@ -336,10 +301,9 @@ var _ = SIGDescribe("Garbage collector", func() {
f := framework.NewDefaultFramework("gc")
/*
Testname: garbage-collector-delete-rc--propagation-background
Description: Ensure that if deleteOptions.PropagationPolicy is set to Background,
then deleting a ReplicationController should cause pods created
by that RC to also be deleted.
Release : v1.9
Testname: Garbage Collector, delete replication controller, propagation policy background
Description: Create a replication controller with 2 Pods. Once RC is created and the first Pod is created, delete RC with deleteOptions.PropagationPolicy set to Background. Deleting the Replication Controller MUST cause pods created by that RC to be deleted.
*/
framework.ConformanceIt("should delete pods created by rc when not orphaning", func() {
clientSet := f.ClientSet
@@ -380,7 +344,8 @@ var _ = SIGDescribe("Garbage collector", func() {
By("wait for all pods to be garbage collected")
// wait for the RCs and Pods to reach the expected numbers.
if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
return verifyRemainingReplicationControllersPods(f, clientSet, 0, 0)
objects := map[string]int{"ReplicationControllers": 0, "Pods": 0}
return verifyRemainingObjects(f, objects)
}); err != nil {
framework.Failf("failed to wait for all pods to be deleted: %v", err)
remainingPods, err := podClient.List(metav1.ListOptions{})
@@ -394,10 +359,9 @@ var _ = SIGDescribe("Garbage collector", func() {
})
/*
Testname: garbage-collector-delete-rc--propagation-orphan
Description: Ensure that if deleteOptions.PropagationPolicy is set to Orphan,
then deleting a ReplicationController should cause pods created
by that RC to be orphaned.
Release : v1.9
Testname: Garbage Collector, delete replication controller, propagation policy orphan
Description: Create a replication controller with maximum allocatable Pods between 10 and 100 replicas. Once RC is created and all the Pods are created, delete RC with deleteOptions.PropagationPolicy set to Orphan. Deleting the Replication Controller MUST cause pods created by that RC to be orphaned.
*/
framework.ConformanceIt("should orphan pods created by rc if delete options say so", func() {
clientSet := f.ClientSet
@@ -463,6 +427,8 @@ var _ = SIGDescribe("Garbage collector", func() {
gatherMetrics(f)
})
// deleteOptions.OrphanDependents is deprecated as of 1.7; PropagationPolicy is preferred instead.
// Discussion is tracked under https://github.com/kubernetes/kubernetes/issues/65427 to promote for conformance in future.
It("should orphan pods created by rc if deleteOptions.OrphanDependents is nil", func() {
clientSet := f.ClientSet
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
@@ -508,10 +474,9 @@ var _ = SIGDescribe("Garbage collector", func() {
})
/*
Testname: garbage-collector-delete-deployment-propagation-background
Description: Ensure that if deleteOptions.PropagationPolicy is set to Background,
then deleting a Deployment should cause ReplicaSets created
by that Deployment to also be deleted.
Release : v1.9
Testname: Garbage Collector, delete deployment, propagation policy background
Description: Create a deployment with a replicaset. Once the replicaset is created, delete the deployment with deleteOptions.PropagationPolicy set to Background. Deleting the deployment MUST delete the replicaset created by the deployment, and the Pods that belong to the deployment MUST also be deleted.
*/
framework.ConformanceIt("should delete RS created by deployment when not orphaning", func() {
clientSet := f.ClientSet
@@ -547,7 +512,8 @@ var _ = SIGDescribe("Garbage collector", func() {
}
By("wait for all rs to be garbage collected")
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
return verifyRemainingDeploymentsReplicaSetsPods(f, clientSet, deployment, 0, 0, 0)
objects := map[string]int{"Deployments": 0, "ReplicaSets": 0, "Pods": 0}
return verifyRemainingObjects(f, objects)
})
if err != nil {
errList := make([]error, 0)
@@ -567,10 +533,9 @@ var _ = SIGDescribe("Garbage collector", func() {
})
/*
Testname: garbage-collector-delete-deployment-propagation-true
Description: Ensure that if deleteOptions.PropagationPolicy is set to Orphan,
then deleting a Deployment should cause ReplicaSets created
by that Deployment to be orphaned.
Release : v1.9
Testname: Garbage Collector, delete deployment, propagation policy orphan
Description: Create a deployment with a replicaset. Once the replicaset is created, delete the deployment with deleteOptions.PropagationPolicy set to Orphan. Deleting the deployment MUST cause the replicaset created by the deployment to be orphaned, and the Pods created by the deployment MUST be orphaned as well.
*/
framework.ConformanceIt("should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan", func() {
clientSet := f.ClientSet
@@ -606,7 +571,8 @@ var _ = SIGDescribe("Garbage collector", func() {
}
By("wait for 30 seconds to see if the garbage collector mistakenly deletes the rs")
time.Sleep(30 * time.Second)
ok, err := verifyRemainingDeploymentsReplicaSetsPods(f, clientSet, deployment, 0, 1, 2)
objects := map[string]int{"Deployments": 0, "ReplicaSets": 1, "Pods": 2}
ok, err := verifyRemainingObjects(f, objects)
if err != nil {
framework.Failf("Unexpected error while verifying remaining deployments, rs, and pods: %v", err)
}
@@ -641,9 +607,9 @@ var _ = SIGDescribe("Garbage collector", func() {
})
/*
Testname: garbage-collector-delete-rc-after-owned-pods
Description: Ensure that if deleteOptions.PropagationPolicy is set to Foreground,
then a ReplicationController should not be deleted until all its dependent pods are deleted.
Release : v1.9
Testname: Garbage Collector, delete replication controller, after owned pods
Description: Create a replication controller with maximum allocatable Pods between 10 and 100 replicas. Once RC is created and all the Pods are created, delete RC with deleteOptions.PropagationPolicy set to Foreground. Deleting the Replication Controller MUST cause pods created by that RC to be deleted before the RC is deleted.
*/
framework.ConformanceIt("should keep the rc around until all its pods are deleted if the deleteOptions says so", func() {
clientSet := f.ClientSet
@@ -729,9 +695,9 @@ var _ = SIGDescribe("Garbage collector", func() {
// TODO: this should be an integration test
/*
Testname: garbage-collector-multiple-owners
Description: Ensure that if a Pod has multiple valid owners, it will not be deleted
when one of those owners gets deleted.
Release : v1.9
Testname: Garbage Collector, multiple owners
Description: Create a replication controller RC1, with maximum allocatable Pods between 10 and 100 replicas. Create a second replication controller RC2 and set RC2 as owner for half of those replicas. Once RC1 is created and all the Pods are created, delete RC1 with deleteOptions.PropagationPolicy set to Foreground. Half of the Pods, those that have RC2 as an owner, MUST not be deleted but MUST have a deletion timestamp. Deleting the Replication Controller MUST not delete Pods that are owned by multiple replication controllers.
*/
framework.ConformanceIt("should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted", func() {
clientSet := f.ClientSet
@@ -843,9 +809,9 @@ var _ = SIGDescribe("Garbage collector", func() {
// TODO: should be an integration test
/*
Testname: garbage-collector-dependency-cycle
Description: Ensure that a dependency cycle will
not block the garbage collector.
Release : v1.9
Testname: Garbage Collector, dependency cycle
Description: Create three pods and patch them with owner references such that pod1 has pod3, pod2 has pod1, and pod3 has pod2 as an owner reference, respectively. Deleting pod1 MUST delete all three pods. The dependency cycle MUST not block the garbage collection.
*/
framework.ConformanceIt("should not be blocked by dependency circle", func() {
clientSet := f.ClientSet
@@ -939,7 +905,7 @@ var _ = SIGDescribe("Garbage collector", func() {
},
},
}
persistedOwner, err := resourceClient.Create(owner)
persistedOwner, err := resourceClient.Create(owner, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create owner resource %q: %v", ownerName, err)
}
@@ -964,7 +930,7 @@ var _ = SIGDescribe("Garbage collector", func() {
},
},
}
persistedDependent, err := resourceClient.Create(dependent)
persistedDependent, err := resourceClient.Create(dependent, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
}
@@ -1040,7 +1006,7 @@ var _ = SIGDescribe("Garbage collector", func() {
},
},
}
persistedOwner, err := resourceClient.Create(owner)
persistedOwner, err := resourceClient.Create(owner, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create owner resource %q: %v", ownerName, err)
}
@@ -1065,7 +1031,7 @@ var _ = SIGDescribe("Garbage collector", func() {
},
},
}
_, err = resourceClient.Create(dependent)
_, err = resourceClient.Create(dependent, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
}
@@ -1127,7 +1093,8 @@ var _ = SIGDescribe("Garbage collector", func() {
}
By("Verify if cronjob does not leave jobs nor pods behind")
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
return verifyRemainingCronJobsJobsPods(f, f.ClientSet, 0, 0, 0)
objects := map[string]int{"CronJobs": 0, "Jobs": 0, "Pods": 0}
return verifyRemainingObjects(f, objects)
})
if err != nil {
framework.Failf("Failed to wait for all jobs and pods to be deleted: %v", err)
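The garbage-collector descriptions above rest on two building blocks: owner references, which form the ownership graph (including the deliberate cycle), and deleteOptions.PropagationPolicy, which selects background, orphan, or foreground deletion. A brief illustrative sketch of both, with hypothetical helper names rather than code from the commit:

package main

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ownedBy returns an OwnerReference pointing at owner; the dependency-cycle
// test patches three pods with references like this to close the loop.
func ownedBy(owner *v1.Pod) metav1.OwnerReference {
	return metav1.OwnerReference{
		APIVersion: "v1",
		Kind:       "Pod",
		Name:       owner.Name,
		UID:        owner.UID,
	}
}

// backgroundDelete builds the delete options the background-propagation
// tests pass; swap in DeletePropagationOrphan or DeletePropagationForeground
// for the other cases.
func backgroundDelete() *metav1.DeleteOptions {
	policy := metav1.DeletePropagationBackground
	return &metav1.DeleteOptions{PropagationPolicy: &policy}
}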

View File

@@ -49,7 +49,7 @@ func stagingClientPod(name, value string) v1.Pod {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
Ports: []v1.ContainerPort{{ContainerPort: 80}},
},
},
@@ -70,7 +70,7 @@ func testingPod(name, value string) v1.Pod {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
Ports: []v1.ContainerPort{{ContainerPort: 80}},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
@@ -243,7 +243,7 @@ func newTestingCronJob(name string, value string) *batchv1beta1.CronJob {
Containers: []v1.Container{
{
Name: "c",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
VolumeMounts: []v1.VolumeMount{
{
MountPath: "/data",

View File

@@ -262,10 +262,18 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
f := framework.NewDefaultFramework("namespaces")
It("should ensure that all pods are removed when a namespace is deleted.",
/*
Testname: namespace-deletion-removes-pods
Description: Ensure that if a namespace is deleted then all pods are removed from that namespace.
*/
framework.ConformanceIt("should ensure that all pods are removed when a namespace is deleted",
func() { ensurePodsAreRemovedWhenNamespaceIsDeleted(f) })
It("should ensure that all services are removed when a namespace is deleted.",
/*
Testname: namespace-deletion-removes-services
Description: Ensure that if a namespace is deleted then all services are removed from that namespace.
*/
framework.ConformanceIt("should ensure that all services are removed when a namespace is deleted",
func() { ensureServicesAreRemovedWhenNamespaceIsDeleted(f) })
It("should delete fast enough (90 percent of 100 namespaces in 150 seconds)",

View File

@@ -52,21 +52,23 @@ const (
roleBindingName = "webhook-auth-reader"
// The webhook configuration names should not be reused between test instances.
crWebhookConfigName = "e2e-test-webhook-config-cr"
webhookConfigName = "e2e-test-webhook-config"
mutatingWebhookConfigName = "e2e-test-mutating-webhook-config"
podMutatingWebhookConfigName = "e2e-test-mutating-webhook-pod"
crMutatingWebhookConfigName = "e2e-test-mutating-webhook-config-cr"
webhookFailClosedConfigName = "e2e-test-webhook-fail-closed"
webhookForWebhooksConfigName = "e2e-test-webhook-for-webhooks-config"
removableValidatingHookName = "e2e-test-should-be-removable-validating-webhook-config"
removableMutatingHookName = "e2e-test-should-be-removable-mutating-webhook-config"
crdWebhookConfigName = "e2e-test-webhook-config-crd"
crWebhookConfigName = "e2e-test-webhook-config-cr"
webhookConfigName = "e2e-test-webhook-config"
attachingPodWebhookConfigName = "e2e-test-webhook-config-attaching-pod"
mutatingWebhookConfigName = "e2e-test-mutating-webhook-config"
podMutatingWebhookConfigName = "e2e-test-mutating-webhook-pod"
crMutatingWebhookConfigName = "e2e-test-mutating-webhook-config-cr"
webhookFailClosedConfigName = "e2e-test-webhook-fail-closed"
webhookForWebhooksConfigName = "e2e-test-webhook-for-webhooks-config"
removableValidatingHookName = "e2e-test-should-be-removable-validating-webhook-config"
removableMutatingHookName = "e2e-test-should-be-removable-mutating-webhook-config"
crdWebhookConfigName = "e2e-test-webhook-config-crd"
skipNamespaceLabelKey = "skip-webhook-admission"
skipNamespaceLabelValue = "yes"
skippedNamespaceName = "exempted-namesapce"
disallowedPodName = "disallowed-pod"
toBeAttachedPodName = "to-be-attached-pod"
hangingPodName = "hanging-pod"
disallowedConfigMapName = "disallowed-configmap"
allowedConfigMapName = "allowed-configmap"
@@ -117,6 +119,12 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
testWebhook(f)
})
It("Should be able to deny attaching pod", func() {
webhookCleanup := registerWebhookForAttachingPod(f, context)
defer webhookCleanup()
testAttachingPodWebhook(f)
})
It("Should be able to deny custom resource creation", func() {
testcrd, err := framework.CreateTestCRD(f)
if err != nil {
@@ -405,6 +413,53 @@ func registerWebhook(f *framework.Framework, context *certContext) func() {
}
}
func registerWebhookForAttachingPod(f *framework.Framework, context *certContext) func() {
client := f.ClientSet
By("Registering the webhook via the AdmissionRegistration API")
namespace := f.Namespace.Name
configName := attachingPodWebhookConfigName
// A webhook that cannot talk to the server, with a fail-open policy
failOpenHook := failingWebhook(namespace, "fail-open.k8s.io")
policyIgnore := v1beta1.Ignore
failOpenHook.FailurePolicy = &policyIgnore
_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: configName,
},
Webhooks: []v1beta1.Webhook{
{
Name: "deny-attaching-pod.k8s.io",
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Connect},
Rule: v1beta1.Rule{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"pods/attach"},
},
}},
ClientConfig: v1beta1.WebhookClientConfig{
Service: &v1beta1.ServiceReference{
Namespace: namespace,
Name: serviceName,
Path: strPtr("/pods/attach"),
},
CABundle: context.signingCert,
},
},
},
})
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
// The webhook configuration takes effect within 10s.
time.Sleep(10 * time.Second)
return func() {
client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(configName, nil)
}
}
func registerMutatingWebhookForConfigMap(f *framework.Framework, context *certContext) func() {
client := f.ClientSet
By("Registering the mutating configmap webhook via the AdmissionRegistration API")
@@ -576,7 +631,7 @@ func testWebhook(f *framework.Framework) {
pod = hangingPod(f)
_, err = client.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(BeNil())
expectedTimeoutErr := "request did not complete within allowed duration"
expectedTimeoutErr := "request did not complete within"
if !strings.Contains(err.Error(), expectedTimeoutErr) {
framework.Failf("expect timeout error %q, got %q", expectedTimeoutErr, err.Error())
}
@@ -642,6 +697,21 @@ func testWebhook(f *framework.Framework) {
Expect(err).To(BeNil())
}
func testAttachingPodWebhook(f *framework.Framework) {
By("create a pod")
client := f.ClientSet
pod := toBeAttachedPod(f)
_, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(BeNil())
By("'kubectl attach' the pod, should be denied by the webhook")
_, err = framework.NewKubectlCommand("attach", fmt.Sprintf("--namespace=%v", f.Namespace.Name), pod.Name, "-i", "-c=container1").Exec()
Expect(err).NotTo(BeNil())
if e, a := "attaching to pod 'to-be-attached-pod' is not allowed", err.Error(); !strings.Contains(a, e) {
framework.Failf("unexpected 'kubectl attach' error message. expected to contain %q, got %q", e, a)
}
}
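testAttachingPodWebhook asserts on a denial message produced by the webhook backend, which lives in the e2e webhook server image rather than in this diff. The sketch below shows what a handler mounted at /pods/attach plausibly does, denying the CONNECT request with that exact message; the AdmissionReview wiring and handler name are assumptions, not code from this commit.

package main

import (
	"encoding/json"
	"net/http"

	admissionv1beta1 "k8s.io/api/admission/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// servePodsAttach rejects every AdmissionReview it receives with the
// message the e2e test greps for. Hypothetical handler, illustrative only.
func servePodsAttach(w http.ResponseWriter, r *http.Request) {
	review := admissionv1beta1.AdmissionReview{}
	if err := json.NewDecoder(r.Body).Decode(&review); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	review.Response = &admissionv1beta1.AdmissionResponse{
		Allowed: false,
		Result: &metav1.Status{
			Message: "attaching to pod 'to-be-attached-pod' is not allowed",
		},
	}
	// Echo the request UID back so the API server can match the response.
	if review.Request != nil {
		review.Response.UID = review.Request.UID
	}
	if err := json.NewEncoder(w).Encode(review); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}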
// failingWebhook returns a webhook with a rule matching configmap create requests,
// but with an invalid client config, so the API server cannot communicate with it.
func failingWebhook(namespace, name string) v1beta1.Webhook {
@@ -930,6 +1000,22 @@ func hangingPod(f *framework.Framework) *v1.Pod {
}
}
func toBeAttachedPod(f *framework.Framework) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: toBeAttachedPodName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "container1",
Image: imageutils.GetPauseImageName(),
},
},
},
}
}
func nonCompliantConfigMap(f *framework.Framework) *v1.ConfigMap {
return &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -1100,7 +1186,7 @@ func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1beta1
},
},
}
_, err := customResourceClient.Create(crInstance)
_, err := customResourceClient.Create(crInstance, metav1.CreateOptions{})
Expect(err).NotTo(BeNil())
expectedErrMsg := "the custom resource contains unwanted data"
if !strings.Contains(err.Error(), expectedErrMsg) {
@@ -1123,7 +1209,7 @@ func testMutatingCustomResourceWebhook(f *framework.Framework, crd *apiextension
},
},
}
mutatedCR, err := customResourceClient.Create(cr)
mutatedCR, err := customResourceClient.Create(cr, metav1.CreateOptions{})
Expect(err).To(BeNil())
expectedCRData := map[string]interface{}{
"mutation-start": "yes",