Bumping k8s dependencies to 1.13

Cheng Xing
2018-11-16 14:08:25 -08:00
parent 305407125c
commit b4c0b68ec7
8002 changed files with 884099 additions and 276228 deletions


@@ -46,6 +46,18 @@ go_library(
"//pkg/cloudprovider/providers/azure:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/version:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/logs:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/plugin/pkg/client/auth:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/ginkgowrapper:go_default_library",
@@ -58,18 +70,6 @@ go_library(
"//vendor/github.com/onsi/ginkgo/config:go_default_library",
"//vendor/github.com/onsi/ginkgo/reporters:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/logs:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/plugin/pkg/client/auth:go_default_library",
],
)


@@ -29,6 +29,45 @@ go_library(
"//pkg/apis/rbac:go_default_library",
"//pkg/printers:go_default_library",
"//pkg/util/version:go_default_library",
"//staging/src/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
"//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/authorization/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/util/cert:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
"//staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1:go_default_library",
"//test/e2e/apps:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
@@ -37,44 +76,6 @@ go_library(
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
"//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/authorization/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/util/cert:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
"//vendor/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1:go_default_library",
],
)


@@ -133,7 +133,7 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
// kubectl create -f deploy.yaml
deploymentName := "sample-apiserver-deployment"
etcdImage := "quay.io/coreos/etcd:v3.2.18"
etcdImage := "quay.io/coreos/etcd:v3.2.24"
podLabels := map[string]string{"app": "sample-apiserver", "apiserver": "true"}
replicas := int32(1)
zero := int64(0)
@@ -436,7 +436,7 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
unstruct := &unstructuredv1.Unstructured{}
err = unstruct.UnmarshalJSON(jsonFlunder)
framework.ExpectNoError(err, "unmarshalling test-flunder as unstructured for create using dynamic client")
unstruct, err = dynamicClient.Create(unstruct)
unstruct, err = dynamicClient.Create(unstruct, metav1.CreateOptions{})
framework.ExpectNoError(err, "listing flunders using dynamic client")
// kubectl get flunders
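
A minimal sketch of the client-go 1.13 call pattern this hunk migrates to: dynamic-client write calls such as Create now take an explicit metav1.CreateOptions argument (plus optional subresources). The GVR and function name below are illustrative assumptions, not part of the commit.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
)

// createFlunder creates a custom resource using the 1.13 dynamic client.
func createFlunder(client dynamic.Interface, ns string, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
	// Illustrative GVR for the sample-apiserver's flunders resource.
	gvr := schema.GroupVersionResource{Group: "wardle.k8s.io", Version: "v1alpha1", Resource: "flunders"}
	// Pre-1.13 this was client.Resource(gvr).Namespace(ns).Create(obj);
	// from 1.13 an options struct is required.
	return client.Resource(gvr).Namespace(ns).Create(obj, metav1.CreateOptions{})
}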


@@ -19,12 +19,17 @@ package apimachinery
import (
"fmt"
"math/rand"
"reflect"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/test/e2e/framework"
)
@@ -34,11 +39,10 @@ const numberOfTotalResources = 400
var _ = SIGDescribe("Servers with support for API chunking", func() {
f := framework.NewDefaultFramework("chunking")
It("should return chunks of results for list calls", func() {
BeforeEach(func() {
ns := f.Namespace.Name
c := f.ClientSet
client := c.CoreV1().PodTemplates(ns)
By("creating a large number of resources")
workqueue.Parallelize(20, numberOfTotalResources, func(i int) {
for tries := 3; tries >= 0; tries-- {
@@ -61,7 +65,12 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
}
Fail("Unable to create template %d, exiting", i)
})
})
It("should return chunks of results for list calls", func() {
ns := f.Namespace.Name
c := f.ClientSet
client := c.CoreV1().PodTemplates(ns)
By("retrieving those results in paged fashion several times")
for i := 0; i < 3; i++ {
opts := metav1.ListOptions{}
@@ -81,9 +90,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
if len(lastRV) == 0 {
lastRV = list.ResourceVersion
}
if lastRV != list.ResourceVersion {
Expect(list.ResourceVersion).To(Equal(lastRV))
}
Expect(list.ResourceVersion).To(Equal(lastRV))
for _, item := range list.Items {
Expect(item.Name).To(Equal(fmt.Sprintf("template-%04d", found)))
found++
@@ -101,4 +108,81 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
Expect(err).ToNot(HaveOccurred())
Expect(list.Items).To(HaveLen(numberOfTotalResources))
})
It("should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent", func() {
ns := f.Namespace.Name
c := f.ClientSet
client := c.CoreV1().PodTemplates(ns)
By("retrieving the first page")
oneTenth := int64(numberOfTotalResources / 10)
opts := metav1.ListOptions{}
opts.Limit = oneTenth
list, err := client.List(opts)
// TODO: kops PR job is still using etcd2, which prevents this feature from working. Remove this check when kops is upgraded to etcd3
if len(list.Items) > int(opts.Limit) {
framework.Skipf("ERROR: This cluster does not support chunking, which means it is running etcd2 and not supported.")
}
Expect(err).ToNot(HaveOccurred())
firstToken := list.Continue
firstRV := list.ResourceVersion
framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, firstToken)
By("retrieving the second page until the token expires")
opts.Continue = firstToken
var inconsistentToken string
wait.Poll(20*time.Second, 2*storagebackend.DefaultCompactInterval, func() (bool, error) {
_, err := client.List(opts)
if err == nil {
framework.Logf("Token %s has not expired yet", firstToken)
return false, nil
}
if err != nil && !errors.IsResourceExpired(err) {
return false, err
}
framework.Logf("got error %s", err)
status, ok := err.(errors.APIStatus)
if !ok {
return false, fmt.Errorf("expect error to implement the APIStatus interface, got %v", reflect.TypeOf(err))
}
inconsistentToken = status.Status().ListMeta.Continue
if len(inconsistentToken) == 0 {
return false, fmt.Errorf("expect non empty continue token")
}
framework.Logf("Retrieved inconsistent continue %s", inconsistentToken)
return true, nil
})
By("retrieving the second page again with the token received with the error message")
opts.Continue = inconsistentToken
list, err = client.List(opts)
Expect(err).ToNot(HaveOccurred())
Expect(list.ResourceVersion).ToNot(Equal(firstRV))
Expect(len(list.Items)).To(BeNumerically("==", opts.Limit))
found := oneTenth
for _, item := range list.Items {
Expect(item.Name).To(Equal(fmt.Sprintf("template-%04d", found)))
found++
}
By("retrieving all remaining pages")
opts.Continue = list.Continue
lastRV := list.ResourceVersion
for {
list, err := client.List(opts)
Expect(err).ToNot(HaveOccurred())
framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
Expect(len(list.Items)).To(BeNumerically("<=", opts.Limit))
Expect(list.ResourceVersion).To(Equal(lastRV))
for _, item := range list.Items {
Expect(item.Name).To(Equal(fmt.Sprintf("template-%04d", found)))
found++
}
if len(list.Continue) == 0 {
break
}
opts.Continue = list.Continue
}
Expect(found).To(BeNumerically("==", numberOfTotalResources))
})
})
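
A minimal sketch of the chunked-list pattern these tests exercise, assuming the 1.13 typed client: page through results with ListOptions.Limit, follow the Continue token, and, if the token expires because the original resource version was compacted away, resume from the inconsistent token carried in the 410 ResourceExpired error status. The function name and page size are illustrative.

package example

import (
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listAllPodTemplates counts PodTemplates in ns one page at a time.
func listAllPodTemplates(c kubernetes.Interface, ns string) (int, error) {
	found := 0
	opts := metav1.ListOptions{Limit: 40} // illustrative page size
	for {
		list, err := c.CoreV1().PodTemplates(ns).List(opts)
		if errors.IsResourceExpired(err) {
			// The continue token outlived etcd's compaction window; the
			// error status carries a fresh, inconsistent token to resume from.
			status, ok := err.(errors.APIStatus)
			if !ok || status.Status().ListMeta.Continue == "" {
				return found, err
			}
			opts.Continue = status.Status().ListMeta.Continue
			continue
		}
		if err != nil {
			return found, err
		}
		found += len(list.Items)
		if list.Continue == "" {
			return found, nil
		}
		opts.Continue = list.Continue
	}
}

The inconsistent token trades a consistent snapshot for the ability to finish the list after compaction, which is exactly what the last test above verifies.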


@@ -21,7 +21,7 @@ import (
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/testserver"
"k8s.io/apiextensions-apiserver/test/integration/fixtures"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -63,14 +63,14 @@ var _ = SIGDescribe("CustomResourceDefinition Watch", func() {
framework.Failf("failed to initialize apiExtensionClient: %v", err)
}
noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped)
noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, f.DynamicClient)
noxuDefinition := fixtures.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped)
noxuDefinition, err = fixtures.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, f.DynamicClient)
if err != nil {
framework.Failf("failed to create CustomResourceDefinition: %v", err)
}
defer func() {
err = testserver.DeleteCustomResourceDefinition(noxuDefinition, apiExtensionClient)
err = fixtures.DeleteCustomResourceDefinition(noxuDefinition, apiExtensionClient)
if err != nil {
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
}
@@ -85,8 +85,8 @@ var _ = SIGDescribe("CustomResourceDefinition Watch", func() {
watchB, err := watchCRWithName(noxuResourceClient, watchCRNameB)
Expect(err).NotTo(HaveOccurred())
testCrA := testserver.NewNoxuInstance(ns, watchCRNameA)
testCrB := testserver.NewNoxuInstance(ns, watchCRNameB)
testCrA := fixtures.NewNoxuInstance(ns, watchCRNameA)
testCrB := fixtures.NewNoxuInstance(ns, watchCRNameB)
By("Creating first CR ")
testCrA, err = instantiateCustomResource(testCrA, noxuResourceClient, noxuDefinition)
@@ -125,7 +125,7 @@ func watchCRWithName(crdResourceClient dynamic.ResourceInterface, name string) (
}
func instantiateCustomResource(instanceToCreate *unstructured.Unstructured, client dynamic.ResourceInterface, definition *apiextensionsv1beta1.CustomResourceDefinition) (*unstructured.Unstructured, error) {
createdInstance, err := client.Create(instanceToCreate)
createdInstance, err := client.Create(instanceToCreate, metav1.CreateOptions{})
if err != nil {
return nil, err
}


@@ -19,7 +19,7 @@ package apimachinery
import (
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/testserver"
"k8s.io/apiextensions-apiserver/test/integration/fixtures"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
@@ -34,9 +34,9 @@ var _ = SIGDescribe("CustomResourceDefinition resources", func() {
Context("Simple CustomResourceDefinition", func() {
/*
Testname: crd-creation-test
Description: Create a random Custom Resource Definition and make sure
the API returns success.
Release : v1.9
Testname: Custom Resource Definition, create
Description: Create an API extension client, define a random custom resource definition, and create the custom resource. The API server MUST be able to create the custom resource.
*/
framework.ConformanceIt("creating/deleting custom resource definition objects works ", func() {
@@ -52,16 +52,16 @@ var _ = SIGDescribe("CustomResourceDefinition resources", func() {
framework.Failf("failed to initialize apiExtensionClient: %v", err)
}
randomDefinition := testserver.NewRandomNameCustomResourceDefinition(v1beta1.ClusterScoped)
randomDefinition := fixtures.NewRandomNameCustomResourceDefinition(v1beta1.ClusterScoped)
//create CRD and waits for the resource to be recognized and available.
randomDefinition, err = testserver.CreateNewCustomResourceDefinition(randomDefinition, apiExtensionClient, f.DynamicClient)
randomDefinition, err = fixtures.CreateNewCustomResourceDefinition(randomDefinition, apiExtensionClient, f.DynamicClient)
if err != nil {
framework.Failf("failed to create CustomResourceDefinition: %v", err)
}
defer func() {
err = testserver.DeleteCustomResourceDefinition(randomDefinition, apiExtensionClient)
err = fixtures.DeleteCustomResourceDefinition(randomDefinition, apiExtensionClient)
if err != nil {
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
}


@@ -27,7 +27,7 @@ import (
"k8s.io/api/extensions/v1beta1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apiextensionstestserver "k8s.io/apiextensions-apiserver/test/integration/testserver"
apiextensionstestserver "k8s.io/apiextensions-apiserver/test/integration/fixtures"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -104,7 +104,7 @@ func getPodTemplateSpec(labels map[string]string) v1.PodTemplateSpec {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
},
},
},
@@ -128,10 +128,6 @@ func newOwnerDeployment(f *framework.Framework, deploymentName string, labels ma
}
}
func getSelector() map[string]string {
return map[string]string{"app": "gc-test"}
}
func newOwnerRC(f *framework.Framework, name string, replicas int32, labels map[string]string) *v1.ReplicationController {
template := getPodTemplateSpec(labels)
return &v1.ReplicationController{
@@ -151,45 +147,6 @@ func newOwnerRC(f *framework.Framework, name string, replicas int32, labels map[
}
}
// verifyRemainingDeploymentsReplicaSetsPods verifies whether the numbers
// of remaining deployments, replica sets and pods are deploymentNum,
// rsNum and podNum. It returns an error if the communication with the API
// server fails.
func verifyRemainingDeploymentsReplicaSetsPods(
f *framework.Framework,
clientSet clientset.Interface,
deployment *v1beta1.Deployment,
deploymentNum, rsNum, podNum int,
) (bool, error) {
var ret = true
rs, err := clientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list rs: %v", err)
}
if len(rs.Items) != rsNum {
ret = false
By(fmt.Sprintf("expected %d rs, got %d rs", rsNum, len(rs.Items)))
}
deployments, err := clientSet.ExtensionsV1beta1().Deployments(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list deployments: %v", err)
}
if len(deployments.Items) != deploymentNum {
ret = false
By(fmt.Sprintf("expected %d Deployments, got %d Deployments", deploymentNum, len(deployments.Items)))
}
pods, err := clientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
if len(pods.Items) != podNum {
ret = false
By(fmt.Sprintf("expected %v Pods, got %d Pods", podNum, len(pods.Items)))
}
return ret, nil
}
func newGCPod(name string) *v1.Pod {
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
@@ -204,69 +161,77 @@ func newGCPod(name string) *v1.Pod {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
},
},
},
}
}
// verifyRemainingReplicationControllersPods verifies whether the numbers of remaining replication
// controllers and pods are rcNum and podNum. It returns an error if the
// communication with the API server fails.
func verifyRemainingReplicationControllersPods(f *framework.Framework, clientSet clientset.Interface, rcNum, podNum int) (bool, error) {
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
pods, err := clientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
var ret = true
if len(pods.Items) != podNum {
ret = false
By(fmt.Sprintf("expected %d pods, got %d pods", podNum, len(pods.Items)))
}
rcs, err := rcClient.List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list replication controllers: %v", err)
}
if len(rcs.Items) != rcNum {
ret = false
By(fmt.Sprintf("expected %d RCs, got %d RCs", rcNum, len(rcs.Items)))
}
return ret, nil
}
// verifyRemainingCronJobsJobsPods verifies whether the numbers of remaining cronjobs,
// jobs and pods are cjNum, jobNum and podNum. It returns an error if the communication with the API server fails.
func verifyRemainingCronJobsJobsPods(f *framework.Framework, clientSet clientset.Interface,
cjNum, jobNum, podNum int) (bool, error) {
// verifyRemainingObjects verifies whether the remaining objects of each listed kind match the counts given in objects.
// It returns an error if the communication with the API server fails.
func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (bool, error) {
var ret = true
cronJobs, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list cronjobs: %v", err)
}
if len(cronJobs.Items) != cjNum {
ret = false
By(fmt.Sprintf("expected %d cronjobs, got %d cronjobs", cjNum, len(cronJobs.Items)))
}
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list jobs: %v", err)
}
if len(jobs.Items) != jobNum {
ret = false
By(fmt.Sprintf("expected %d jobs, got %d jobs", jobNum, len(jobs.Items)))
}
pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
if len(pods.Items) != podNum {
ret = false
By(fmt.Sprintf("expected %d pods, got %d pods", podNum, len(pods.Items)))
for object, num := range objects {
switch object {
case "Pods":
pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list pods: %v", err)
}
if len(pods.Items) != num {
ret = false
By(fmt.Sprintf("expected %d pods, got %d pods", num, len(pods.Items)))
}
case "Deployments":
deployments, err := f.ClientSet.ExtensionsV1beta1().Deployments(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list deployments: %v", err)
}
if len(deployments.Items) != num {
ret = false
By(fmt.Sprintf("expected %d Deployments, got %d Deployments", num, len(deployments.Items)))
}
case "ReplicaSets":
rs, err := f.ClientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list rs: %v", err)
}
if len(rs.Items) != num {
ret = false
By(fmt.Sprintf("expected %d rs, got %d rs", num, len(rs.Items)))
}
case "ReplicationControllers":
rcs, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list replication controllers: %v", err)
}
if len(rcs.Items) != num {
ret = false
By(fmt.Sprintf("expected %d RCs, got %d RCs", num, len(rcs.Items)))
}
case "CronJobs":
cronJobs, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list cronjobs: %v", err)
}
if len(cronJobs.Items) != num {
ret = false
By(fmt.Sprintf("expected %d cronjobs, got %d cronjobs", num, len(cronJobs.Items)))
}
case "Jobs":
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list jobs: %v", err)
}
if len(jobs.Items) != num {
ret = false
By(fmt.Sprintf("expected %d jobs, got %d jobs", num, len(jobs.Items)))
}
default:
return false, fmt.Errorf("object %s is not supported", object)
}
}
return ret, nil
@@ -312,7 +277,7 @@ func newCronJob(name, schedule string) *batchv1beta1.CronJob {
Containers: []v1.Container{
{
Name: "c",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sleep", "300"},
},
},
@@ -336,10 +301,9 @@ var _ = SIGDescribe("Garbage collector", func() {
f := framework.NewDefaultFramework("gc")
/*
Testname: garbage-collector-delete-rc--propagation-background
Description: Ensure that if deleteOptions.PropagationPolicy is set to Background,
then deleting a ReplicationController should cause pods created
by that RC to also be deleted.
Release : v1.9
Testname: Garbage Collector, delete replication controller, propagation policy background
Description: Create a replication controller with 2 Pods. Once RC is created and the first Pod is created, delete RC with deleteOptions.PropagationPolicy set to Background. Deleting the Replication Controller MUST cause pods created by that RC to be deleted.
*/
framework.ConformanceIt("should delete pods created by rc when not orphaning", func() {
clientSet := f.ClientSet
@@ -380,7 +344,8 @@ var _ = SIGDescribe("Garbage collector", func() {
By("wait for all pods to be garbage collected")
// wait for the RCs and Pods to reach the expected numbers.
if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
return verifyRemainingReplicationControllersPods(f, clientSet, 0, 0)
objects := map[string]int{"ReplicationControllers": 0, "Pods": 0}
return verifyRemainingObjects(f, objects)
}); err != nil {
framework.Failf("failed to wait for all pods to be deleted: %v", err)
remainingPods, err := podClient.List(metav1.ListOptions{})
@@ -394,10 +359,9 @@ var _ = SIGDescribe("Garbage collector", func() {
})
/*
Testname: garbage-collector-delete-rc--propagation-orphan
Description: Ensure that if deleteOptions.PropagationPolicy is set to Orphan,
then deleting a ReplicationController should cause pods created
by that RC to be orphaned.
Release : v1.9
Testname: Garbage Collector, delete replication controller, propagation policy orphan
Description: Create a replication controller with maximum allocatable Pods between 10 and 100 replicas. Once the RC is created and all Pods are created, delete the RC with deleteOptions.PropagationPolicy set to Orphan. Deleting the Replication Controller MUST cause pods created by that RC to be orphaned.
*/
framework.ConformanceIt("should orphan pods created by rc if delete options say so", func() {
clientSet := f.ClientSet
@@ -463,6 +427,8 @@ var _ = SIGDescribe("Garbage collector", func() {
gatherMetrics(f)
})
// deleteOptions.OrphanDependents is deprecated in 1.7; deleteOptions.PropagationPolicy is preferred.
// Discussion is tracked under https://github.com/kubernetes/kubernetes/issues/65427 to promote this test to conformance in the future.
It("should orphan pods created by rc if deleteOptions.OrphanDependents is nil", func() {
clientSet := f.ClientSet
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
@@ -508,10 +474,9 @@ var _ = SIGDescribe("Garbage collector", func() {
})
/*
Testname: garbage-collector-delete-deployment-propagation-background
Description: Ensure that if deleteOptions.PropagationPolicy is set to Background,
then deleting a Deployment should cause ReplicaSets created
by that Deployment to also be deleted.
Release : v1.9
Testname: Garbage Collector, delete deployment, propagation policy background
Description: Create a deployment with a replicaset. Once the replicaset is created, delete the deployment with deleteOptions.PropagationPolicy set to Background. Deleting the deployment MUST delete the replicaset created by the deployment, and the Pods that belong to the deployment MUST also be deleted.
*/
framework.ConformanceIt("should delete RS created by deployment when not orphaning", func() {
clientSet := f.ClientSet
@@ -547,7 +512,8 @@ var _ = SIGDescribe("Garbage collector", func() {
}
By("wait for all rs to be garbage collected")
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
return verifyRemainingDeploymentsReplicaSetsPods(f, clientSet, deployment, 0, 0, 0)
objects := map[string]int{"Deployments": 0, "ReplicaSets": 0, "Pods": 0}
return verifyRemainingObjects(f, objects)
})
if err != nil {
errList := make([]error, 0)
@@ -567,10 +533,9 @@ var _ = SIGDescribe("Garbage collector", func() {
})
/*
Testname: garbage-collector-delete-deployment-propagation-true
Description: Ensure that if deleteOptions.PropagationPolicy is set to Orphan,
then deleting a Deployment should cause ReplicaSets created
by that Deployment to be orphaned.
Release : v1.9
Testname: Garbage Collector, delete deployment, propagation policy orphan
Description: Create a deployment with a replicaset. Once the replicaset is created, delete the deployment with deleteOptions.PropagationPolicy set to Orphan. Deleting the deployment MUST cause the replicaset created by the deployment to be orphaned, and the Pods created by the deployment MUST be orphaned as well.
*/
framework.ConformanceIt("should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan", func() {
clientSet := f.ClientSet
@@ -606,7 +571,8 @@ var _ = SIGDescribe("Garbage collector", func() {
}
By("wait for 30 seconds to see if the garbage collector mistakenly deletes the rs")
time.Sleep(30 * time.Second)
ok, err := verifyRemainingDeploymentsReplicaSetsPods(f, clientSet, deployment, 0, 1, 2)
objects := map[string]int{"Deployments": 0, "ReplicaSets": 1, "Pods": 2}
ok, err := verifyRemainingObjects(f, objects)
if err != nil {
framework.Failf("Unexpected error while verifying remaining deployments, rs, and pods: %v", err)
}
@@ -641,9 +607,9 @@ var _ = SIGDescribe("Garbage collector", func() {
})
/*
Testname: garbage-collector-delete-rc-after-owned-pods
Description: Ensure that if deleteOptions.PropagationPolicy is set to Foreground,
then a ReplicationController should not be deleted until all its dependent pods are deleted.
Release : v1.9
Testname: Garbage Collector, delete replication controller, after owned pods
Description: Create a replication controller with maximum allocatable Pods between 10 and 100 replicas. Once the RC is created and all Pods are created, delete the RC with deleteOptions.PropagationPolicy set to Foreground. Deleting the Replication Controller MUST cause pods created by that RC to be deleted before the RC is deleted.
*/
framework.ConformanceIt("should keep the rc around until all its pods are deleted if the deleteOptions says so", func() {
clientSet := f.ClientSet
@@ -729,9 +695,9 @@ var _ = SIGDescribe("Garbage collector", func() {
// TODO: this should be an integration test
/*
Testname: garbage-collector-multiple-owners
Description: Ensure that if a Pod has multiple valid owners, it will not be deleted
when one of of those owners gets deleted.
Release : v1.9
Testname: Garbage Collector, multiple owners
Description: Create a replication controller RC1 with maximum allocatable Pods between 10 and 100 replicas. Create a second replication controller RC2 and set RC2 as an owner for half of those replicas. Once RC1 is created and all Pods are created, delete RC1 with deleteOptions.PropagationPolicy set to Foreground. The half of the Pods that have RC2 as an owner MUST NOT be deleted but MUST have a deletion timestamp. Deleting a Replication Controller MUST NOT delete Pods that are owned by multiple replication controllers.
*/
framework.ConformanceIt("should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted", func() {
clientSet := f.ClientSet
@@ -843,9 +809,9 @@ var _ = SIGDescribe("Garbage collector", func() {
// TODO: should be an integration test
/*
Testname: garbage-collector-dependency-cycle
Description: Ensure that a dependency cycle will
not block the garbage collector.
Release : v1.9
Testname: Garbage Collector, dependency cycle
Description: Create three pods and patch them with owner references such that pod1 has pod3, pod2 has pod1, and pod3 has pod2 as owner references, respectively. Deleting pod1 MUST delete all three pods. The dependency cycle MUST NOT block the garbage collection.
*/
framework.ConformanceIt("should not be blocked by dependency circle", func() {
clientSet := f.ClientSet
@@ -939,7 +905,7 @@ var _ = SIGDescribe("Garbage collector", func() {
},
},
}
persistedOwner, err := resourceClient.Create(owner)
persistedOwner, err := resourceClient.Create(owner, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create owner resource %q: %v", ownerName, err)
}
@@ -964,7 +930,7 @@ var _ = SIGDescribe("Garbage collector", func() {
},
},
}
persistedDependent, err := resourceClient.Create(dependent)
persistedDependent, err := resourceClient.Create(dependent, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
}
@@ -1040,7 +1006,7 @@ var _ = SIGDescribe("Garbage collector", func() {
},
},
}
persistedOwner, err := resourceClient.Create(owner)
persistedOwner, err := resourceClient.Create(owner, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create owner resource %q: %v", ownerName, err)
}
@@ -1065,7 +1031,7 @@ var _ = SIGDescribe("Garbage collector", func() {
},
},
}
_, err = resourceClient.Create(dependent)
_, err = resourceClient.Create(dependent, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
}
@@ -1127,7 +1093,8 @@ var _ = SIGDescribe("Garbage collector", func() {
}
By("Verify if cronjob does not leave jobs nor pods behind")
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
return verifyRemainingCronJobsJobsPods(f, f.ClientSet, 0, 0, 0)
objects := map[string]int{"CronJobs": 0, "Jobs": 0, "Pods": 0}
return verifyRemainingObjects(f, objects)
})
if err != nil {
framework.Failf("Failed to wait for all jobs and pods to be deleted: %v", err)
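
A minimal sketch, assuming the 1.13 typed client, of the deleteOptions.PropagationPolicy semantics these garbage collector tests pin down; the helper name is illustrative.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// deleteRC deletes a replication controller with an explicit propagation policy:
//   - Background: the RC is deleted immediately; the GC then deletes its pods.
//   - Orphan: the RC is deleted; its pods keep running without an owner.
//   - Foreground: the RC stays (with a deletion timestamp) until its pods are gone.
func deleteRC(c clientset.Interface, ns, name string, policy metav1.DeletionPropagation) error {
	return c.CoreV1().ReplicationControllers(ns).Delete(name, &metav1.DeleteOptions{
		PropagationPolicy: &policy,
	})
}

For example, deleteRC(clientSet, ns, rc.Name, metav1.DeletePropagationBackground) corresponds to the background-deletion case above.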


@@ -49,7 +49,7 @@ func stagingClientPod(name, value string) v1.Pod {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
Ports: []v1.ContainerPort{{ContainerPort: 80}},
},
},
@@ -70,7 +70,7 @@ func testingPod(name, value string) v1.Pod {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
Ports: []v1.ContainerPort{{ContainerPort: 80}},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
@@ -243,7 +243,7 @@ func newTestingCronJob(name string, value string) *batchv1beta1.CronJob {
Containers: []v1.Container{
{
Name: "c",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
VolumeMounts: []v1.VolumeMount{
{
MountPath: "/data",


@@ -262,10 +262,18 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
f := framework.NewDefaultFramework("namespaces")
It("should ensure that all pods are removed when a namespace is deleted.",
/*
Testname: namespace-deletion-removes-pods
Description: Ensure that if a namespace is deleted then all pods are removed from that namespace.
*/
framework.ConformanceIt("should ensure that all pods are removed when a namespace is deleted",
func() { ensurePodsAreRemovedWhenNamespaceIsDeleted(f) })
It("should ensure that all services are removed when a namespace is deleted.",
/*
Testname: namespace-deletion-removes-services
Description: Ensure that if a namespace is deleted then all services are removed from that namespace.
*/
framework.ConformanceIt("should ensure that all services are removed when a namespace is deleted",
func() { ensureServicesAreRemovedWhenNamespaceIsDeleted(f) })
It("should delete fast enough (90 percent of 100 namespaces in 150 seconds)",


@@ -52,21 +52,23 @@ const (
roleBindingName = "webhook-auth-reader"
// The webhook configuration names should not be reused between test instances.
crWebhookConfigName = "e2e-test-webhook-config-cr"
webhookConfigName = "e2e-test-webhook-config"
mutatingWebhookConfigName = "e2e-test-mutating-webhook-config"
podMutatingWebhookConfigName = "e2e-test-mutating-webhook-pod"
crMutatingWebhookConfigName = "e2e-test-mutating-webhook-config-cr"
webhookFailClosedConfigName = "e2e-test-webhook-fail-closed"
webhookForWebhooksConfigName = "e2e-test-webhook-for-webhooks-config"
removableValidatingHookName = "e2e-test-should-be-removable-validating-webhook-config"
removableMutatingHookName = "e2e-test-should-be-removable-mutating-webhook-config"
crdWebhookConfigName = "e2e-test-webhook-config-crd"
crWebhookConfigName = "e2e-test-webhook-config-cr"
webhookConfigName = "e2e-test-webhook-config"
attachingPodWebhookConfigName = "e2e-test-webhook-config-attaching-pod"
mutatingWebhookConfigName = "e2e-test-mutating-webhook-config"
podMutatingWebhookConfigName = "e2e-test-mutating-webhook-pod"
crMutatingWebhookConfigName = "e2e-test-mutating-webhook-config-cr"
webhookFailClosedConfigName = "e2e-test-webhook-fail-closed"
webhookForWebhooksConfigName = "e2e-test-webhook-for-webhooks-config"
removableValidatingHookName = "e2e-test-should-be-removable-validating-webhook-config"
removableMutatingHookName = "e2e-test-should-be-removable-mutating-webhook-config"
crdWebhookConfigName = "e2e-test-webhook-config-crd"
skipNamespaceLabelKey = "skip-webhook-admission"
skipNamespaceLabelValue = "yes"
skippedNamespaceName = "exempted-namesapce"
disallowedPodName = "disallowed-pod"
toBeAttachedPodName = "to-be-attached-pod"
hangingPodName = "hanging-pod"
disallowedConfigMapName = "disallowed-configmap"
allowedConfigMapName = "allowed-configmap"
@@ -117,6 +119,12 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
testWebhook(f)
})
It("Should be able to deny attaching pod", func() {
webhookCleanup := registerWebhookForAttachingPod(f, context)
defer webhookCleanup()
testAttachingPodWebhook(f)
})
It("Should be able to deny custom resource creation", func() {
testcrd, err := framework.CreateTestCRD(f)
if err != nil {
@@ -405,6 +413,53 @@ func registerWebhook(f *framework.Framework, context *certContext) func() {
}
}
func registerWebhookForAttachingPod(f *framework.Framework, context *certContext) func() {
client := f.ClientSet
By("Registering the webhook via the AdmissionRegistration API")
namespace := f.Namespace.Name
configName := attachingPodWebhookConfigName
// A webhook that cannot talk to server, with fail-open policy
failOpenHook := failingWebhook(namespace, "fail-open.k8s.io")
policyIgnore := v1beta1.Ignore
failOpenHook.FailurePolicy = &policyIgnore
_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: configName,
},
Webhooks: []v1beta1.Webhook{
{
Name: "deny-attaching-pod.k8s.io",
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Connect},
Rule: v1beta1.Rule{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"pods/attach"},
},
}},
ClientConfig: v1beta1.WebhookClientConfig{
Service: &v1beta1.ServiceReference{
Namespace: namespace,
Name: serviceName,
Path: strPtr("/pods/attach"),
},
CABundle: context.signingCert,
},
},
},
})
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
// The webhook configuration is honored within 10s.
time.Sleep(10 * time.Second)
return func() {
client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(configName, nil)
}
}
func registerMutatingWebhookForConfigMap(f *framework.Framework, context *certContext) func() {
client := f.ClientSet
By("Registering the mutating configmap webhook via the AdmissionRegistration API")
@@ -576,7 +631,7 @@ func testWebhook(f *framework.Framework) {
pod = hangingPod(f)
_, err = client.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(BeNil())
expectedTimeoutErr := "request did not complete within allowed duration"
expectedTimeoutErr := "request did not complete within"
if !strings.Contains(err.Error(), expectedTimeoutErr) {
framework.Failf("expect timeout error %q, got %q", expectedTimeoutErr, err.Error())
}
@@ -642,6 +697,21 @@ func testWebhook(f *framework.Framework) {
Expect(err).To(BeNil())
}
func testAttachingPodWebhook(f *framework.Framework) {
By("create a pod")
client := f.ClientSet
pod := toBeAttachedPod(f)
_, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(BeNil())
By("'kubectl attach' the pod, should be denied by the webhook")
_, err = framework.NewKubectlCommand("attach", fmt.Sprintf("--namespace=%v", f.Namespace.Name), pod.Name, "-i", "-c=container1").Exec()
Expect(err).NotTo(BeNil())
if e, a := "attaching to pod 'to-be-attached-pod' is not allowed", err.Error(); !strings.Contains(a, e) {
framework.Failf("unexpected 'kubectl attach' error message. expected to contain %q, got %q", e, a)
}
}
// failingWebhook returns a webhook whose rule matches configmap creation,
// but with an invalid client config, so that the API server cannot communicate with it.
func failingWebhook(namespace, name string) v1beta1.Webhook {
@@ -930,6 +1000,22 @@ func hangingPod(f *framework.Framework) *v1.Pod {
}
}
func toBeAttachedPod(f *framework.Framework) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: toBeAttachedPodName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "container1",
Image: imageutils.GetPauseImageName(),
},
},
},
}
}
func nonCompliantConfigMap(f *framework.Framework) *v1.ConfigMap {
return &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -1100,7 +1186,7 @@ func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1beta1
},
},
}
_, err := customResourceClient.Create(crInstance)
_, err := customResourceClient.Create(crInstance, metav1.CreateOptions{})
Expect(err).NotTo(BeNil())
expectedErrMsg := "the custom resource contains unwanted data"
if !strings.Contains(err.Error(), expectedErrMsg) {
@@ -1123,7 +1209,7 @@ func testMutatingCustomResourceWebhook(f *framework.Framework, crd *apiextension
},
},
}
mutatedCR, err := customResourceClient.Create(cr)
mutatedCR, err := customResourceClient.Create(cr, metav1.CreateOptions{})
Expect(err).To(BeNil())
expectedCRData := map[string]interface{}{
"mutation-start": "yes",


@@ -37,7 +37,28 @@ go_library(
"//pkg/controller/replication:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/util/pointer:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
@@ -45,27 +66,7 @@ go_library(
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)


@@ -5,3 +5,5 @@ approvers:
- mfojtik
reviewers:
- sig-apps-reviewers
labels:
- sig/apps


@@ -34,6 +34,7 @@ import (
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/controller/job"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
@@ -298,7 +299,7 @@ func newTestCronJob(name, schedule string, concurrencyPolicy batchv1beta1.Concur
Containers: []v1.Container{
{
Name: "c",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
VolumeMounts: []v1.VolumeMount{
{
MountPath: "/data",


@@ -24,8 +24,7 @@ import (
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
apierrs "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
@@ -54,6 +53,10 @@ const (
daemonsetColorLabel = daemonsetLabelPrefix + "color"
)
// The annotation key scheduler.alpha.kubernetes.io/node-selector is for assigning
// node-selector labels to namespaces.
var NamespaceNodeSelectors = []string{"scheduler.alpha.kubernetes.io/node-selector"}
// This test must be run in serial because it assumes the Daemon Set pods will
// always get scheduled. If we run other tests in parallel, this may not
// happen. In the future, running in parallel may work if we have an eviction
@@ -100,7 +103,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ns = f.Namespace.Name
c = f.ClientSet
err := clearDaemonSetNodeLabels(c)
updatedNS, err := updateNamespaceAnnotations(c, ns)
Expect(err).NotTo(HaveOccurred())
ns = updatedNS.Name
err = clearDaemonSetNodeLabels(c)
Expect(err).NotTo(HaveOccurred())
})
@@ -495,6 +504,26 @@ func clearDaemonSetNodeLabels(c clientset.Interface) error {
return nil
}
// updateNamespaceAnnotations sets the node-selector annotations on the test namespace to empty.
func updateNamespaceAnnotations(c clientset.Interface, nsName string) (*v1.Namespace, error) {
nsClient := c.CoreV1().Namespaces()
ns, err := nsClient.Get(nsName, metav1.GetOptions{})
if err != nil {
return nil, err
}
if ns.Annotations == nil {
ns.Annotations = make(map[string]string)
}
for _, n := range NamespaceNodeSelectors {
ns.Annotations[n] = ""
}
return nsClient.Update(ns)
}
func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) {
nodeClient := c.CoreV1().Nodes()
var newNode *v1.Node
@@ -520,7 +549,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
newLabels, _ = separateDaemonSetNodeLabels(newNode.Labels)
return true, err
}
if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
if se, ok := err.(*apierrors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
framework.Logf("failed to update node due to resource version conflict")
return false, nil
}
@@ -734,7 +763,7 @@ func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *a
func waitFailedDaemonPodDeleted(c clientset.Interface, pod *v1.Pod) func() (bool, error) {
return func() (bool, error) {
if _, err := c.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err != nil {
if errors.IsNotFound(err) {
if apierrors.IsNotFound(err) {
return true, nil
}
return false, fmt.Errorf("failed to get failed daemon pod %q: %v", pod.Name, err)
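
A hedged sketch of the resource-version conflict-retry pattern that the setDaemonSetNodeLabels hunk above implements by hand, expressed instead with client-go's retry helper (k8s.io/client-go/util/retry, already among this commit's deps); the 1.13 typed-client signatures are assumed and the helper name is illustrative.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// setNodeLabels re-reads the node and retries the update whenever the API
// server rejects it with a resource-version conflict.
func setNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if node.Labels == nil {
			node.Labels = map[string]string{}
		}
		for k, v := range labels {
			node.Labels[k] = v
		}
		_, err = c.CoreV1().Nodes().Update(node)
		return err
	})
}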


@@ -38,9 +38,9 @@ import (
clientset "k8s.io/client-go/kubernetes"
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
"k8s.io/kubernetes/test/e2e/framework"
testutil "k8s.io/kubernetes/test/utils"
utilpointer "k8s.io/utils/pointer"
)
const (
@@ -70,16 +70,35 @@ var _ = SIGDescribe("Deployment", func() {
It("deployment reaping should cascade to its replica sets and pods", func() {
testDeleteDeployment(f)
})
It("RollingUpdateDeployment should delete old pods and create new ones", func() {
/*
Testname: Deployment RollingUpdate
Description: A conformant Kubernetes distribution MUST support the Deployment with RollingUpdate strategy.
*/
framework.ConformanceIt("RollingUpdateDeployment should delete old pods and create new ones", func() {
testRollingUpdateDeployment(f)
})
It("RecreateDeployment should delete old pods and create new ones", func() {
/*
Testname: Deployment Recreate
Description: A conformant Kubernetes distribution MUST support the Deployment with Recreate strategy.
*/
framework.ConformanceIt("RecreateDeployment should delete old pods and create new ones", func() {
testRecreateDeployment(f)
})
It("deployment should delete old replica sets", func() {
/*
Testname: Deployment RevisionHistoryLimit
Description: A conformant Kubernetes distribution MUST clean up Deployment's ReplicaSets based on
the Deployment's `.spec.revisionHistoryLimit`.
*/
framework.ConformanceIt("deployment should delete old replica sets", func() {
testDeploymentCleanUpPolicy(f)
})
It("deployment should support rollover", func() {
/*
Testname: Deployment Rollover
Description: A conformant Kubernetes distribution MUST support Deployment rollover,
i.e. allow arbitrary number of changes to desired state during rolling update
before the rollout finishes.
*/
framework.ConformanceIt("deployment should support rollover", func() {
testRolloverDeployment(f)
})
It("deployment should support rollback", func() {
@@ -91,7 +110,13 @@ var _ = SIGDescribe("Deployment", func() {
It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func() {
testDeploymentsControllerRef(f)
})
It("deployment should support proportional scaling", func() {
/*
Testname: Deployment Proportional Scaling
Description: A conformant Kubernetes distribution MUST support Deployment
proportional scaling, i.e. proportionally scale a Deployment's ReplicaSets
when a Deployment is scaled.
*/
framework.ConformanceIt("deployment should support proportional scaling", func() {
testProportionalScalingDeployment(f)
})
// TODO: add tests that cover deployment.Spec.MinReadySeconds once we have solved clock-skew issues


@@ -44,7 +44,7 @@ var _ = SIGDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring job reaches completions")
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
Expect(err).NotTo(HaveOccurred())
})
@@ -63,7 +63,7 @@ var _ = SIGDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring job reaches completions")
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
Expect(err).NotTo(HaveOccurred())
})
@@ -84,7 +84,7 @@ var _ = SIGDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring job reaches completions")
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
Expect(err).NotTo(HaveOccurred())
})

View File

@@ -106,12 +106,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
f := framework.NewDefaultFramework("network-partition")
var c clientset.Interface
var ns string
ignoreLabels := framework.ImagePullerLabels
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
_, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
_, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
Expect(err).NotTo(HaveOccurred())
// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
@@ -197,7 +196,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
go controller.Run(stopCh)
By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
host := framework.GetNodeExternalIP(&node)
host, err := framework.GetNodeExternalIP(&node)
framework.ExpectNoError(err)
master := framework.GetMasterAddress(c)
defer func() {
By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
@@ -574,7 +574,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
go controller.Run(stopCh)
By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
host := framework.GetNodeExternalIP(&node)
host, err := framework.GetNodeExternalIP(&node)
framework.ExpectNoError(err)
master := framework.GetMasterAddress(c)
defer func() {
By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))


@@ -17,6 +17,7 @@ limitations under the License.
package apps
import (
"context"
"fmt"
"strings"
"time"
@@ -31,6 +32,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -248,6 +250,14 @@ var _ = SIGDescribe("StatefulSet", func() {
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
})
// This can't be Conformance yet because it depends on a default
// StorageClass and a dynamic provisioner.
It("should perform rolling updates and roll backs of template modifications with PVCs", func() {
By("Creating a new StatefulSet with PVCs")
*(ss.Spec.Replicas) = 3
rollbackTest(c, ns, ss)
})
/*
Release : v1.9
Testname: StatefulSet, Rolling Update
@@ -256,116 +266,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ConformanceIt("should perform rolling updates and roll backs of template modifications", func() {
By("Creating a new StatefulSet")
ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
sst := framework.NewStatefulSetTester(c)
sst.SetHttpProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).To(Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
ss.Namespace, ss.Name, updateRevision, currentRevision))
pods := sst.GetPodList(ss)
for i := range pods.Items {
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
currentRevision))
}
sst.SortStatefulPods(pods)
sst.BreakPodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
newImage := NewNginxImage
oldImage := ss.Spec.Template.Spec.Containers[0].Image
By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage))
Expect(oldImage).NotTo(Equal(newImage), "Incorrect test setup: should update to a different image")
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
Expect(err).NotTo(HaveOccurred())
By("Creating a new revision")
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
"Current revision should not equal update revision during rolling update")
By("Updating Pods in reverse ordinal order")
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
sst.RestorePodHttpProbe(ss, &pods.Items[1])
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
updateRevision))
for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(newImage),
fmt.Sprintf(" Pod %s/%s has image %s not have new image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
newImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(updateRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to update revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
updateRevision))
}
By("Rolling back to a previous revision")
sst.BreakPodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
priorRevision := currentRevision
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = oldImage
})
Expect(err).NotTo(HaveOccurred())
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
"Current revision should not equal update revision during roll back")
Expect(priorRevision).To(Equal(updateRevision),
"Prior revision should equal update revision during roll back")
By("Rolling back update in reverse ordinal order")
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
sst.RestorePodHttpProbe(ss, &pods.Items[1])
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(priorRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
updateRevision))
for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(oldImage),
fmt.Sprintf("Pod %s/%s has image %s not equal to previous image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
oldImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(priorRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to prior revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
priorRevision))
}
rollbackTest(c, ns, ss)
})
/*
@@ -700,7 +601,9 @@ var _ = SIGDescribe("StatefulSet", func() {
By("Verifying that stateful set " + ssName + " was scaled up in order")
expectedOrder := []string{ssName + "-0", ssName + "-1", ssName + "-2"}
_, err = watch.Until(framework.StatefulSetTimeout, watcher, func(event watch.Event) (bool, error) {
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulSetTimeout)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) {
if event.Type != watch.Added {
return false, nil
}
@@ -731,7 +634,9 @@ var _ = SIGDescribe("StatefulSet", func() {
By("Verifying that stateful set " + ssName + " was scaled down in reverse order")
expectedOrder = []string{ssName + "-2", ssName + "-1", ssName + "-0"}
_, err = watch.Until(framework.StatefulSetTimeout, watcher, func(event watch.Event) (bool, error) {
ctx, cancel = watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulSetTimeout)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) {
if event.Type != watch.Deleted {
return false, nil
}
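The watch migration repeated in these hunks swaps watch.Until(timeout, w, cond) for watchtools.UntilWithoutRetry(ctx, w, cond), moving the deadline into a context built by watchtools.ContextWithOptionalTimeout (a zero timeout means no deadline). A self-contained sketch of the new pattern; waitForAdded is illustrative:

package e2esketch

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/watch"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForAdded blocks until the watcher yields an Added event or the optional
// timeout elapses; UntilWithoutRetry never re-establishes a broken watch.
func waitForAdded(w watch.Interface, timeout time.Duration) error {
	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
	defer cancel()
	_, err := watchtools.UntilWithoutRetry(ctx, w, func(e watch.Event) (bool, error) {
		return e.Type == watch.Added, nil
	})
	return err
}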
@@ -810,7 +715,7 @@ var _ = SIGDescribe("StatefulSet", func() {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
Ports: []v1.ContainerPort{conflictingPort},
},
},
@@ -837,8 +742,10 @@ var _ = SIGDescribe("StatefulSet", func() {
By("Waiting until stateful pod " + statefulPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name)
w, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: statefulPodName}))
framework.ExpectNoError(err)
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulPodTimeout)
defer cancel()
// we need to get the UID from the pod in any state and wait until the stateful set controller removes the pod at least once
_, err = watch.Until(framework.StatefulPodTimeout, w, func(event watch.Event) (bool, error) {
_, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
pod := event.Object.(*v1.Pod)
switch event.Type {
case watch.Deleted:
@@ -862,7 +769,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ExpectNoError(err)
By("Waiting when stateful pod " + statefulPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state")
// we may catch delete event, thats why we are waiting for running phase like this, and not with watch.Until
// we may catch delete event, that's why we are waiting for running phase like this, and not with watchtools.UntilWithoutRetry
Eventually(func() error {
statefulPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(statefulPodName, metav1.GetOptions{})
if err != nil {
@@ -1176,3 +1083,119 @@ func pollReadWithTimeout(statefulPod statefulPodTester, statefulPodNumber int, k
}
return err
}
// This function is used by two tests to test StatefulSet rollbacks: one using
// PVCs and one using no storage.
func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
sst := framework.NewStatefulSetTester(c)
sst.SetHttpProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).To(Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
ss.Namespace, ss.Name, updateRevision, currentRevision))
pods := sst.GetPodList(ss)
for i := range pods.Items {
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
currentRevision))
}
sst.SortStatefulPods(pods)
err = sst.BreakPodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
newImage := NewNginxImage
oldImage := ss.Spec.Template.Spec.Containers[0].Image
By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage))
Expect(oldImage).NotTo(Equal(newImage), "Incorrect test setup: should update to a different image")
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
Expect(err).NotTo(HaveOccurred())
By("Creating a new revision")
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
"Current revision should not equal update revision during rolling update")
By("Updating Pods in reverse ordinal order")
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
err = sst.RestorePodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
updateRevision))
for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(newImage),
fmt.Sprintf(" Pod %s/%s has image %s not have new image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
newImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(updateRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to update revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
updateRevision))
}
By("Rolling back to a previous revision")
err = sst.BreakPodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
priorRevision := currentRevision
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = oldImage
})
Expect(err).NotTo(HaveOccurred())
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
"Current revision should not equal update revision during roll back")
Expect(priorRevision).To(Equal(updateRevision),
"Prior revision should equal update revision during roll back")
By("Rolling back update in reverse ordinal order")
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
sst.RestorePodHttpProbe(ss, &pods.Items[1])
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(priorRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
priorRevision))
for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(oldImage),
fmt.Sprintf("Pod %s/%s has image %s not equal to previous image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
oldImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(priorRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to prior revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
priorRevision))
}
}
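Both rolling-update tests now reduce to building a StatefulSet and handing it to rollbackTest. A sketch of the two call shapes, assuming the suite-scoped c, ns, headlessSvcName and labels, and the 1.13 NewStatefulSet signature in which nil mount slices produce no PVC templates:

// Storage-less variant: nil mounts, so no volumeClaimTemplates are generated.
ssNoStorage := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
rollbackTest(c, ns, ssNoStorage)

// PVC variant: the ss created in the suite's BeforeEach already carries claim
// templates, so the same function exercises rollback with storage attached.
*(ss.Spec.Replicas) = 3
rollbackTest(c, ns, ss)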

View File

@@ -31,7 +31,7 @@ var (
CronJobGroupVersionResourceBeta = schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"}
NautilusImage = imageutils.GetE2EImage(imageutils.Nautilus)
KittenImage = imageutils.GetE2EImage(imageutils.Kitten)
NginxImage = imageutils.GetE2EImage(imageutils.NginxSlim)
NewNginxImage = imageutils.GetE2EImage(imageutils.NginxSlimNew)
NginxImage = imageutils.GetE2EImage(imageutils.Nginx)
NewNginxImage = imageutils.GetE2EImage(imageutils.NginxNew)
RedisImage = imageutils.GetE2EImage(imageutils.Redis)
)
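The image constants are renamed (NginxSlim to Nginx, NginxSlimNew to NginxNew), so call sites change only the identifier. A sketch of resolving the two tags the rolling-update tests rely on; the variable names are illustrative:

import imageutils "k8s.io/kubernetes/test/utils/image"

var (
	oldImage = imageutils.GetE2EImage(imageutils.Nginx)    // baseline image
	newImage = imageutils.GetE2EImage(imageutils.NginxNew) // distinct tag, forces a new revision
)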

View File

@@ -12,45 +12,49 @@ go_library(
"certificates.go",
"framework.go",
"metadata_concealment.go",
"node_authn.go",
"node_authz.go",
"pod_security_policy.go",
"service_accounts.go",
],
importpath = "k8s.io/kubernetes/test/e2e/auth",
deps = [
"//pkg/master/ports:go_default_library",
"//pkg/security/apparmor:go_default_library",
"//pkg/security/podsecuritypolicy/seccomp:go_default_library",
"//pkg/security/podsecuritypolicy/util:go_default_library",
"//pkg/util/pointer:go_default_library",
"//plugin/pkg/admission/serviceaccount:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/certificates/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/audit:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/util/cert:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/evanphx/json-patch:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/util/cert:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)

File diff suppressed because it is too large.

View File

@@ -91,9 +91,10 @@ var _ = SIGDescribe("Certificates API", func() {
framework.Logf("waiting for CSR to be signed")
framework.ExpectNoError(wait.Poll(5*time.Second, time.Minute, func() (bool, error) {
csr, _ = csrs.Get(csrName, metav1.GetOptions{})
csr, err = csrs.Get(csrName, metav1.GetOptions{})
if err != nil {
return false, err
framework.Logf("error getting csr: %v", err)
return false, nil
}
if len(csr.Status.Certificate) == 0 {
framework.Logf("csr not signed yet")

View File

@@ -58,7 +58,7 @@ var _ = SIGDescribe("Metadata Concealment", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring job reaches completions")
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, int32(1))
err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, int32(1))
Expect(err).NotTo(HaveOccurred())
})
})

vendor/k8s.io/kubernetes/test/e2e/auth/node_authn.go (new file)
View File

@@ -0,0 +1,107 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package auth
import (
"fmt"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
f := framework.NewDefaultFramework("node-authn")
var ns string
var nodeIPs []string
BeforeEach(func() {
ns = f.Namespace.Name
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(len(nodeList.Items)).NotTo(BeZero())
pickedNode := nodeList.Items[0]
nodeIPs = framework.GetNodeAddresses(&pickedNode, v1.NodeExternalIP)
// The pods running in the cluster can see the internal addresses.
nodeIPs = append(nodeIPs, framework.GetNodeAddresses(&pickedNode, v1.NodeInternalIP)...)
// make sure ServiceAccount admission controller is enabled, so secret generation on SA creation works
sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get("default", metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(len(sa.Secrets)).NotTo(BeZero())
})
It("The kubelet's main port 10250 should reject requests with no credentials", func() {
pod := createNodeAuthTestPod(f)
for _, nodeIP := range nodeIPs {
// Anonymous authentication is disabled by default
result := framework.RunHostCmdOrDie(ns, pod.Name, fmt.Sprintf("curl -sIk -o /dev/null -w '%s' https://%s:%v/metrics", "%{http_code}", nodeIP, ports.KubeletPort))
Expect(result).To(Or(Equal("401"), Equal("403")), "the kubelet's main port 10250 should reject requests with no credentials")
}
})
It("The kubelet can delegate ServiceAccount tokens to the API server", func() {
By("create a new ServiceAccount for authentication")
trueValue := true
newSA := &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns,
Name: "node-auth-newSA",
},
AutomountServiceAccountToken: &trueValue,
}
_, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Create(newSA)
Expect(err).NotTo(HaveOccurred())
pod := createNodeAuthTestPod(f)
for _, nodeIP := range nodeIPs {
result := framework.RunHostCmdOrDie(ns,
pod.Name,
fmt.Sprintf("curl -sIk -o /dev/null -w '%s' --header \"Authorization: Bearer `%s`\" https://%s:%v/metrics",
"%{http_code}",
"cat /var/run/secrets/kubernetes.io/serviceaccount/token",
nodeIP, ports.KubeletPort))
Expect(result).To(Or(Equal("401"), Equal("403")), "the kubelet can delegate ServiceAccount tokens to the API server")
}
})
})
func createNodeAuthTestPod(f *framework.Framework) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "test-node-authn-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "test-node-authn",
Image: imageutils.GetE2EImage(imageutils.Hostexec),
Command: []string{"sleep", "3600"},
}},
RestartPolicy: v1.RestartPolicyNever,
},
}
return f.PodClient().CreateSync(pod)
}
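For readers who want the anonymous-request probe without shelling out to curl inside a pod, here is a hedged plain-Go equivalent (illustrative only; the real test execs curl in-cluster so that node-internal IPs are reachable):

package e2esketch

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

// probeKubelet issues an unauthenticated request to the kubelet's secure port;
// a 401 or 403 response means anonymous access is rejected, as the test expects.
func probeKubelet(nodeIP string) (int, error) {
	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // the -k in the curl invocation
	}}
	resp, err := client.Get(fmt.Sprintf("https://%s:10250/metrics", nodeIP))
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	return resp.StatusCode, nil
}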

View File

@@ -32,10 +32,10 @@ import (
"k8s.io/kubernetes/pkg/security/apparmor"
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp"
psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util"
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
utilpointer "k8s.io/utils/pointer"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -75,7 +75,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
It("should forbid pod creation when no PSP is available", func() {
By("Running a restricted pod")
_, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "restricted"))
_, err := c.CoreV1().Pods(ns).Create(restrictedPod("restricted"))
expectForbidden(err)
})
@@ -87,11 +87,11 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
defer cleanup()
By("Running a restricted pod")
pod, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "allowed"))
pod, err := c.CoreV1().Pods(ns).Create(restrictedPod("allowed"))
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace))
testPrivilegedPods(f, func(pod *v1.Pod) {
testPrivilegedPods(func(pod *v1.Pod) {
_, err := c.CoreV1().Pods(ns).Create(pod)
expectForbidden(err)
})
@@ -103,11 +103,11 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
defer cleanup()
By("Running a restricted pod")
pod, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "allowed"))
pod, err := c.CoreV1().Pods(ns).Create(restrictedPod("allowed"))
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace))
testPrivilegedPods(f, func(pod *v1.Pod) {
testPrivilegedPods(func(pod *v1.Pod) {
_, err := c.CoreV1().Pods(ns).Create(pod)
expectForbidden(err)
})
@@ -121,7 +121,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
expectedPSP, cleanup := createAndBindPSP(f, framework.PrivilegedPSP("permissive"))
defer cleanup()
testPrivilegedPods(f, func(pod *v1.Pod) {
testPrivilegedPods(func(pod *v1.Pod) {
p, err := c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace))
@@ -143,7 +143,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
expectedPSP, cleanup := createAndBindPSPInPolicy(f, privilegedPSPInPolicy("permissive"))
defer cleanup()
testPrivilegedPods(f, func(pod *v1.Pod) {
testPrivilegedPods(func(pod *v1.Pod) {
p, err := c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace))
@@ -163,16 +163,16 @@ func expectForbidden(err error) {
Expect(apierrs.IsForbidden(err)).To(BeTrue(), "should be forbidden error")
}
func testPrivilegedPods(f *framework.Framework, tester func(pod *v1.Pod)) {
func testPrivilegedPods(tester func(pod *v1.Pod)) {
By("Running a privileged pod", func() {
privileged := restrictedPod(f, "privileged")
privileged := restrictedPod("privileged")
privileged.Spec.Containers[0].SecurityContext.Privileged = boolPtr(true)
privileged.Spec.Containers[0].SecurityContext.AllowPrivilegeEscalation = nil
tester(privileged)
})
By("Running a HostPath pod", func() {
hostpath := restrictedPod(f, "hostpath")
hostpath := restrictedPod("hostpath")
hostpath.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{{
Name: "hp",
MountPath: "/hp",
@@ -187,26 +187,26 @@ func testPrivilegedPods(f *framework.Framework, tester func(pod *v1.Pod)) {
})
By("Running a HostNetwork pod", func() {
hostnet := restrictedPod(f, "hostnet")
hostnet := restrictedPod("hostnet")
hostnet.Spec.HostNetwork = true
tester(hostnet)
})
By("Running a HostPID pod", func() {
hostpid := restrictedPod(f, "hostpid")
hostpid := restrictedPod("hostpid")
hostpid.Spec.HostPID = true
tester(hostpid)
})
By("Running a HostIPC pod", func() {
hostipc := restrictedPod(f, "hostipc")
hostipc := restrictedPod("hostipc")
hostipc.Spec.HostIPC = true
tester(hostipc)
})
if common.IsAppArmorSupported() {
By("Running a custom AppArmor profile pod", func() {
aa := restrictedPod(f, "apparmor")
aa := restrictedPod("apparmor")
// Every node is expected to have the docker-default profile.
aa.Annotations[apparmor.ContainerAnnotationKeyPrefix+"pause"] = "localhost/docker-default"
tester(aa)
@@ -214,13 +214,13 @@ func testPrivilegedPods(f *framework.Framework, tester func(pod *v1.Pod)) {
}
By("Running an unconfined Seccomp pod", func() {
unconfined := restrictedPod(f, "seccomp")
unconfined := restrictedPod("seccomp")
unconfined.Annotations[v1.SeccompPodAnnotationKey] = "unconfined"
tester(unconfined)
})
By("Running a SYS_ADMIN pod", func() {
sysadmin := restrictedPod(f, "sysadmin")
sysadmin := restrictedPod("sysadmin")
sysadmin.Spec.Containers[0].SecurityContext.Capabilities = &v1.Capabilities{
Add: []v1.Capability{"SYS_ADMIN"},
}
@@ -311,7 +311,7 @@ func createAndBindPSPInPolicy(f *framework.Framework, pspTemplate *policy.PodSec
}
}
func restrictedPod(f *framework.Framework, name string) *v1.Pod {
func restrictedPod(name string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,

View File

@@ -153,6 +153,15 @@ var _ = SIGDescribe("ServiceAccounts", func() {
}
})
/*
Release: v1.9
Testname: Service Account Tokens Must AutoMount
Description: Ensure that Service Account keys are mounted into the Container. The Pod
contains three containers, which read the Service Account token,
the root CA and the default namespace, respectively, from the default API
token mount path. All three files MUST exist, and the Service
Account mount path MUST be auto mounted into the Container.
*/
framework.ConformanceIt("should mount an API token into pods ", func() {
var tokenContent string
var rootCAContent string
@@ -235,7 +244,33 @@ var _ = SIGDescribe("ServiceAccounts", func() {
})
})
/*
Release: v1.9
Testname: Service account tokens auto mount optionally
Description: Ensure that Service Account keys are mounted into the Pod only
when AutomountServiceAccountToken is not set to false. We test the
following scenarios here.
1. Create Pod, Pod Spec has AutomountServiceAccountToken set to nil
a) Service Account with default value,
b) Service Account is configured with AutomountServiceAccountToken set to true,
c) Service Account is configured with AutomountServiceAccountToken set to false
2. Create Pod, Pod Spec has AutomountServiceAccountToken set to true
a) Service Account with default value,
b) Service Account is configured with AutomountServiceAccountToken set to true,
c) Service Account is configured with AutomountServiceAccountToken set to false
3. Create Pod, Pod Spec has AutomountServiceAccountToken set to false
a) Service Account with default value,
b) Service Account is configured with AutomountServiceAccountToken set to true,
c) Service Account is configured with AutomountServiceAccountToken set to false
The Containers running in these pods MUST verify that the ServiceTokenVolume path is
auto mounted only when Pod Spec has AutomountServiceAccountToken not set to false
and the ServiceAccount object has AutomountServiceAccountToken not set to false; this
covers test cases 1a, 1b, 2a, 2b and 2c.
In test cases 1c, 3a, 3b and 3c the ServiceTokenVolume MUST NOT be auto mounted.
*/
framework.ConformanceIt("should allow opting out of API token automount ", func() {
var err error
trueValue := true
falseValue := false

View File

@@ -19,6 +19,25 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/autoscaling",
deps = [
"//pkg/apis/core:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v2beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/api/scheduling/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/instrumentation/monitoring:go_default_library",
@@ -30,25 +49,6 @@ go_library(
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/golang.org/x/oauth2/google:go_default_library",
"//vendor/google.golang.org/api/monitoring/v3:go_default_library",
"//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/scheduling/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)

View File

@@ -368,26 +368,6 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
})
func makeUnschedulable(f *framework.Framework, nodes []v1.Node) error {
for _, node := range nodes {
err := makeNodeUnschedulable(f.ClientSet, &node)
if err != nil {
return err
}
}
return nil
}
func makeSchedulable(f *framework.Framework, nodes []v1.Node) error {
for _, node := range nodes {
err := makeNodeSchedulable(f.ClientSet, &node, false)
if err != nil {
return err
}
}
return nil
}
func anyKey(input map[string]int) string {
for k := range input {
return k

View File

@@ -22,6 +22,7 @@ import (
"io/ioutil"
"math"
"net/http"
"os"
"os/exec"
"regexp"
"strconv"
@@ -82,6 +83,8 @@ const (
expendablePriorityClassName = "expendable-priority"
highPriorityClassName = "high-priority"
gpuLabel = "cloud.google.com/gke-accelerator"
)
var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
@@ -112,8 +115,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
nodeCount = len(nodes.Items)
coreCount = 0
for _, node := range nodes.Items {
quentity := node.Status.Capacity[v1.ResourceCPU]
coreCount += quentity.Value()
quantity := node.Status.Allocatable[v1.ResourceCPU]
coreCount += quantity.Value()
}
By(fmt.Sprintf("Initial number of schedulable nodes: %v", nodeCount))
Expect(nodeCount).NotTo(BeZero())
@@ -129,16 +132,11 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
err = enableAutoscaler("default-pool", 3, 5)
framework.ExpectNoError(err)
}
Expect(getNAPNodePoolsNumber()).Should(Equal(0))
}
})
AfterEach(func() {
if framework.ProviderIs("gke") {
By("Remove changes introduced by NAP tests")
removeNAPNodePools()
disableAutoprovisioning()
}
framework.SkipUnlessProviderIs("gce", "gke")
By(fmt.Sprintf("Restoring initial size of the cluster"))
setMigSizes(originalSizes)
expectedNodes := 0
@@ -207,107 +205,123 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]",
func() { simpleScaleUpTest(0) })
supportedGpuTypes := []string{"nvidia-tesla-k80", "nvidia-tesla-v100", "nvidia-tesla-p100"}
for _, gpuType := range supportedGpuTypes {
gpuType := gpuType // create new variable for each iteration step
gpuType := os.Getenv("TESTED_GPU_TYPE")
It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet()
installNvidiaDriversDaemonSet()
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
By("Schedule a pod which requires GPU")
framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc"))
By("Schedule a pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
})
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
})
It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet()
installNvidiaDriversDaemonSet()
By("Schedule a single pod which requires GPU")
framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc"))
By("Schedule a single pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2))
defer disableAutoscaler(gpuPoolName, 0, 2)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2))
defer disableAutoscaler(gpuPoolName, 0, 2)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
By("Scale GPU deployment")
framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, true)
By("Scale GPU deployment")
framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, true)
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+2 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(2))
})
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+2 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(2))
})
It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet()
installNvidiaDriversDaemonSet()
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
// Expect gpu pool to stay intact
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
})
// Expect gpu pool to stay intact
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
})
It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet()
installNvidiaDriversDaemonSet()
By("Schedule a single pod which requires GPU")
framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc"))
By("Schedule a single pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
By("Remove the only POD requiring GPU")
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
By("Remove the only POD requiring GPU")
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
})
}
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
})
It("should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp]",
func() {
@@ -355,6 +369,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
// We wait for nodes to become schedulable to make sure the new nodes
// will be returned by getPoolNodes below.
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, resizeTimeout))
glog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")
By("Getting memory available on new nodes, so we can account for it when creating RC")
@@ -362,7 +379,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
Expect(len(nodes)).Should(Equal(extraNodes))
extraMemMb := 0
for _, node := range nodes {
mem := node.Status.Capacity[v1.ResourceMemory]
mem := node.Status.Allocatable[v1.ResourceMemory]
extraMemMb += int((&mem).Value() / 1024 / 1024)
}
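Both hunks in this file switch from Status.Capacity to Status.Allocatable. Capacity is what the node physically has; Allocatable subtracts kubelet and system reservations and is what the scheduler can actually place pods against, so sizing autoscaler expectations on Capacity over-counts. A minimal sketch:

package e2esketch

import v1 "k8s.io/api/core/v1"

// allocatableMemMb returns schedulable memory in MB; Capacity would also
// count kubelet/system reservations that pods can never use.
func allocatableMemMb(node v1.Node) int {
	mem := node.Status.Allocatable[v1.ResourceMemory]
	return int(mem.Value() / 1024 / 1024)
}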
@@ -859,7 +876,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
By("Block network connectivity to some nodes to simulate unhealthy cluster")
nodesToBreakCount := int(math.Floor(math.Max(float64(unhealthyClusterThreshold), 0.5*float64(clusterSize))))
nodesToBreakCount := int(math.Ceil(math.Max(float64(unhealthyClusterThreshold), 0.5*float64(clusterSize))))
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
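The Floor-to-Ceil change matters whenever 0.5*clusterSize is fractional: to simulate an unhealthy cluster the test must break at least half the nodes, and Floor could fall one node short. A worked example (values illustrative):

package main

import (
	"fmt"
	"math"
)

func main() {
	clusterSize, threshold := 7, 3
	floorCount := int(math.Floor(math.Max(float64(threshold), 0.5*float64(clusterSize)))) // 3 of 7: still majority-healthy
	ceilCount := int(math.Ceil(math.Max(float64(threshold), 0.5*float64(clusterSize))))   // 4 of 7: actually unhealthy
	fmt.Println(floorCount, ceilCount) // 3 4
}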
@@ -894,106 +911,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(framework.WaitForReadyNodes(c, len(nodes.Items), nodesRecoverTimeout))
})
It("should add new node and new node pool on too big pod, scale down to 1 and scale down to 0 [Feature:ClusterSizeAutoscalingScaleWithNAP]", func() {
framework.SkipUnlessProviderIs("gke")
framework.ExpectNoError(enableAutoprovisioning(""))
By("Create first pod")
cleanupFunc1 := ReserveMemory(f, "memory-reservation1", 1, int(1.1*float64(memAllocatableMb)), true, defaultTimeout)
defer func() {
if cleanupFunc1 != nil {
cleanupFunc1()
}
}()
By("Waiting for scale up")
// Verify that cluster size increased.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, defaultTimeout))
By("Check if NAP group was created")
Expect(getNAPNodePoolsNumber()).Should(Equal(1))
By("Create second pod")
cleanupFunc2 := ReserveMemory(f, "memory-reservation2", 1, int(1.1*float64(memAllocatableMb)), true, defaultTimeout)
defer func() {
if cleanupFunc2 != nil {
cleanupFunc2()
}
}()
By("Waiting for scale up")
// Verify that cluster size increased.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+2 }, defaultTimeout))
By("Delete first pod")
cleanupFunc1()
cleanupFunc1 = nil
By("Waiting for scale down to 1")
// Verify that cluster size decreased.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, scaleDownTimeout))
By("Delete second pod")
cleanupFunc2()
cleanupFunc2 = nil
By("Waiting for scale down to 0")
// Verify that cluster size decreased.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
By("Waiting for NAP group remove")
framework.ExpectNoError(waitTillAllNAPNodePoolsAreRemoved())
By("Check if NAP group was removeed")
Expect(getNAPNodePoolsNumber()).Should(Equal(0))
})
It("shouldn't add new node group if not needed [Feature:ClusterSizeAutoscalingScaleWithNAP]", func() {
framework.SkipUnlessProviderIs("gke")
framework.ExpectNoError(enableAutoprovisioning(""))
By("Create pods")
// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
cleanupFunc := ReserveMemory(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout)
defer cleanupFunc()
By("Waiting for scale up")
// Verify that cluster size increased.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
By("Check if NAP group was created hoping id didn't happen")
Expect(getNAPNodePoolsNumber()).Should(Equal(0))
})
It("shouldn't scale up if cores limit too low, should scale up after limit is changed [Feature:ClusterSizeAutoscalingScaleWithNAP]", func() {
framework.SkipUnlessProviderIs("gke")
By(fmt.Sprintf("Set core limit to %d", coreCount))
framework.ExpectNoError(enableAutoprovisioning(fmt.Sprintf(`"resource_limits":{"name":"cpu", "minimum":2, "maximum":%d}, "resource_limits":{"name":"memory", "minimum":0, "maximum":10000000}`, coreCount)))
// Create pod allocating 1.1 allocatable for present nodes. Bigger node will have to be created.
cleanupFunc := ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, time.Second)
defer cleanupFunc()
By(fmt.Sprintf("Waiting for scale up hoping it won't happen, sleep for %s", scaleUpTimeout.String()))
time.Sleep(scaleUpTimeout)
// Verify that cluster size is not changed
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, time.Second))
By("Change resource limits")
framework.ExpectNoError(enableAutoprovisioning(fmt.Sprintf(`"resource_limits":{"name":"cpu", "minimum":2, "maximum":%d}, "resource_limits":{"name":"memory", "minimum":0, "maximum":10000000}`, coreCount+5)))
By("Wait for scale up")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
By("Check if NAP group was created")
Expect(getNAPNodePoolsNumber()).Should(Equal(1))
})
It("should create new node if there is no node for node selector [Feature:ClusterSizeAutoscalingScaleWithNAP]", func() {
framework.SkipUnlessProviderIs("gke")
framework.ExpectNoError(enableAutoprovisioning(""))
// Create pod allocating 0.7 allocatable for present nodes with node selector.
cleanupFunc := ReserveMemoryWithSelector(f, "memory-reservation", 1, int(0.7*float64(memAllocatableMb)), true, scaleUpTimeout, map[string]string{"test": "test"})
defer cleanupFunc()
By("Waiting for scale up")
// Verify that cluster size increased.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, defaultTimeout))
By("Check if NAP group was created")
Expect(getNAPNodePoolsNumber()).Should(Equal(1))
})
It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
framework.SkipUnlessProviderIs("gce")
defer createPriorityClasses(f)()
// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), false, time.Second, expendablePriorityClassName)
@@ -1006,8 +924,6 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
It("should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
framework.SkipUnlessProviderIs("gce")
defer createPriorityClasses(f)()
// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName)
@@ -1018,8 +934,6 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
It("shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]", func() {
// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
framework.SkipUnlessProviderIs("gce")
defer createPriorityClasses(f)()
// Create nodesCountAfterResize pods allocating 0.7 allocatable on present nodes - one pod per node.
cleanupFunc1 := ReserveMemoryWithPriority(f, "memory-reservation1", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, expendablePriorityClassName)
@@ -1032,8 +946,6 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
It("should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
framework.SkipUnlessProviderIs("gce")
defer createPriorityClasses(f)()
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
@@ -1045,8 +957,6 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
It("shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
framework.SkipUnlessProviderIs("gce")
defer createPriorityClasses(f)()
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
@@ -1248,17 +1158,6 @@ func disableAutoscaler(nodePool string, minCount, maxCount int) error {
return fmt.Errorf("autoscaler still enabled, last error: %v", finalErr)
}
func isAutoprovisioningEnabled() (bool, error) {
strBody, err := getCluster("v1alpha1")
if err != nil {
return false, err
}
if strings.Contains(strBody, "\"enableNodeAutoprovisioning\": true") {
return true, nil
}
return false, nil
}
func executeHTTPRequest(method string, url string, body string) (string, error) {
client := &http.Client{}
req, err := http.NewRequest(method, url, strings.NewReader(body))
@@ -1278,126 +1177,6 @@ func executeHTTPRequest(method string, url string, body string) (string, error)
return string(respBody), nil
}
func enableAutoprovisioning(resourceLimits string) error {
By("Using API to enable autoprovisioning.")
var body string
if resourceLimits != "" {
body = fmt.Sprintf(`{"update": {"desired_cluster_autoscaling": {"enable_node_autoprovisioning": true, %s}}}`, resourceLimits)
} else {
body = `{"update": {"desired_cluster_autoscaling": {"enable_node_autoprovisioning": true, "resource_limits":{"name":"cpu", "minimum":0, "maximum":100}, "resource_limits":{"name":"memory", "minimum":0, "maximum":10000000}}}}`
}
_, err := executeHTTPRequest(http.MethodPut, getGKEClusterURL("v1alpha1"), body)
if err != nil {
glog.Errorf("Request error: %s", err.Error())
return err
}
glog.Infof("Wait for enabling autoprovisioning.")
for start := time.Now(); time.Since(start) < gkeUpdateTimeout; time.Sleep(30 * time.Second) {
enabled, err := isAutoprovisioningEnabled()
if err != nil {
glog.Errorf("Error: %s", err.Error())
return err
}
if enabled {
By("Autoprovisioning enabled.")
return nil
}
glog.Infof("Waiting for enabling autoprovisioning")
}
return fmt.Errorf("autoprovisioning wasn't enabled (timeout).")
}
func disableAutoprovisioning() error {
enabled, err := isAutoprovisioningEnabled()
if err != nil {
glog.Errorf("Error: %s", err.Error())
return err
}
if !enabled {
By("Autoprovisioning disabled.")
return nil
}
By("Using API to disable autoprovisioning.")
_, err = executeHTTPRequest(http.MethodPut, getGKEClusterURL("v1alpha1"), "{\"update\": {\"desired_cluster_autoscaling\": {}}}")
if err != nil {
glog.Errorf("Request error: %s", err.Error())
return err
}
By("Wait for disabling autoprovisioning.")
for start := time.Now(); time.Since(start) < gkeUpdateTimeout; time.Sleep(30 * time.Second) {
enabled, err := isAutoprovisioningEnabled()
if err != nil {
glog.Errorf("Error: %s", err.Error())
return err
}
if !enabled {
By("Autoprovisioning disabled.")
return nil
}
By("Waiting for disabling autoprovisioning")
}
return fmt.Errorf("autoprovisioning wasn't disabled (timeout).")
}
func getNAPNodePools() ([]string, error) {
if framework.ProviderIs("gke") {
args := []string{"container", "node-pools", "list", "--cluster=" + framework.TestContext.CloudConfig.Cluster}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
if err != nil {
glog.Errorf("Failed to get instance groups: %v", string(output))
return nil, err
}
re := regexp.MustCompile("nap.* ")
lines := re.FindAllString(string(output), -1)
for i, line := range lines {
lines[i] = line[:strings.Index(line, " ")]
}
return lines, nil
} else {
return nil, fmt.Errorf("provider does not support NAP")
}
}
func removeNAPNodePools() error {
By("Remove NAP node pools")
pools, err := getNAPNodePools()
if err != nil {
return err
}
for _, pool := range pools {
By("Remove node pool: " + pool)
suffix := fmt.Sprintf("projects/%s/zones/%s/clusters/%s/nodePools/%s",
framework.TestContext.CloudConfig.ProjectID,
framework.TestContext.CloudConfig.Zone,
framework.TestContext.CloudConfig.Cluster,
pool)
_, err := executeHTTPRequest(http.MethodDelete, getGKEURL("v1alpha1", suffix), "")
if err != nil {
glog.Errorf("Request error: %s", err.Error())
return err
}
}
err = waitTillAllNAPNodePoolsAreRemoved()
if err != nil {
glog.Errorf(fmt.Sprintf("Couldn't remove NAP groups: %s", err.Error()))
}
return err
}
func getNAPNodePoolsNumber() int {
groups, err := getNAPNodePools()
framework.ExpectNoError(err)
return len(groups)
}
func waitTillAllNAPNodePoolsAreRemoved() error {
By("Wait till all NAP node pools are removed")
err := wait.PollImmediate(5*time.Second, defaultTimeout, func() (bool, error) {
return getNAPNodePoolsNumber() == 0, nil
})
return err
}
func addNodePool(name string, machineType string, numNodes int) {
args := []string{"container", "node-pools", "create", name, "--quiet",
"--machine-type=" + machineType,
@@ -1504,7 +1283,7 @@ func doPut(url, content string) (string, error) {
return strBody, nil
}
func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, priorityClassName string) func() error {
func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration, priorityClassName string) func() error {
By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
request := int64(1024 * 1024 * megabytes / replicas)
config := &testutils.RCConfig{
@@ -1517,6 +1296,7 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
Replicas: replicas,
MemRequest: request,
NodeSelector: selector,
Tolerations: tolerations,
PriorityClassName: priorityClassName,
}
for start := time.Now(); time.Since(start) < rcCreationRetryTimeout; time.Sleep(rcCreationRetryDelay) {
@@ -1539,19 +1319,19 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
// ReserveMemoryWithPriority creates a replication controller with pods with priority that, in summation,
// request the specified amount of memory.
func ReserveMemoryWithPriority(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, priorityClassName string) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, priorityClassName)
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, nil, priorityClassName)
}
// ReserveMemoryWithSelector creates a replication controller with pods with node selector that, in summation,
// request the specified amount of memory.
func ReserveMemoryWithSelector(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, selector, "")
func ReserveMemoryWithSelectorAndTolerations(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, selector, tolerations, "")
}
// ReserveMemory creates a replication controller with pods that, in summation,
// request the specified amount of memory.
func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, "")
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, nil, "")
}
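// Call-site sketch (assumed usage): each Reserve* helper returns a cleanup
// function that deletes the reservation RC, so tests typically defer it.
// The id and sizes below are illustrative values only:
//
//	cleanup := ReserveMemory(f, "memory-reservation", 100, 1024, true, scaleUpTimeout)
//	defer cleanup()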
// WaitForClusterSizeFunc waits until the cluster size matches the given function.
@@ -1742,7 +1522,13 @@ func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAdd
return fmt.Errorf("Failed to remove taint from node in allowed number of retries")
}
func scheduleGpuPod(f *framework.Framework, id string) error {
// ScheduleAnySingleGpuPod schedules a pod which requires single GPU of any type
func ScheduleAnySingleGpuPod(f *framework.Framework, id string) error {
return ScheduleGpuPod(f, id, "", 1)
}
// ScheduleGpuPod schedules a pod which requires a given number of gpus of given type
func ScheduleGpuPod(f *framework.Framework, id string, gpuType string, gpuLimit int64) error {
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
@@ -1751,10 +1537,14 @@ func scheduleGpuPod(f *framework.Framework, id string) error {
Timeout: 3 * scaleUpTimeout, // spinning up GPU node is slow
Image: imageutils.GetPauseImageName(),
Replicas: 1,
GpuLimit: 1,
GpuLimit: gpuLimit,
Labels: map[string]string{"requires-gpu": "yes"},
}
if gpuType != "" {
config.NodeSelector = map[string]string{gpuLabel: gpuType}
}
err := framework.RunRC(*config)
if err != nil {
return err
@@ -2152,12 +1942,18 @@ func createPriorityClasses(f *framework.Framework) func() {
}
for className, priority := range priorityClasses {
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: className}, Value: priority})
if err != nil {
glog.Errorf("Error creating priority class: %v", err)
}
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
}
return func() {
for className := range priorityClasses {
f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(className, nil)
err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(className, nil)
if err != nil {
glog.Errorf("Error deleting priority class: %v", err)
}
}
}
}
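// Usage sketch (assumption): createPriorityClasses returns its own cleanup
// function, so a test typically writes:
//
//	cleanupPriorityClasses := createPriorityClasses(f)
//	defer cleanupPriorityClasses()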

View File

@@ -46,7 +46,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", fu
})
SIGDescribe("[Serial] [Slow] ReplicaSet", func() {
// CPU tests via deployments
// CPU tests via ReplicaSets
It(titleUp, func() {
scaleUp("rs", common.KindReplicaSet, false, rc, f)
})

View File

@@ -12,6 +12,7 @@ go_library(
"autoscaling_utils.go",
"configmap.go",
"configmap_volume.go",
"container.go",
"container_probe.go",
"docker_containers.go",
"downward_api.go",
@@ -22,13 +23,18 @@ go_library(
"host_path.go",
"init_container.go",
"kubelet_etc_hosts.go",
"lifecycle_hook.go",
"networking.go",
"node_lease.go",
"pods.go",
"privileged.go",
"projected.go",
"runtime.go",
"secrets.go",
"secrets_volume.go",
"security_context.go",
"sysctl.go",
"ttlafterfinished.go",
"util.go",
"volumes.go",
],
@@ -40,32 +46,40 @@ go_library(
"//pkg/client/conditions:go_default_library",
"//pkg/kubelet:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/images:go_default_library",
"//pkg/kubelet/sysctl:go_default_library",
"//pkg/security/apparmor:go_default_library",
"//pkg/util/slice:go_default_library",
"//pkg/util/version:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/coordination/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/onsi/gomega/types:go_default_library",
"//vendor/golang.org/x/net/websocket:go_default_library",
"//vendor/k8s.io/api/autoscaling/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/scale:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
],
)

View File

@@ -117,7 +117,7 @@ done`, testCmd)
Affinity: loaderAffinity,
Containers: []api.Container{{
Name: "test",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", testCmd},
}},
RestartPolicy: api.RestartPolicyNever,

View File

@@ -43,7 +43,7 @@ import (
const (
dynamicConsumptionTimeInSeconds = 30
staticConsumptionTimeInSeconds = 3600
dynamicRequestSizeInMillicores = 20
dynamicRequestSizeInMillicores = 100
dynamicRequestSizeInMegabytes = 100
dynamicRequestSizeCustomMetric = 10
port = 80

View File

@@ -24,15 +24,16 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
var _ = Describe("[sig-api-machinery] ConfigMap", func() {
f := framework.NewDefaultFramework("configmap")
/*
Testname: configmap-in-env-field
Description: Make sure config map value can be used as an environment
variable in the container (on container.env field)
Release : v1.9
Testname: ConfigMap, from environment field
Description: Create a Pod with an environment variable value set using a value from ConfigMap. A ConfigMap value MUST be accessible in the container environment.
*/
framework.ConformanceIt("should be consumable via environment variable [NodeConformance]", func() {
name := "configmap-test-" + string(uuid.NewUUID())
@@ -51,7 +52,7 @@ var _ = Describe("[sig-api-machinery] ConfigMap", func() {
Containers: []v1.Container{
{
Name: "env-test",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
Env: []v1.EnvVar{
{
@@ -78,9 +79,9 @@ var _ = Describe("[sig-api-machinery] ConfigMap", func() {
})
/*
Testname: configmap-envfrom-field
Description: Make sure config map value can be used as an source for
environment variables in the container (on container.envFrom field)
Release: v1.9
Testname: ConfigMap, from environment variables
Description: Create a Pod with an environment source from ConfigMap. All ConfigMap values MUST be available as environment variables in the container.
*/
framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func() {
name := "configmap-test-" + string(uuid.NewUUID())
@@ -99,7 +100,7 @@ var _ = Describe("[sig-api-machinery] ConfigMap", func() {
Containers: []v1.Container{
{
Name: "env-test",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
EnvFrom: []v1.EnvFromSource{
{

View File

@@ -27,24 +27,25 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
var _ = Describe("[sig-storage] ConfigMap", func() {
f := framework.NewDefaultFramework("configmap")
/*
Testname: configmap-nomap-simple
Description: Make sure config map without mappings works by mounting it
to a volume with a custom path (mapping) on the pod with no other settings.
Release : v1.9
Testname: ConfigMap Volume, without mapping
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0644.
*/
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
doConfigMapE2EWithoutMappings(f, 0, 0, nil)
})
/*
Testname: configmap-nomap-default-mode
Description: Make sure config map without mappings works by mounting it
to a volume with a custom path (mapping) on the pod with defaultMode set
Release : v1.9
Testname: ConfigMap Volume, without mapping, volume mode set
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. File mode is changed to a custom value of '0400'. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST be set to the custom value of 0400
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [NodeConformance]", func() {
defaultMode := int32(0400)
@@ -57,9 +58,9 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})
/*
Testname: configmap-nomap-user
Description: Make sure config map without mappings works by mounting it
to a volume with a custom path (mapping) on the pod as non-root.
Release : v1.9
Testname: ConfigMap Volume, without mapping, non-root user
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to the default value of 0644.
*/
framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func() {
doConfigMapE2EWithoutMappings(f, 1000, 0, nil)
@@ -70,19 +71,18 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})
/*
Testname: configmap-simple-mapped
Description: Make sure config map works by mounting it to a volume with
a custom path (mapping) on the pod with no other settings and make sure
the pod actually consumes it.
Release : v1.9
Testname: ConfigMap Volume, with mapping
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0644.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
doConfigMapE2EWithMappings(f, 0, 0, nil)
})
/*
Testname: configmap-with-item-mode-mapped
Description: Make sure config map works with an item mode (e.g. 0400)
for the config map item.
Release : v1.9
Testname: ConfigMap Volume, with mapping, volume mode set
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. File mode is changed to a custom value of '0400'. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST be set to the custom value of 0400
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [NodeConformance]", func() {
mode := int32(0400)
@@ -90,8 +90,9 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})
/*
Testname: configmap-simple-user-mapped
Description: Make sure config map works when it is mounted as non-root.
Release : v1.9
Testname: ConfigMap Volume, with mapping, non-root user
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to the default value of 0644.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func() {
doConfigMapE2EWithMappings(f, 1000, 0, nil)
@@ -102,9 +103,9 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})
/*
Testname: configmap-update-test
Description: Make sure update operation is working on config map and
the result is observed on volumes mounted in containers.
Release : v1.9
Testname: ConfigMap Volume, update
Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to a custom path in the Pod. When the ConfigMap is updated, the change MUST be verified by reading the content from the mounted file in the Pod.
*/
framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
@@ -151,7 +152,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
Containers: []v1.Container{
{
Name: containerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -184,7 +185,12 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2"))
})
It("binary data should be reflected in volume [NodeConformance]", func() {
/*
Release: v1.12
Testname: ConfigMap Volume, text data, binary data
Description: The ConfigMap that is created with text data and binary data MUST be accessible to read from the newly created Pod using the volume mount that is mapped to a custom path in the Pod. The ConfigMap's text data and binary data MUST be verified by reading the content from the mounted files in the Pod.
*/
framework.ConformanceIt("binary data should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
@@ -233,7 +239,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
Containers: []v1.Container{
{
Name: containerName1,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -245,7 +251,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
},
{
Name: containerName2,
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"hexdump", "-C", "/etc/configmap-volume/dump.bin"},
VolumeMounts: []v1.VolumeMount{
{
@@ -276,9 +282,9 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})
/*
Testname: configmap-CUD-test
Description: Make sure Create, Update, Delete operations are all working
on config map and the result is observed on volumes mounted in containers.
Release : v1.9
Testname: ConfigMap Volume, create, update and delete
Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to a custom path in the Pod. When the ConfigMap is updated, the change MUST be verified by reading the content from the mounted file in the Pod. Also, when an item (file) is deleted from the ConfigMap, reading that item (file) MUST result in an error.
*/
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
@@ -379,7 +385,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
Containers: []v1.Container{
{
Name: deleteContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/delete/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -391,7 +397,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
},
{
Name: updateContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/update/data-3"},
VolumeMounts: []v1.VolumeMount{
{
@@ -403,7 +409,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
},
{
Name: createContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -459,9 +465,9 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})
/*
Testname: configmap-multiple-volumes
Description: Make sure config map works when it mounted as two different
volumes on the same node.
Release : v1.9
Testname: ConfigMap Volume, multiple volume maps
Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to multiple paths in the Pod. The content MUST be accessible from all the mapped volume mounts.
*/
framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func() {
var (
@@ -509,7 +515,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{"--file_content=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -589,7 +595,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, d
Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/configmap-volume/data-1",
"--file_mode=/etc/configmap-volume/data-1"},
@@ -675,7 +681,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item
Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{"--file_content=/etc/configmap-volume/path/to/data-2",
"--file_mode=/etc/configmap-volume/path/to/data-2"},
VolumeMounts: []v1.VolumeMount{

vendor/k8s.io/kubernetes/test/e2e/common/container.go generated vendored Normal file
View File

@@ -0,0 +1,134 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
ContainerStatusRetryTimeout = time.Minute * 5
ContainerStatusPollInterval = time.Second * 1
)
// ConformanceContainer represents a single container in a single pod, used by conformance tests.
type ConformanceContainer struct {
Container v1.Container
RestartPolicy v1.RestartPolicy
Volumes []v1.Volume
ImagePullSecrets []string
PodClient *framework.PodClient
podName string
PodSecurityContext *v1.PodSecurityContext
}
func (cc *ConformanceContainer) Create() {
cc.podName = cc.Container.Name + string(uuid.NewUUID())
imagePullSecrets := []v1.LocalObjectReference{}
for _, s := range cc.ImagePullSecrets {
imagePullSecrets = append(imagePullSecrets, v1.LocalObjectReference{Name: s})
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: cc.podName,
},
Spec: v1.PodSpec{
RestartPolicy: cc.RestartPolicy,
Containers: []v1.Container{
cc.Container,
},
SecurityContext: cc.PodSecurityContext,
Volumes: cc.Volumes,
ImagePullSecrets: imagePullSecrets,
},
}
cc.PodClient.Create(pod)
}
func (cc *ConformanceContainer) Delete() error {
return cc.PodClient.Delete(cc.podName, metav1.NewDeleteOptions(0))
}
func (cc *ConformanceContainer) IsReady() (bool, error) {
pod, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
if err != nil {
return false, err
}
return podutil.IsPodReady(pod), nil
}
func (cc *ConformanceContainer) GetPhase() (v1.PodPhase, error) {
pod, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
if err != nil {
return v1.PodUnknown, err
}
return pod.Status.Phase, nil
}
func (cc *ConformanceContainer) GetStatus() (v1.ContainerStatus, error) {
pod, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
if err != nil {
return v1.ContainerStatus{}, err
}
statuses := pod.Status.ContainerStatuses
if len(statuses) != 1 || statuses[0].Name != cc.Container.Name {
return v1.ContainerStatus{}, fmt.Errorf("unexpected container statuses %v", statuses)
}
return statuses[0], nil
}
func (cc *ConformanceContainer) Present() (bool, error) {
_, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
if err == nil {
return true, nil
}
if errors.IsNotFound(err) {
return false, nil
}
return false, err
}
type ContainerState string
const (
ContainerStateWaiting ContainerState = "Waiting"
ContainerStateRunning ContainerState = "Running"
ContainerStateTerminated ContainerState = "Terminated"
ContainerStateUnknown ContainerState = "Unknown"
)
func GetContainerState(state v1.ContainerState) ContainerState {
if state.Waiting != nil {
return ContainerStateWaiting
}
if state.Running != nil {
return ContainerStateRunning
}
if state.Terminated != nil {
return ContainerStateTerminated
}
return ContainerStateUnknown
}
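// Usage sketch (illustrative only, not part of this file): a conformance
// test wires a ConformanceContainer to the framework's PodClient, creates
// the pod, polls for readiness, and tears it down. The image, command, and
// gomega helpers below are assumptions:
//
//	cc := ConformanceContainer{
//		PodClient:     f.PodClient(),
//		Container:     v1.Container{Name: "cctest", Image: "busybox", Command: []string{"sleep", "3600"}},
//		RestartPolicy: v1.RestartPolicyNever,
//	}
//	cc.Create()
//	defer cc.Delete()
//	Eventually(cc.IsReady, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(BeTrue())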

View File

@@ -50,9 +50,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
/*
Testname: pods-readiness-probe-initial-delay
Description: Make sure that pod with readiness probe should not be
ready before initial delay and never restart.
Release : v1.9
Testname: Pod readiness probe, with initial delay
Description: Create a Pod that is configured with an initial delay set on the readiness probe. Compare the Pod start time with the initial delay. The Pod MUST be ready only after the specified initial delay.
*/
framework.ConformanceIt("with readiness probe should not be ready before initial delay and never restart [NodeConformance]", func() {
p := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil))
@@ -82,9 +82,10 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
/*
Testname: pods-readiness-probe-failure
Description: Make sure that pod with readiness probe that fails should
never be ready and never restart.
Release : v1.9
Testname: Pod readiness probe, failure
Description: Create a Pod with a readiness probe that fails consistently. When this Pod is created, the Pod MUST never be ready, never be running, and the restart count MUST be zero.
*/
framework.ConformanceIt("with readiness probe that fails should never be ready and never restart [NodeConformance]", func() {
p := podClient.Create(makePodSpec(probe.withFailing().build(), nil))
@@ -107,9 +108,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
/*
Testname: pods-cat-liveness-probe-restarted
Description: Make sure the pod is restarted with a cat /tmp/health
liveness probe.
Release : v1.9
Testname: Pod liveness probe, using local file, restart
Description: Create a Pod with a liveness probe that uses an ExecAction handler to cat the /tmp/health file. The Container deletes /tmp/health after 10 seconds, triggering the liveness probe to fail. The Pod MUST then be killed and restarted, incrementing the restart count to 1.
*/
framework.ConformanceIt("should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func() {
runLivenessTest(f, &v1.Pod{
@@ -121,7 +122,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
Containers: []v1.Container{
{
Name: "liveness",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
@@ -139,9 +140,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
/*
Testname: pods-cat-liveness-probe-not-restarted
Description: Make sure the pod is not restarted with a cat /tmp/health
liveness probe.
Release : v1.9
Testname: Pod liveness probe, using local file, no restart
Description: Pod is created with a liveness probe that uses an exec command to cat the /tmp/health file. The liveness probe MUST NOT fail the health check and the restart count MUST remain 0.
*/
framework.ConformanceIt("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func() {
runLivenessTest(f, &v1.Pod{
@@ -153,7 +154,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
Containers: []v1.Container{
{
Name: "liveness",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 600"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
@@ -171,9 +172,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
/*
Testname: pods-http-liveness-probe-restarted
Description: Make sure when http liveness probe fails, the pod should
be restarted.
Release : v1.9
Testname: Pod liveness probe, using http endpoint, restart
Description: A Pod is created with a liveness probe on the http endpoint /healthz. The http handler on /healthz returns an http error 10 seconds after the Pod is started. This MUST result in a liveness check failure. The Pod MUST then be killed and restarted, incrementing the restart count to 1.
*/
framework.ConformanceIt("should be restarted with a /healthz http liveness probe [NodeConformance]", func() {
runLivenessTest(f, &v1.Pod{
@@ -205,9 +206,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
// Slow by design (5 min)
/*
Testname: pods-restart-count
Description: Make sure when a pod gets restarted, its start count
should increase.
Release : v1.9
Testname: Pod liveness probe, using http endpoint, multiple restarts (slow)
Description: A Pod is created with a liveness probe on the http endpoint /healthz. The http handler on /healthz returns an http error 10 seconds after the Pod is started. This MUST result in a liveness check failure. The Pod MUST then be killed and restarted, incrementing the restart count to 1. The liveness probe MUST fail again after the restart, once the http handler for the /healthz endpoint on the Pod returns an http error 10 seconds from the start. Restart counts MUST increment every time the health check fails, measured up to 5 restarts.
*/
framework.ConformanceIt("should have monotonically increasing restart count [Slow][NodeConformance]", func() {
runLivenessTest(f, &v1.Pod{
@@ -238,9 +239,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
/*
Testname: pods-http-liveness-probe-not-restarted
Description: Make sure when http liveness probe succeeds, the pod
should not be restarted.
Release : v1.9
Testname: Pod liveness probe, using http endpoint, no restart
Description: A Pod is created with a liveness probe on the http endpoint /. The liveness probe on this endpoint will not fail. When the liveness probe does not fail, the restart count MUST remain zero.
*/
framework.ConformanceIt("should *not* be restarted with a /healthz http liveness probe [NodeConformance]", func() {
runLivenessTest(f, &v1.Pod{
@@ -252,7 +253,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
Containers: []v1.Container{
{
Name: "liveness",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
Ports: []v1.ContainerPort{{ContainerPort: 80}},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
@@ -272,9 +273,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
/*
Testname: pods-docker-liveness-probe-timeout
Description: Make sure that the pod is restarted with a docker exec
liveness probe with timeout.
Release : v1.9
Testname: Pod liveness probe, docker exec, restart
Description: A Pod is created with a liveness probe that uses an Exec action on the Pod. If the liveness probe call does not return within the specified timeout, the liveness probe MUST restart the Pod.
*/
It("should be restarted with a docker exec liveness probe with timeout ", func() {
// TODO: enable this test once the default exec handler supports timeout.
@@ -288,7 +289,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
Containers: []v1.Container{
{
Name: "liveness",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "sleep 600"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{

View File

@@ -28,10 +28,9 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
f := framework.NewDefaultFramework("containers")
/*
Testname: container-without-command-args
Description: When a Pod is created neither 'command' nor 'args' are
provided for a Container, ensure that the docker image's default
command and args are used.
Release : v1.9
Testname: Docker containers, without command and arguments
Description: The default command and arguments from the docker image entrypoint MUST be used when the Pod does not specify the container command.
*/
framework.ConformanceIt("should use the image defaults if command and args are blank [NodeConformance]", func() {
f.TestContainerOutput("use defaults", entrypointTestPod(), 0, []string{
@@ -40,10 +39,9 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
})
/*
Testname: container-with-args
Description: When a Pod is created and 'args' are provided for a
Container, ensure that they take precedent to the docker image's
default arguments, but that the default command is used.
Release : v1.9
Testname: Docker containers, with arguments
Description: The default command from the docker image entrypoint MUST be used when the Pod does not specify the container command, but the arguments from the Pod spec MUST override the image's default arguments when specified.
*/
framework.ConformanceIt("should be able to override the image's default arguments (docker cmd) [NodeConformance]", func() {
pod := entrypointTestPod()
@@ -57,10 +55,9 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
// Note: when you override the entrypoint, the image's arguments (docker cmd)
// are ignored.
/*
Testname: container-with-command
Description: When a Pod is created and 'command' is provided for a
Container, ensure that it takes precedent to the docker image's default
command.
Release : v1.9
Testname: Docker containers, with command
Description: The default command from the docker image entrypoint MUST NOT be used when the Pod specifies the container command. The command from the Pod spec MUST override the command in the image.
*/
framework.ConformanceIt("should be able to override the image's default command (docker entrypoint) [NodeConformance]", func() {
pod := entrypointTestPod()
@@ -72,10 +69,9 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
})
/*
Testname: container-with-command-args
Description: When a Pod is created and 'command' and 'args' are
provided for a Container, ensure that they take precedent to the docker
image's default command and arguments.
Release : v1.9
Testname: Docker containers, with command and arguments
Description: The default command and arguments from the docker image entrypoint MUST NOT be used when the Pod specifies the container command and arguments. The command and arguments from the Pod spec MUST override the command and arguments in the image.
*/
framework.ConformanceIt("should be able to override the image's default command and arguments [NodeConformance]", func() {
pod := entrypointTestPod()

View File

@@ -25,6 +25,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
)
@@ -38,9 +39,9 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
f := framework.NewDefaultFramework("downward-api")
/*
Testname: downwardapi-env-name-namespace-podip
Description: Ensure that downward API can provide pod's name, namespace
and IP address as environment variables.
Release : v1.9
Testname: DownwardAPI, environment for name, namespace and ip
Description: Downward API MUST expose Pod and Container fields as environment variables. The Pod name, namespace and IP specified as environment variables in the Pod spec MUST be visible at runtime in the container.
*/
framework.ConformanceIt("should provide pod name, namespace and IP address as env vars [NodeConformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID())
@@ -84,9 +85,9 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
})
/*
Testname: downwardapi-env-host-ip
Description: Ensure that downward API can provide an IP address for
host node as an environment variable.
Release : v1.9
Testname: DownwardAPI, environment for host ip
Description: Downward API MUST expose Pod and Container fields as environment variables. The host IP specified as an environment variable in the Pod spec MUST be visible at runtime in the container.
*/
framework.ConformanceIt("should provide host IP as an env var [NodeConformance]", func() {
framework.SkipUnlessServerVersionGTE(hostIPVersion, f.ClientSet.Discovery())
@@ -111,9 +112,9 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
})
/*
Testname: downwardapi-env-limits-requests
Description: Ensure that downward API can provide CPU/memory limit
and CPU/memory request as environment variables.
Release : v1.9
Testname: DownwardAPI, environment for CPU and memory limits and requests
Description: Downward API MUST expose CPU and memory requests and limits set through environment variables at runtime in the container.
*/
framework.ConformanceIt("should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID())
@@ -162,10 +163,9 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
})
/*
Testname: downwardapi-env-default-allocatable
Description: Ensure that downward API can provide default node
allocatable values for CPU and memory as environment variables if CPU
and memory limits are not specified for a container.
Release : v1.9
Testname: DownwardAPI, environment for default CPU and memory limits and requests
Description: Downward API MUST expose the default CPU and memory limits from node allocatable through environment variables at runtime in the container, when the container does not specify CPU and memory limits.
*/
framework.ConformanceIt("should provide default limits.cpu/memory from node allocatable [NodeConformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID())
@@ -200,7 +200,7 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
Env: env,
},
@@ -213,9 +213,9 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
})
/*
Testname: downwardapi-env-pod-uid
Description: Ensure that downward API can provide pod UID as an
environment variable.
Release : v1.9
Testname: DownwardAPI, environment for Pod UID
Description: Downward API MUST expose Pod UID set through environment variables at runtime in the container.
*/
framework.ConformanceIt("should provide pod UID as env vars [NodeConformance]", func() {
framework.SkipUnlessServerVersionGTE(podUIDVersion, f.ClientSet.Discovery())
@@ -300,7 +300,7 @@ var _ = framework.KubeDescribe("Downward API [Serial] [Disruptive] [NodeFeature:
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
Env: env,
},
@@ -325,7 +325,7 @@ func testDownwardAPI(f *framework.Framework, podName string, env []v1.EnvVar, ex
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
@@ -357,7 +357,7 @@ func testDownwardAPIForEphemeralStorage(f *framework.Framework, podName string,
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{

View File

@@ -25,6 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -40,9 +41,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-podname
Description: Ensure that downward API can provide pod's name through
DownwardAPIVolumeFiles.
Release : v1.9
Testname: DownwardAPI volume, pod name
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the Pod name. The container runtime MUST be able to access the Pod name from the specified path on the mounted volume.
*/
framework.ConformanceIt("should provide podname only [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -54,9 +55,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-set-default-mode
Description: Ensure that downward API can set default file permission
mode for DownwardAPIVolumeFiles if no mode is specified.
Release : v1.9
Testname: DownwardAPI volume, volume mode 0400
Description: A Pod is configured with DownwardAPIVolumeSource with the volume source mode set to -r-------- and DownwardAPIVolumeFiles contains an item for the Pod name. The container runtime MUST be able to access the Pod name from the specified path on the mounted volume.
*/
framework.ConformanceIt("should set DefaultMode on files [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -69,9 +70,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-set-mode
Description: Ensure that downward API can set file permission mode for
DownwardAPIVolumeFiles.
Release : v1.9
Testname: DownwardAPI volume, file mode 0400
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the Pod name with the file mode set to -r--------. The container runtime MUST be able to access the Pod name from the specified path on the mounted volume.
*/
framework.ConformanceIt("should set mode on item file [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -113,9 +114,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-update-label
Description: Ensure that downward API updates labels in
DownwardAPIVolumeFiles when pod's labels get modified.
Release : v1.9
Testname: DownwardAPI volume, update label
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a list of items, one for each of the Pod labels. The container runtime MUST be able to access Pod labels from the specified path on the mounted volume. Update the labels by adding a new label to the running Pod. The new label MUST be available from the mounted volume.
*/
framework.ConformanceIt("should update labels on modification [NodeConformance]", func() {
labels := map[string]string{}
@@ -145,9 +146,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-update-annotation
Description: Ensure that downward API updates annotations in
DownwardAPIVolumeFiles when pod's annotations get modified.
Release : v1.9
Testname: DownwardAPI volume, update annotations
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a list of items, one for each of the Pod annotations. The container runtime MUST be able to access Pod annotations from the specified path on the mounted volume. Update the annotations by adding a new annotation to the running Pod. The new annotation MUST be available from the mounted volume.
*/
framework.ConformanceIt("should update annotations on modification [NodeConformance]", func() {
annotations := map[string]string{}
@@ -179,9 +180,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-cpu-limit
Description: Ensure that downward API can provide container's CPU limit
through DownwardAPIVolumeFiles.
Release : v1.9
Testname: DownwardAPI volume, CPU limits
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the CPU limits. The container runtime MUST be able to access CPU limits from the specified path on the mounted volume.
*/
framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -193,9 +194,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-memory-limit
Description: Ensure that downward API can provide container's memory
limit through DownwardAPIVolumeFiles.
Release : v1.9
Testname: DownwardAPI volume, memory limits
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the memory limits. The container runtime MUST be able to access memory limits from the specified path on the mounted volume.
*/
framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -207,9 +208,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-cpu-request
Description: Ensure that downward API can provide container's CPU
request through DownwardAPIVolumeFiles.
Release : v1.9
Testname: DownwardAPI volume, CPU request
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the CPU request. The container runtime MUST be able to access the CPU request from the specified path on the mounted volume.
*/
framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -221,9 +222,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-memory-request
Description: Ensure that downward API can provide container's memory
request through DownwardAPIVolumeFiles.
Release : v1.9
Testname: DownwardAPI volume, memory request
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the memory request. The container runtime MUST be able to access the memory request from the specified path on the mounted volume.
*/
framework.ConformanceIt("should provide container's memory request [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -235,10 +236,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-default-cpu
Description: Ensure that downward API can provide default node
allocatable value for CPU through DownwardAPIVolumeFiles if CPU
limit is not specified for a container.
Release : v1.9
Testname: DownwardAPI volume, CPU limit, default node allocatable
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the CPU limits. CPU limits are not specified for the container. The container runtime MUST be able to access CPU limits from the specified path on the mounted volume and the value MUST be the default node allocatable.
*/
framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -248,10 +248,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-default-memory
Description: Ensure that downward API can provide default node
allocatable value for memory through DownwardAPIVolumeFiles if memory
limit is not specified for a container.
Release : v1.9
Testname: DownwardAPI volume, memory limit, default node allocatable
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the memory limits. Memory limits are not specified for the container. The container runtime MUST be able to access memory limits from the specified path on the mounted volume and the value MUST be the default node allocatable.
*/
framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -268,7 +267,7 @@ func downwardAPIVolumePodForModeTest(name, filePath string, itemMode, defaultMod
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--file_mode=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
@@ -294,7 +293,7 @@ func downwardAPIVolumePodForSimpleTest(name string, filePath string) *v1.Pod {
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--file_content=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
@@ -325,7 +324,7 @@ func downwardAPIVolumeBaseContainers(name, filePath string) []v1.Container {
return []v1.Container{
{
Name: name,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--file_content=" + filePath},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
@@ -353,7 +352,7 @@ func downwardAPIVolumeDefaultBaseContainer(name, filePath string) []v1.Container
return []v1.Container{
{
Name: name,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--file_content=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
@@ -372,7 +371,7 @@ func downwardAPIVolumePodForUpdateTest(name string, labels, annotations map[stri
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath},
VolumeMounts: []v1.VolumeMount{
{

View File

@@ -67,139 +67,126 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() {
})
/*
Testname: volume-emptydir-mode-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure the volume has 0777 unix file permissions and tmpfs
mount type.
Release : v1.9
Testname: EmptyDir, medium memory, volume mode default
Description: When a Pod is created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs.
*/
framework.ConformanceIt("volume on tmpfs should have the correct mode [NodeConformance]", func() {
doTestVolumeMode(f, testImageRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-root-0644-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a root owned file with 0644 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
Release : v1.9
Testname: EmptyDir, medium memory, volume mode 0644
Description: When a Pod is created with an 'emptyDir' Volume, 'medium' as 'Memory', and the volume mode set to 0644, the volume MUST have mode -rw-r--r-- and mount type set to tmpfs, and the contents MUST be readable.
*/
framework.ConformanceIt("should support (root,0644,tmpfs) [NodeConformance]", func() {
doTest0644(f, testImageRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-root-0666-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a root owned file with 0666 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
Release : v1.9
Testname: EmptyDir, medium memory, volume mode 0666
Description: When a Pod is created with an 'emptyDir' Volume, 'medium' as 'Memory', and the volume mode set to 0666, the volume MUST have mode -rw-rw-rw- and mount type set to tmpfs, and the contents MUST be readable.
*/
framework.ConformanceIt("should support (root,0666,tmpfs) [NodeConformance]", func() {
doTest0666(f, testImageRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-root-0777-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a root owned file with 0777 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
Release : v1.9
Testname: EmptyDir, medium memory, volume mode 0777
Description: When a Pod is created with an 'emptyDir' Volume, 'medium' as 'Memory', and the volume mode set to 0777, the volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs, and the contents MUST be readable.
*/
framework.ConformanceIt("should support (root,0777,tmpfs) [NodeConformance]", func() {
doTest0777(f, testImageRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-user-0644-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a user owned file with 0644 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
Release : v1.9
Testname: EmptyDir, medium memory, volume mode 0644, non-root user
Description: When a Pod is created with an 'emptyDir' Volume, 'medium' as 'Memory', and the volume mode set to 0644, the volume is mounted into a container that runs as a non-root user. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs, and the contents MUST be readable.
*/
framework.ConformanceIt("should support (non-root,0644,tmpfs) [NodeConformance]", func() {
doTest0644(f, testImageNonRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-user-0666-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a user owned file with 0666 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
Release : v1.9
Testname: EmptyDir, medium memory, volume mode 0666, non-root user
Description: When a Pod is created with an 'emptyDir' Volume, 'medium' as 'Memory', and the volume mode set to 0666, the volume is mounted into a container that runs as a non-root user. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs, and the contents MUST be readable.
*/
framework.ConformanceIt("should support (non-root,0666,tmpfs) [NodeConformance]", func() {
doTest0666(f, testImageNonRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-user-0777-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a user owned file with 0777 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
Release : v1.9
Testname: EmptyDir, medium memory, volume mode 0777, non-root user
Description: When a Pod is created with an 'emptyDir' Volume, 'medium' as 'Memory', and the volume mode set to 0777, the volume is mounted into a container that runs as a non-root user. The volume MUST have mode -rwxrwxrwx and mount type set to tmpfs, and the contents MUST be readable.
*/
framework.ConformanceIt("should support (non-root,0777,tmpfs) [NodeConformance]", func() {
doTest0777(f, testImageNonRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-mode
Description: For a Pod created with an 'emptyDir' Volume, ensure the
volume has 0777 unix file permissions.
Release : v1.9
Testname: EmptyDir, medium default, volume mode default
Description: When a Pod is created with an 'emptyDir' Volume, the volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs.
*/
framework.ConformanceIt("volume on default medium should have the correct mode [NodeConformance]", func() {
doTestVolumeMode(f, testImageRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-root-0644
Description: For a Pod created with an 'emptyDir' Volume, ensure a
root owned file with 0644 unix file permissions is created and enforced
correctly.
Release : v1.9
Testname: EmptyDir, medium default, volume mode 0644
Description: When a Pod is created with an 'emptyDir' Volume and the volume mode set to 0644, the volume MUST have mode -rw-r--r-- and mount type set to tmpfs, and the contents MUST be readable.
*/
framework.ConformanceIt("should support (root,0644,default) [NodeConformance]", func() {
doTest0644(f, testImageRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-root-0666
Description: For a Pod created with an 'emptyDir' Volume, ensure a
root owned file with 0666 unix file permissions is created and enforced
correctly.
Release : v1.9
Testname: EmptyDir, medium default, volume mode 0666
Description: When a Pod is created with an 'emptyDir' Volume and the volume mode set to 0666, the volume MUST have mode -rw-rw-rw- and mount type set to tmpfs, and the contents MUST be readable.
*/
framework.ConformanceIt("should support (root,0666,default) [NodeConformance]", func() {
doTest0666(f, testImageRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-root-0777
Description: For a Pod created with an 'emptyDir' Volume, ensure a
root owned file with 0777 unix file permissions is created and enforced
correctly.
Release : v1.9
Testname: EmptyDir, medium default, volume mode 0777
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0777. The volume MUST have mode set as -rwxrwxrwx and the contents MUST be readable.
*/
framework.ConformanceIt("should support (root,0777,default) [NodeConformance]", func() {
doTest0777(f, testImageRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-user-0644
Description: For a Pod created with an 'emptyDir' Volume, ensure a
user owned file with 0644 unix file permissions is created and enforced
correctly.
Release : v1.9
Testname: EmptyDir, medium default, volume mode 0644, non-root user
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0644. Volume is mounted into the container where the container is run as a non-root user. The volume MUST have mode -rw-r--r-- and the contents MUST be readable.
*/
framework.ConformanceIt("should support (non-root,0644,default) [NodeConformance]", func() {
doTest0644(f, testImageNonRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-user-0666
Description: For a Pod created with an 'emptyDir' Volume, ensure a
user owned file with 0666 unix file permissions is created and enforced
correctly.
Release : v1.9
Testname: EmptyDir, medium default, volume mode 0666, non-root user
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0666. Volume is mounted into the container where the container is run as a non-root user. The volume MUST have mode -rw-rw-rw- and the contents MUST be readable.
*/
framework.ConformanceIt("should support (non-root,0666,default) [NodeConformance]", func() {
doTest0666(f, testImageNonRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-user-0777
Description: For a Pod created with an 'emptyDir' Volume, ensure a
user owned file with 0777 unix file permissions is created and enforced
correctly.
Release : v1.9
Testname: EmptyDir, medium default, volume mode 0777, non-root user
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0777. Volume is mounted into the container where the container is run as a non-root user. The volume MUST have mode -rwxrwxrwx and the contents MUST be readable.
*/
framework.ConformanceIt("should support (non-root,0777,default) [NodeConformance]", func() {
doTest0777(f, testImageNonRootUid, v1.StorageMediumDefault)

View File

@@ -21,6 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -33,9 +34,9 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
f := framework.NewDefaultFramework("var-expansion")
/*
Testname: var-expansion-env
Description: Make sure environment variables can be set using an
expansion of previously defined environment variables
Release : v1.9
Testname: Environment variables, expansion
Description: Create a Pod with environment variables. Environment variables defined using previously defined environment variables MUST expand to proper values.
*/
framework.ConformanceIt("should allow composing env vars into new env vars [NodeConformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
@@ -48,7 +49,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
Env: []v1.EnvVar{
{
@@ -78,9 +79,9 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
})
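A minimal sketch of the expansion being tested (illustrative names, assuming the k8s.io/api/core/v1 package imported as v1): a $(VAR) reference in a later EnvVar value is resolved by the kubelet against previously defined variables.

env := []v1.EnvVar{
	{Name: "FOO", Value: "foo-value"},
	// The kubelet expands $(FOO) from the entry above, so the
	// container sees BAR=foo-value-suffix.
	{Name: "BAR", Value: "$(FOO)-suffix"},
}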
/*
Testname: var-expansion-command
Description: Make sure a container's commands can be set using an
expansion of environment variables.
Release : v1.9
Testname: Environment variables, command expansion
Description: Create a Pod with environment variables and container command using them. Container command using the defined environment variables MUST expand to proper values.
*/
framework.ConformanceIt("should allow substituting values in a container's command [NodeConformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
@@ -93,7 +94,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "TEST_VAR=wrong echo \"$(TEST_VAR)\""},
Env: []v1.EnvVar{
{
@@ -113,9 +114,9 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
})
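The same expansion applies to a container's Command and Args; a hedged sketch with illustrative names, including the $$ escape that passes a literal $(...) through to the shell:

c := v1.Container{
	Name:  "expansion-demo",
	Image: imageutils.GetE2EImage(imageutils.BusyBox),
	Env:   []v1.EnvVar{{Name: "TEST_VAR", Value: "test-value"}},
	// The kubelet substitutes $(TEST_VAR) before the shell runs;
	// writing $$(TEST_VAR) instead would hand the shell a literal $(TEST_VAR).
	Command: []string{"sh", "-c", "echo \"$(TEST_VAR)\""},
}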
/*
Testname: var-expansion-arg
Description: Make sure a container's args can be set using an
expansion of environment variables.
Release : v1.9
Testname: Environment variables, command argument expansion
Description: Create a Pod with environment variables and container command arguments using them. Container command arguments using the defined environment variables MUST expand to proper values.
*/
framework.ConformanceIt("should allow substituting values in a container's args [NodeConformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
@@ -128,7 +129,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c"},
Args: []string{"TEST_VAR=wrong echo \"$(TEST_VAR)\""},
Env: []v1.EnvVar{
@@ -164,7 +165,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "test -d /testcontainer/" + podName + ";echo $?"},
Env: []v1.EnvVar{
{
@@ -225,7 +226,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Env: []v1.EnvVar{
{
Name: "POD_NAME",
@@ -274,7 +275,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Env: []v1.EnvVar{
{
Name: "POD_NAME",

View File

@@ -24,6 +24,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
)
@@ -40,10 +41,9 @@ var _ = Describe("[sig-storage] HostPath", func() {
})
/*
Testname: volume-hostpath-mode
Description: For a Pod created with a 'HostPath' Volume, ensure the
volume is a directory with 0777 unix file permissions and that it has
the sticky bit (mode flag t) set.
Release : v1.9
Testname: Host path, volume mode default
Description: Create a Pod with host volume mounted. The volume mounted MUST be a directory with permissions mode -rwxrwxrwx and it MUST have the sticky bit (mode flag t) set.
*/
framework.ConformanceIt("should give a volume the correct mode [NodeConformance]", func() {
source := &v1.HostPathVolumeSource{
@@ -136,7 +136,9 @@ var _ = Describe("[sig-storage] HostPath", func() {
// Create the subPath directory on the host
existing := path.Join(source.Path, subPath)
result, err := framework.SSH(fmt.Sprintf("mkdir -p %s", existing), framework.GetNodeExternalIP(&nodeList.Items[0]), framework.TestContext.Provider)
externalIP, err := framework.GetNodeExternalIP(&nodeList.Items[0])
framework.ExpectNoError(err)
result, err := framework.SSH(fmt.Sprintf("mkdir -p %s", existing), externalIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
framework.ExpectNoError(err)
if result.Code != 0 {
@@ -180,7 +182,9 @@ var _ = Describe("[sig-storage] HostPath", func() {
// Create the subPath file on the host
existing := path.Join(source.Path, subPath)
result, err := framework.SSH(fmt.Sprintf("echo \"mount-tester new file\" > %s", existing), framework.GetNodeExternalIP(&nodeList.Items[0]), framework.TestContext.Provider)
externalIP, err := framework.GetNodeExternalIP(&nodeList.Items[0])
framework.ExpectNoError(err)
result, err := framework.SSH(fmt.Sprintf("echo \"mount-tester new file\" > %s", existing), externalIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
framework.ExpectNoError(err)
if result.Code != 0 {
@@ -236,7 +240,7 @@ func testPodWithHostVol(path string, source *v1.HostPathVolumeSource) *v1.Pod {
Containers: []v1.Container{
{
Name: containerName1,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
@@ -249,7 +253,7 @@ func testPodWithHostVol(path string, source *v1.HostPathVolumeSource) *v1.Pod {
},
{
Name: containerName2,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,

View File

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
"context"
"fmt"
"strconv"
"time"
@@ -26,6 +27,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/watch"
watchtools "k8s.io/client-go/tools/watch"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/test/e2e/framework"
@@ -42,7 +44,15 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
podClient = f.PodClient()
})
It("should invoke init containers on a RestartNever pod", func() {
/*
Release: v1.12
Testname: init-container-starts-app-restartnever-pod
Description: Ensure that all InitContainers are started,
all containers in the pod are voluntarily terminated with exit status 0,
and the system does not restart any of these containers
when the Pod has restart policy as RestartNever.
*/
framework.ConformanceIt("should invoke init containers on a RestartNever pod", func() {
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
@@ -59,19 +69,19 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
InitContainers: []v1.Container{
{
Name: "init1",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
},
{
Name: "init2",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
},
},
Containers: []v1.Container{
{
Name: "run1",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
},
},
@@ -82,7 +92,9 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodCompleted)
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
defer cancel()
event, err := watchtools.UntilWithoutRetry(ctx, wr, conditions.PodCompleted)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
@@ -99,7 +111,15 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
}
})
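This is the watch pattern that replaces watch.Until throughout these tests; a standalone sketch, assuming w is a watch.Interface such as the recorder above:

// ContextWithOptionalTimeout treats a zero timeout as "no timeout".
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
defer cancel()
// UntilWithoutRetry consumes events from w until the condition returns
// true or the context expires; unlike a retrying helper, it does not
// re-establish a dropped watch.
event, err := watchtools.UntilWithoutRetry(ctx, w, conditions.PodCompleted)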
It("should invoke init containers on a RestartAlways pod", func() {
/*
Release: v1.12
Testname: init-container-starts-app-restartalways-pod
Description: Ensure that all InitContainers are started,
all containers in the pod have started,
and at least one container is still running or is in the process of being restarted
when the Pod has restart policy as RestartAlways.
*/
framework.ConformanceIt("should invoke init containers on a RestartAlways pod", func() {
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
@@ -115,12 +135,12 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
InitContainers: []v1.Container{
{
Name: "init1",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
},
{
Name: "init2",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
},
},
@@ -131,7 +151,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(50*1024*1024, resource.DecimalSI),
},
},
},
@@ -143,7 +163,9 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodRunning)
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
defer cancel()
event, err := watchtools.UntilWithoutRetry(ctx, wr, conditions.PodRunning)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
@@ -160,7 +182,15 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
}
})
It("should not start app containers if init containers fail on a RestartAlways pod", func() {
/*
Release: v1.12
Testname: init-container-fails-stops-app-restartalways-pod
Description: Ensure that the app container is not started
when an InitContainer fails to start,
the Pod has restarted a few times,
and the Pod has restart policy as RestartAlways.
*/
framework.ConformanceIt("should not start app containers if init containers fail on a RestartAlways pod", func() {
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
@@ -177,12 +207,12 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
InitContainers: []v1.Container{
{
Name: "init1",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/false"},
},
{
Name: "init2",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
},
},
@@ -193,7 +223,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(50*1024*1024, resource.DecimalSI),
},
},
},
@@ -206,8 +236,10 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(
framework.PodStartTimeout, wr,
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
defer cancel()
event, err := watchtools.UntilWithoutRetry(
ctx, wr,
// check for the first container to fail at least once
func(evt watch.Event) (bool, error) {
switch t := evt.Object.(type) {
@@ -268,7 +300,13 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
})
It("should not start app containers and fail the pod if init containers fail on a RestartNever pod", func() {
/*
Release: v1.12
Testname: init-container-fails-stops-app-restartnever-pod
Description: Ensure that the app container is not started
when at least one InitContainer fails to start and the Pod has restart policy as RestartNever.
*/
framework.ConformanceIt("should not start app containers and fail the pod if init containers fail on a RestartNever pod", func() {
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
@@ -285,24 +323,24 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
InitContainers: []v1.Container{
{
Name: "init1",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
},
{
Name: "init2",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/false"},
},
},
Containers: []v1.Container{
{
Name: "run1",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(50*1024*1024, resource.DecimalSI),
},
},
},
@@ -316,8 +354,10 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(
framework.PodStartTimeout, wr,
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
defer cancel()
event, err := watchtools.UntilWithoutRetry(
ctx, wr,
// check for the second container to fail at least once
func(evt watch.Event) (bool, error) {
switch t := evt.Object.(type) {

View File

@@ -51,9 +51,12 @@ var _ = framework.KubeDescribe("KubeletManagedEtcHosts", func() {
}
/*
Testname: kubelet-managed-etc-hosts
Description: Make sure Kubelet correctly manages /etc/hosts and mounts
it into the container.
Release : v1.9
Testname: Kubelet, managed etc hosts
Description: Create a Pod with containers with hostNetwork set to false, where one of the containers mounts the /etc/hosts file from the host. Create a second Pod with hostNetwork set to true.
1. The Pod with hostNetwork=false MUST have /etc/hosts of containers managed by the Kubelet.
2. The Pod with hostNetwork=false whose container mounts /etc/hosts from the host MUST not have that file managed by the Kubelet.
3. The Pod with hostNetwork=true MUST not have /etc/hosts managed by the Kubelet.
*/
framework.ConformanceIt("should test kubelet managed /etc/hosts file [NodeConformance]", func() {
By("Setting up the test")

View File

@@ -0,0 +1,173 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
f := framework.NewDefaultFramework("container-lifecycle-hook")
var podClient *framework.PodClient
const (
podCheckInterval = 1 * time.Second
postStartWaitTimeout = 2 * time.Minute
preStopWaitTimeout = 30 * time.Second
)
Context("when create a pod with lifecycle hook", func() {
var targetIP string
podHandleHookRequest := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-handle-http-request",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pod-handle-http-request",
Image: imageutils.GetE2EImage(imageutils.Netexec),
Ports: []v1.ContainerPort{
{
ContainerPort: 8080,
Protocol: v1.ProtocolTCP,
},
},
},
},
},
}
BeforeEach(func() {
podClient = f.PodClient()
By("create the container to handle the HTTPGet hook request.")
newPod := podClient.CreateSync(podHandleHookRequest)
targetIP = newPod.Status.PodIP
})
testPodWithHook := func(podWithHook *v1.Pod) {
By("create the pod with lifecycle hook")
podClient.CreateSync(podWithHook)
if podWithHook.Spec.Containers[0].Lifecycle.PostStart != nil {
By("check poststart hook")
Eventually(func() error {
return podClient.MatchContainerOutput(podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[0].Name,
`GET /echo\?msg=poststart`)
}, postStartWaitTimeout, podCheckInterval).Should(BeNil())
}
By("delete the pod with lifecycle hook")
podClient.DeleteSync(podWithHook.Name, metav1.NewDeleteOptions(15), framework.DefaultPodDeletionTimeout)
if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
By("check prestop hook")
Eventually(func() error {
return podClient.MatchContainerOutput(podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[0].Name,
`GET /echo\?msg=prestop`)
}, preStopWaitTimeout, podCheckInterval).Should(BeNil())
}
}
/*
Release : v1.9
Testname: Pod Lifecycle, post start exec hook
Description: When a post start handler is specified in the container lifecycle using a Exec action, then the handler MUST be invoked after the start of the container. A server pod is created that will serve http requests, create a second pod with a container lifecycle specifying a post start that invokes the server pod using ExecAction to validate that the post start is executed.
*/
framework.ConformanceIt("should execute poststart exec hook properly [NodeConformance]", func() {
lifecycle := &v1.Lifecycle{
PostStart: &v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"sh", "-c", "curl http://" + targetIP + ":8080/echo?msg=poststart"},
},
},
}
podWithHook := getPodWithHook("pod-with-poststart-exec-hook", imageutils.GetE2EImage(imageutils.Hostexec), lifecycle)
testPodWithHook(podWithHook)
})
/*
Release : v1.9
Testname: Pod Lifecycle, prestop exec hook
Description: When a pre-stop handler is specified in the container lifecycle using a Exec action, then the handler MUST be invoked before the container is terminated. A server pod is created that will serve http requests, create a second pod with a container lifecycle specifying a pre-stop that invokes the server pod using ExecAction to validate that the pre-stop is executed.
*/
framework.ConformanceIt("should execute prestop exec hook properly [NodeConformance]", func() {
lifecycle := &v1.Lifecycle{
PreStop: &v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"sh", "-c", "curl http://" + targetIP + ":8080/echo?msg=prestop"},
},
},
}
podWithHook := getPodWithHook("pod-with-prestop-exec-hook", imageutils.GetE2EImage(imageutils.Hostexec), lifecycle)
testPodWithHook(podWithHook)
})
/*
Release : v1.9
Testname: Pod Lifecycle, post start http hook
Description: When a post start handler is specified in the container lifecycle using a HttpGet action, then the handler MUST be invoked after the start of the container. A server pod is created that will serve http requests, create a second pod with a container lifecycle specifying a post start that invokes the server pod to validate that the post start is executed.
*/
framework.ConformanceIt("should execute poststart http hook properly [NodeConformance]", func() {
lifecycle := &v1.Lifecycle{
PostStart: &v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/echo?msg=poststart",
Host: targetIP,
Port: intstr.FromInt(8080),
},
},
}
podWithHook := getPodWithHook("pod-with-poststart-http-hook", imageutils.GetPauseImageName(), lifecycle)
testPodWithHook(podWithHook)
})
/*
Release : v1.9
Testname: Pod Lifecycle, prestop http hook
Description: When a pre-stop handler is specified in the container lifecycle using a HttpGet action, then the handler MUST be invoked before the container is terminated. A server pod is created that will serve http requests, create a second pod with a container lifecycle specifying a pre-stop that invokes the server pod to validate that the pre-stop is executed.
*/
framework.ConformanceIt("should execute prestop http hook properly [NodeConformance]", func() {
lifecycle := &v1.Lifecycle{
PreStop: &v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/echo?msg=prestop",
Host: targetIP,
Port: intstr.FromInt(8080),
},
},
}
podWithHook := getPodWithHook("pod-with-prestop-http-hook", imageutils.GetPauseImageName(), lifecycle)
testPodWithHook(podWithHook)
})
})
})
func getPodWithHook(name string, image string, lifecycle *v1.Lifecycle) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: name,
Image: image,
Lifecycle: lifecycle,
},
},
},
}
}
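A usage sketch for the helper above; the host IP is hypothetical, whereas the tests take it from the handler pod's status:

lifecycle := &v1.Lifecycle{
	PreStop: &v1.Handler{
		HTTPGet: &v1.HTTPGetAction{
			Path: "/echo?msg=prestop",
			Host: "10.0.0.5", // hypothetical; in the tests this is targetIP
			Port: intstr.FromInt(8080),
		},
	},
}
podWithHook := getPodWithHook("example-prestop-pod", imageutils.GetPauseImageName(), lifecycle)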

View File

@@ -31,9 +31,10 @@ var _ = Describe("[sig-network] Networking", func() {
// expect exactly one unique hostname. Each of these endpoints reports
// its own hostname.
/*
Testname: networking-intra-pod-http
Description: Try to hit test endpoints from a test container and make
sure each of them can report a unique hostname.
Release : v1.9
Testname: Networking, intra pod http
Description: Create a hostexec pod that is capable of running curl and netcat commands. Create a test Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 for udp. The netserver service proxies are created on a specified number of nodes.
The kubectl exec on the webserver container MUST reach a http port on each of the service proxy endpoints in the cluster and the request MUST be successful. The container will execute a curl command to reach the service port within the specified max retry limit and MUST result in reporting unique hostnames.
*/
framework.ConformanceIt("should function for intra-pod communication: http [NodeConformance]", func() {
config := framework.NewCoreNetworkingTestConfig(f)
@@ -43,9 +44,10 @@ var _ = Describe("[sig-network] Networking", func() {
})
/*
Testname: networking-intra-pod-udp
Description: Try to hit test endpoints from a test container using udp
and make sure each of them can report a unique hostname.
Release : v1.9
Testname: Networking, intra pod udp
Description: Create a hostexec pod that is capable of running curl and netcat commands. Create a test Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 for udp. The netserver service proxies are created on a specified number of nodes.
The kubectl exec on the webserver container MUST reach a udp port on each of the service proxy endpoints in the cluster and the request MUST be successful. The container will execute a curl command to reach the service port within the specified max retry limit and MUST result in reporting unique hostnames.
*/
framework.ConformanceIt("should function for intra-pod communication: udp [NodeConformance]", func() {
config := framework.NewCoreNetworkingTestConfig(f)
@@ -55,9 +57,10 @@ var _ = Describe("[sig-network] Networking", func() {
})
/*
Testname: networking-node-pod-http
Description: Try to hit test endpoints from the pod and make sure each
of them can report a unique hostname.
Release : v1.9
Testname: Networking, intra pod http, from node
Description: Create a hostexec pod that is capable of running curl and netcat commands. Create a test Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 for udp. The netserver service proxies are created on a specified number of nodes.
The kubectl exec on the webserver container MUST reach a http port on each of the service proxy endpoints in the cluster using a http post (protocol=tcp) and the request MUST be successful. The container will execute a curl command to reach the service port within the specified max retry limit and MUST result in reporting unique hostnames.
*/
framework.ConformanceIt("should function for node-pod communication: http [NodeConformance]", func() {
config := framework.NewCoreNetworkingTestConfig(f)
@@ -67,9 +70,10 @@ var _ = Describe("[sig-network] Networking", func() {
})
/*
Testname: networking-node-pod-udp
Description: Try to hit test endpoints from the pod using udp and make sure
each of them can report a unique hostname.
Release : v1.9
Testname: Networking, intra pod udp, from node
Description: Create a hostexec pod that is capable of running curl and netcat commands. Create a test Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 for udp. The netserver service proxies are created on a specified number of nodes.
The kubectl exec on the webserver container MUST reach a udp port on each of the service proxy endpoints in the cluster using a http post (protocol=udp) and the request MUST be successful. The container will execute a curl command to reach the service port within the specified max retry limit and MUST result in reporting unique hostnames.
*/
framework.ConformanceIt("should function for node-pod communication: udp [NodeConformance]", func() {
config := framework.NewCoreNetworkingTestConfig(f)

92 vendor/k8s.io/kubernetes/test/e2e/common/node_lease.go generated vendored Normal file
View File

@@ -0,0 +1,92 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"time"
coordv1beta1 "k8s.io/api/coordination/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("[Feature:NodeLease][NodeAlphaFeature:NodeLease]", func() {
f := framework.NewDefaultFramework("node-lease-test")
Context("when the NodeLease feature is enabled", func() {
It("the Kubelet should create and update a lease in the kube-node-lease namespace", func() {
leaseClient := f.ClientSet.CoordinationV1beta1().Leases(corev1.NamespaceNodeLease)
var (
err error
lease *coordv1beta1.Lease
)
// check that lease for this Kubelet exists in the kube-node-lease namespace
Eventually(func() error {
lease, err = leaseClient.Get(framework.TestContext.NodeName, metav1.GetOptions{})
if err != nil {
return err
}
return nil
}, 5*time.Minute, 5*time.Second).Should(BeNil())
// check basic expectations for the lease
Expect(expectLease(lease)).To(BeNil())
// ensure that at least one lease renewal happens within the
// lease duration by checking for a change to renew time
Eventually(func() error {
newLease, err := leaseClient.Get(framework.TestContext.NodeName, metav1.GetOptions{})
if err != nil {
return err
}
// check basic expectations for the latest lease
if err := expectLease(newLease); err != nil {
return err
}
// check that RenewTime has been updated on the latest lease
newTime := (*newLease.Spec.RenewTime).Time
oldTime := (*lease.Spec.RenewTime).Time
if !newTime.After(oldTime) {
return fmt.Errorf("new lease has time %v, which is not after old lease time %v", newTime, oldTime)
}
return nil
}, time.Duration(*lease.Spec.LeaseDurationSeconds)*time.Second,
time.Duration(*lease.Spec.LeaseDurationSeconds/3)*time.Second).Should(BeNil())
})
})
})
func expectLease(lease *coordv1beta1.Lease) error {
// expect values for HolderIdentity, LeaseDurationSeconds, and RenewTime
if lease.Spec.HolderIdentity == nil {
return fmt.Errorf("Spec.HolderIdentity should not be nil")
}
if lease.Spec.LeaseDurationSeconds == nil {
return fmt.Errorf("Spec.LeaseDurationSeconds should not be nil")
}
if lease.Spec.RenewTime == nil {
return fmt.Errorf("Spec.RenewTime should not be nil")
}
// ensure that the HolderIdentity matches the node name
if *lease.Spec.HolderIdentity != framework.TestContext.NodeName {
return fmt.Errorf("Spec.HolderIdentity (%v) should match the node name (%v)", *lease.Spec.HolderIdentity, framework.TestContext.NodeName)
}
return nil
}
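A minimal sketch of reading the per-node lease outside the framework (clientset and nodeName are assumed to be in scope):

lease, err := clientset.CoordinationV1beta1().Leases(corev1.NamespaceNodeLease).Get(nodeName, metav1.GetOptions{})
if err != nil {
	return err
}
// The kubelet renews the lease well within LeaseDurationSeconds, so two
// reads spaced less than that duration apart should show RenewTime advance.
fmt.Printf("holder=%s renewed=%v\n", *lease.Spec.HolderIdentity, lease.Spec.RenewTime.Time)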

View File

@@ -39,6 +39,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/types"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -46,6 +47,10 @@ var (
buildBackOffDuration = time.Minute
syncLoopFrequency = 10 * time.Second
maxBackOffTolerance = time.Duration(1.3 * float64(kubelet.MaxContainerBackOff))
// maxReadyStatusUpdateTolerance specifies the latency that allows kubelet to update pod status.
// When kubelet is under heavy load (tests may be parallelized), the delay may be longer, hence
// causing tests to be flaky.
maxReadyStatusUpdateTolerance = 10 * time.Second
)
// testHostIP tests that a pod gets a host IP
@@ -129,9 +134,9 @@ var _ = framework.KubeDescribe("Pods", func() {
})
/*
Testname: pods-created-pod-assigned-hostip
Description: Make sure when a pod is created that it is assigned a host IP
Address.
Release : v1.9
Testname: Pods, assigned hostip
Description: Create a Pod. Pod status MUST return successfully and contain a valid IP address.
*/
framework.ConformanceIt("should get a host IP [NodeConformance]", func() {
name := "pod-hostip-" + string(uuid.NewUUID())
@@ -151,9 +156,9 @@ var _ = framework.KubeDescribe("Pods", func() {
})
/*
Testname: pods-submitted-removed
Description: Makes sure a pod is created, a watch can be setup for the pod,
pod creation was observed, pod is deleted, and pod deletion is observed.
Release : v1.9
Testname: Pods, lifecycle
Description: A Pod is created with a unique label. Pod MUST be accessible when queried using the label selector upon creation. Add a watch, check if the Pod is running. The Pod is then deleted and the pod deletion timestamp is observed. The watch MUST return the pod deleted event. A query with the original selector for the Pod MUST return an empty list.
*/
framework.ConformanceIt("should be submitted and removed [NodeConformance]", func() {
By("creating the pod")
@@ -171,7 +176,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
},
},
},
@@ -277,8 +282,9 @@ var _ = framework.KubeDescribe("Pods", func() {
})
/*
Testname: pods-updated-successfully
Description: Make sure it is possible to successfully update a pod's labels.
Release : v1.9
Testname: Pods, update
Description: Create a Pod with a unique label. Query for the Pod with the label as selector MUST be successful. Update the pod to change the value of the Label. Query for the Pod with the new value for the label MUST be successful.
*/
framework.ConformanceIt("should be updated [NodeConformance]", func() {
By("creating the pod")
@@ -296,7 +302,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
},
},
},
@@ -330,10 +336,9 @@ var _ = framework.KubeDescribe("Pods", func() {
})
/*
Testname: pods-update-active-deadline-seconds
Description: Make sure it is possible to create a pod, update its
activeDeadlineSeconds value, and then wait for the deadline to pass
and verify the pod is terminated.
Release : v1.9
Testname: Pods, ActiveDeadlineSeconds
Description: Create a Pod with a unique label. Query for the Pod with the label as selector MUST be successful. The Pod is updated with ActiveDeadlineSeconds set on the Pod spec. Pod MUST terminate once the specified time elapses.
*/
framework.ConformanceIt("should allow activeDeadlineSeconds to be updated [NodeConformance]", func() {
By("creating the pod")
@@ -351,7 +356,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
},
},
},
@@ -377,9 +382,9 @@ var _ = framework.KubeDescribe("Pods", func() {
})
/*
Testname: pods-contain-services-environment-variables
Description: Make sure that when a pod is created it contains environment
variables for each active service.
Release : v1.9
Testname: Pods, service environment variables
Description: Create a server Pod listening on port 9376. A Service called fooservice is created for the server Pod listening on port 8765 targeting port 8080. If a new Pod is created in the cluster then that Pod MUST have the fooservice environment variables available. The newly created Pod MUST have environment variables such as FOOSERVICE_SERVICE_HOST, FOOSERVICE_SERVICE_PORT, FOOSERVICE_PORT, FOOSERVICE_PORT_8765_TCP_PORT, FOOSERVICE_PORT_8765_TCP_PROTO, FOOSERVICE_PORT_8765_TCP and FOOSERVICE_PORT_8765_TCP_ADDR that are populated with proper values.
*/
framework.ConformanceIt("should contain environment variables for services [NodeConformance]", func() {
// Make a pod that will be a service.
@@ -442,7 +447,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: containerName,
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
},
},
@@ -481,7 +486,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: "main",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "echo container is alive; sleep 600"},
},
},
@@ -499,8 +504,8 @@ var _ = framework.KubeDescribe("Pods", func() {
Param("stderr", "1").
Param("stdout", "1").
Param("container", pod.Spec.Containers[0].Name).
Param("command", "cat").
Param("command", "/etc/resolv.conf")
Param("command", "echo").
Param("command", "remote execution test")
url := req.URL()
ws, err := framework.OpenWebSocketForURL(url, config, []string{"channel.k8s.io"})
@@ -536,8 +541,8 @@ var _ = framework.KubeDescribe("Pods", func() {
if buf.Len() == 0 {
return fmt.Errorf("Unexpected output from server")
}
if !strings.Contains(buf.String(), "nameserver") {
return fmt.Errorf("Expected to find 'nameserver' in %q", buf.String())
if !strings.Contains(buf.String(), "remote execution test") {
return fmt.Errorf("Expected to find 'remote execution test' in %q", buf.String())
}
return nil
}, time.Minute, 10*time.Second).Should(BeNil())
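Each repeated "command" query parameter contributes one argv element to the remote exec, so the request above runs echo with a single argument rather than a concatenated shell string; a condensed sketch of the URL construction:

req := f.ClientSet.CoreV1().RESTClient().Get().
	Namespace(f.Namespace.Name).
	Resource("pods").
	Name(pod.Name).
	Suffix("exec").
	Param("stderr", "1").
	Param("stdout", "1").
	Param("container", pod.Spec.Containers[0].Name).
	Param("command", "echo").                 // argv[0]
	Param("command", "remote execution test") // argv[1]
url := req.URL()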
@@ -557,7 +562,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: "main",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "echo container is alive; sleep 10000"},
},
},
@@ -612,7 +617,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: containerName,
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "sleep 5", "/crash/missing"},
},
},
@@ -623,7 +628,7 @@ var _ = framework.KubeDescribe("Pods", func() {
By("updating the image")
podClient.Update(podName, func(pod *v1.Pod) {
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.NginxSlim)
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.Nginx)
})
time.Sleep(syncLoopFrequency)
@@ -653,7 +658,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: containerName,
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "sleep 5", "/crash/missing"},
},
},
@@ -694,4 +699,64 @@ var _ = framework.KubeDescribe("Pods", func() {
framework.Failf("expected %s back-off got=%s on delay2", kubelet.MaxContainerBackOff, delay2)
}
})
// TODO(freehan): label the test to be [NodeConformance] after tests are proven to be stable.
It("should support pod readiness gates [NodeFeature:PodReadinessGate]", func() {
podName := "pod-ready"
readinessGate1 := "k8s.io/test-condition1"
readinessGate2 := "k8s.io/test-condition2"
patchStatusFmt := `{"status":{"conditions":[{"type":%q, "status":%q}]}}`
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"test": "pod-readiness-gate"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pod-readiness-gate",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "echo container is alive; sleep 10000"},
},
},
ReadinessGates: []v1.PodReadinessGate{
{ConditionType: v1.PodConditionType(readinessGate1)},
{ConditionType: v1.PodConditionType(readinessGate2)},
},
},
}
validatePodReadiness := func(expectReady bool) {
Expect(wait.Poll(time.Second, maxReadyStatusUpdateTolerance, func() (bool, error) {
podReady := podClient.PodIsReady(podName)
res := expectReady == podReady
if !res {
framework.Logf("Expect the Ready condition of pod %q to be %v, but got %v", podName, expectReady, podReady)
}
return res, nil
})).NotTo(HaveOccurred())
}
By("submitting the pod to kubernetes")
podClient.CreateSync(pod)
Expect(podClient.PodIsReady(podName)).To(BeFalse(), "Expect pod's Ready condition to be false initially.")
By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate1))
_, err := podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), "status")
Expect(err).NotTo(HaveOccurred())
// Sleep for 10 seconds.
time.Sleep(maxReadyStatusUpdateTolerance)
Expect(podClient.PodIsReady(podName)).To(BeFalse(), "Expect pod's Ready condition to be false with only one condition in readinessGates equal to True")
By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate2))
_, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), "status")
Expect(err).NotTo(HaveOccurred())
validatePodReadiness(true)
By(fmt.Sprintf("patching pod status with condition %q to false", readinessGate1))
_, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), "status")
Expect(err).NotTo(HaveOccurred())
validatePodReadiness(false)
})
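For clarity, patchStatusFmt renders to plain strategic-merge JSON; one condition flip from the test above looks like:

// Renders to: {"status":{"conditions":[{"type":"k8s.io/test-condition1", "status":"True"}]}}
patch := []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True"))
_, err := podClient.Patch(podName, types.StrategicMergePatchType, patch, "status")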
})

View File

@@ -24,6 +24,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
type PrivilegedPodTestConfig struct {
@@ -90,14 +91,14 @@ func (c *PrivilegedPodTestConfig) createPodsSpec() *v1.Pod {
Containers: []v1.Container{
{
Name: c.privilegedContainer,
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: &v1.SecurityContext{Privileged: &isPrivileged},
Command: []string{"/bin/sleep", "10000"},
},
{
Name: c.notPrivilegedContainer,
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: &v1.SecurityContext{Privileged: &notPrivileged},
Command: []string{"/bin/sleep", "10000"},

View File

@@ -26,6 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -36,16 +37,18 @@ var _ = Describe("[sig-storage] Projected", func() {
f := framework.NewDefaultFramework("projected")
/*
Testname: projected-secret-no-defaultMode
Description: Simple projected Secret test with no defaultMode set.
Release : v1.9
Testname: Projected Volume, Secrets, volume mode default
Description: A Pod is created with a projected volume source secret to store a secret with a specified key with default permission mode. Pod MUST be able to read the content of the key successfully and the mode MUST be -rw-r--r-- by default.
*/
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil)
})
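A minimal sketch of the projected-secret volume shape these tests consume (illustrative names; DefaultMode is optional and defaults to 0644 when nil):

defaultMode := int32(0400)
vol := v1.Volume{
	Name: "projected-secret-volume",
	VolumeSource: v1.VolumeSource{
		Projected: &v1.ProjectedVolumeSource{
			DefaultMode: &defaultMode,
			Sources: []v1.VolumeProjection{
				{Secret: &v1.SecretProjection{
					LocalObjectReference: v1.LocalObjectReference{Name: "projected-secret-test"},
				}},
			},
		},
	},
}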
/*
Testname: projected-secret-with-defaultMode
Description: Simple projected Secret test with defaultMode set.
Release : v1.9
Testname: Projected Volume, Secrets, volume mode 0400
Description: A Pod is created with a projected volume source secret to store a secret with a specified key with permission mode set to 0400 on the Pod. Pod MUST be able to read the content of the key successfully and the mode MUST be -r--------.
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [NodeConformance]", func() {
defaultMode := int32(0400)
@@ -53,9 +56,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-secret-with-nonroot-defaultMode-fsGroup
Description: Simple projected Secret test as non-root with
defaultMode and fsGroup set.
Release : v1.9
Testname: Projected Volume, Secrets, non-root, custom fsGroup
Description: A Pod is created with a projected volume source secret to store a secret with a specified key. The volume has permission mode set to 0440, fsgroup set to 1001 and user set to non-root uid of 1000. Pod MUST be able to read the content of the key successfully and the mode MUST be -r--r-----.
*/
framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance]", func() {
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
@@ -65,19 +68,18 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-secret-simple-mapped
Description: Simple projected Secret test, by setting a secret and
mounting it to a volume with a custom path (mapping) on the pod with
no other settings and make sure the pod actually consumes it.
Release : v1.9
Testname: Projected Volume, Secrets, mapped
Description: A Pod is created with a projected volume source secret to store a secret with a specified key with default permission mode. The secret is also mapped to a custom path. Pod MUST be able to read the content of the key successfully and the mode MUST be -rw-r--r-- on the mapped volume.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
doProjectedSecretE2EWithMapping(f, nil)
})
/*
Testname: projected-secret-with-item-mode-mapped
Description: Repeat the projected-secret-simple-mapped but this time
with an item mode (e.g. 0400) for the secret map item.
Release : v1.9
Testname: Projected Volume, Secrets, mapped, volume mode 0400
Description: A Pod is created with a projected volume source secret to store a secret with a specified key with permission mode set to 0400. The secret is also mapped to a specific name. Pod MUST be able to read the content of the key successfully and the mode MUST be -r-------- on the mapped volume.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [NodeConformance]", func() {
mode := int32(0400)
@@ -106,9 +108,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-secret-multiple-volumes
Description: Make sure secrets works when mounted as two different
volumes on the same node.
Release : v1.9
Testname: Projected Volume, Secrets, mapped, multiple paths
Description: A Pod is created with a projected volume source secret to store a secret with a specified key. The secret is mapped to two different volume mounts. Pod MUST be able to read the content of the key successfully from the two volume mounts and the mode MUST be -rw-r--r-- on the mapped volumes.
*/
framework.ConformanceIt("should be consumable in multiple volumes in a pod [NodeConformance]", func() {
// This test ensures that the same secret can be mounted in multiple
@@ -171,7 +173,7 @@ var _ = Describe("[sig-storage] Projected", func() {
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/projected-secret-volume/data-1",
"--file_mode=/etc/projected-secret-volume/data-1"},
@@ -200,8 +202,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-secret-simple-optional
Description: Make sure secrets works when optional updates included.
Release : v1.9
Testname: Projected Volume, Secrets, create, update delete
Description: Create a Pod with three containers with secrets, namely a create, update and delete container. The create container when started MUST not have a secret; the update and delete containers MUST be created with a secret value. Create a secret in the create container, the Pod MUST be able to read the secret from the create container. Update the secret in the update container, Pod MUST be able to read the updated secret value. Delete the secret in the delete container. Pod MUST fail to read the secret from the delete container.
*/
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
@@ -320,7 +323,7 @@ var _ = Describe("[sig-storage] Projected", func() {
Containers: []v1.Container{
{
Name: deleteContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-secret-volumes/delete/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -332,7 +335,7 @@ var _ = Describe("[sig-storage] Projected", func() {
},
{
Name: updateContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-secret-volumes/update/data-3"},
VolumeMounts: []v1.VolumeMount{
{
@@ -344,7 +347,7 @@ var _ = Describe("[sig-storage] Projected", func() {
},
{
Name: createContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-secret-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -401,18 +404,18 @@ var _ = Describe("[sig-storage] Projected", func() {
// Part 2/3 - ConfigMaps
/*
Testname: projected-volume-configMap-nomappings-succeeds
Description: Make sure that a projected volume with a configMap with
no mappings succeeds properly.
Release : v1.9
Testname: Projected Volume, ConfigMap, volume mode default
Description: A Pod is created with projected volume source ConfigMap to store a configMap with default permission mode. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--.
*/
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
doProjectedConfigMapE2EWithoutMappings(f, 0, 0, nil)
})
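And the mapped variant that the "with mappings" tests exercise, sketched with a per-item mode (illustrative names):

itemMode := int32(0400)
proj := v1.VolumeProjection{
	ConfigMap: &v1.ConfigMapProjection{
		LocalObjectReference: v1.LocalObjectReference{Name: "projected-configmap-test"},
		Items: []v1.KeyToPath{
			// Key "data-1" is surfaced in the volume at the custom path below.
			{Key: "data-1", Path: "path/to/data-2", Mode: &itemMode},
		},
	},
}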
/*
Testname: projected-volume-configMap-consumable-defaultMode
Description: Make sure that a projected volume configMap is consumable
with defaultMode set.
Release : v1.9
Testname: Projected Volume, ConfigMap, volume mode 0400
Description: A Pod is created with projected volume source ConfigMap to store a configMap with permission mode set to 0400. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -r--------.
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [NodeConformance]", func() {
defaultMode := int32(0400)
@@ -425,9 +428,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-volume-configMap-consumable-nonroot
Description: Make sure that a projected volume configMap is consumable
by a non-root userID.
Release : v1.9
Testname: Projected Volume, ConfigMap, non-root user
Description: A Pod is created with projected volume source ConfigMap to store a configMap as non-root user with uid 1000. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--.
*/
framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func() {
doProjectedConfigMapE2EWithoutMappings(f, 1000, 0, nil)
@@ -438,19 +441,18 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-configmap-simple-mapped
Description: Simplest projected ConfigMap test, by setting a config
map and mounting it to a volume with a custom path (mapping) on the
pod with no other settings and make sure the pod actually consumes it.
Release : v1.9
Testname: Projected Volume, ConfigMap, mapped
Description: A Pod is created with projected volume source ConfigMap to store a configMap with default permission mode. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -rw-r--r--.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
doProjectedConfigMapE2EWithMappings(f, 0, 0, nil)
})
/*
Testname: projected-secret-with-item-mode-mapped
Description: Repeat the projected-secret-simple-mapped but this time
with an item mode (e.g. 0400) for the secret map item
Release : v1.9
Testname: Projected Volume, ConfigMap, mapped, volume mode 0400
Description: A Pod is created with projected volume source ConfigMap to store a configMap with permission mode set to 0400. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -r--------.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [NodeConformance]", func() {
mode := int32(0400)
@@ -458,9 +460,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-configmap-simpler-user-mapped
Description: Repeat the projected-config-map-simple-mapped but this
time with a user other than root.
Release : v1.9
Testname: Projected Volume, ConfigMap, mapped, non-root user
Description: A Pod is created with projected volume source ConfigMap to store a configMap as non-root user with uid 1000. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -rw-r--r--.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func() {
doProjectedConfigMapE2EWithMappings(f, 1000, 0, nil)
@@ -471,10 +473,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-volume-configMaps-updated-successfully
Description: Make sure that if a projected volume has configMaps,
that the values in these configMaps can be updated, deleted,
and created.
Release : v1.9
Testname: Projected Volume, ConfigMap, update
Description: A Pod is created with projected volume source ConfigMap to store a configMap and performs a create and an update to a new value. Pod MUST be able to create the configMap with value-1. Pod MUST be able to update the value in the configMap to value-2.
*/
framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
@@ -526,7 +527,7 @@ var _ = Describe("[sig-storage] Projected", func() {
Containers: []v1.Container{
{
Name: containerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -560,10 +561,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-volume-optional-configMaps-updated-successfully
Description: Make sure that if a projected volume has optional
configMaps, that the values in these configMaps can be updated,
deleted, and created.
Release : v1.9
Testname: Projected Volume, ConfigMap, create, update and delete
Description: Create a Pod with three containers with ConfigMaps, namely a create, update and delete container. The create container when started MUST not have a ConfigMap; the update and delete containers MUST be created with a ConfigMap value as value-1. Create a configMap in the create container, the Pod MUST be able to read the configMap from the create container. Update the configMap in the update container, Pod MUST be able to read the updated configMap value. Delete the configMap in the delete container. Pod MUST fail to read the configMap from the delete container.
*/
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
@@ -682,7 +682,7 @@ var _ = Describe("[sig-storage] Projected", func() {
Containers: []v1.Container{
{
Name: deleteContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volumes/delete/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -694,7 +694,7 @@ var _ = Describe("[sig-storage] Projected", func() {
},
{
Name: updateContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volumes/update/data-3"},
VolumeMounts: []v1.VolumeMount{
{
@@ -706,7 +706,7 @@ var _ = Describe("[sig-storage] Projected", func() {
},
{
Name: createContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -762,9 +762,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-configmap-multiple-volumes
Description: Make sure config map works when it mounted as two
different volumes on the same node.
Release : v1.9
Testname: Projected Volume, ConfigMap, multiple volume paths
Description: A Pod is created with a projected volume source ConfigMap to store a configMap. The configMap is mapped to two different volume mounts. Pod MUST be able to read the content of the configMap successfully from the two volume mounts.
*/
framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func() {
var (
@@ -825,7 +825,7 @@ var _ = Describe("[sig-storage] Projected", func() {
Containers: []v1.Container{
{
Name: "projected-configmap-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{"--file_content=/etc/projected-configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -860,9 +860,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-downwardapi-volume-podname
Description: Ensure that downward API can provide pod's name through
DownwardAPIVolumeFiles in a projected volume.
Release : v1.9
Testname: Projected Volume, DownwardAPI, pod name
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the pod name from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide podname only [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -874,10 +874,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
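For reference, the downwardAPI half of a projected volume is driven by DownwardAPIVolumeFile entries. A minimal sketch of the projection behind the pod-name case, assuming the v1 import used in this file; the function name is illustrative.
func sketchPodNameProjection() v1.VolumeProjection {
	return v1.VolumeProjection{
		DownwardAPI: &v1.DownwardAPIProjection{
			Items: []v1.DownwardAPIVolumeFile{{
				Path: "podname", // the kubelet writes the pod's metadata.name into this file
				FieldRef: &v1.ObjectFieldSelector{
					APIVersion: "v1",
					FieldPath:  "metadata.name",
				},
			}},
		},
	}
}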
/*
Testname: projected-downwardapi-volume-set-default-mode
Description: Ensure that downward API can set default file permission
mode for DownwardAPIVolumeFiles if no mode is specified in a projected
volume.
Release : v1.9
Testname: Projected Volume, DownwardAPI, volume mode 0400
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The default mode for the volume mount is set to 0400. Pod MUST be able to read the pod name from the mounted DownwardAPIVolumeFiles and the volume mode must be -r--------.
*/
framework.ConformanceIt("should set DefaultMode on files [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -890,9 +889,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-downwardapi-volume-set-mode
Description: Ensure that downward API can set file permission mode for
DownwardAPIVolumeFiles in a projected volume.
Release : v1.9
Testname: Projected Volume, DownwardAPI, volume mode 0400
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The file mode for the item on the volume is set to 0400. Pod MUST be able to read the pod name from the mounted DownwardAPIVolumeFiles and the volume mode must be -r--------.
*/
framework.ConformanceIt("should set mode on item file [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -934,10 +933,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-downwardapi-volume-update-label
Description: Ensure that downward API updates labels in
DownwardAPIVolumeFiles when pod's labels get modified in a projected
volume.
Release : v1.9
Testname: Projected Volume, DownwardAPI, update labels
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests and label items. Pod MUST be able to read the labels from the mounted DownwardAPIVolumeFiles. Labels are then updated. Pod MUST be able to read the updated values for the Labels.
*/
framework.ConformanceIt("should update labels on modification [NodeConformance]", func() {
labels := map[string]string{}
@@ -967,10 +965,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
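Label (and annotation) projections use the same mechanism, only with a fieldPath that the kubelet re-renders when the pod's metadata changes. A hedged sketch, assuming the v1 import from this file:
func sketchLabelsFile() v1.DownwardAPIVolumeFile {
	return v1.DownwardAPIVolumeFile{
		Path: "labels",
		FieldRef: &v1.ObjectFieldSelector{
			APIVersion: "v1",
			FieldPath:  "metadata.labels", // re-rendered on label updates, which is what this test polls for
		},
	}
}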
/*
Testname: projected-downwardapi-volume-update-annotation
Description: Ensure that downward API updates annotations in
DownwardAPIVolumeFiles when pod's annotations get modified in a
projected volume.
Release : v1.9
Testname: Projected Volume, DownwardAPI, update annotation
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests and annotation items. Pod MUST be able to read the annotations from the mounted DownwardAPIVolumeFiles. Annotations are then updated. Pod MUST be able to read the updated values for the Annotations.
*/
framework.ConformanceIt("should update annotations on modification [NodeConformance]", func() {
annotations := map[string]string{}
@@ -1002,9 +999,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-downwardapi-volume-cpu-limit
Description: Ensure that downward API can provide container's CPU
limit through DownwardAPIVolumeFiles in a projected volume.
Release : v1.9
Testname: Projected Volume, DownwardAPI, CPU limits
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the cpu limits from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -1016,9 +1013,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
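The resource cases use ResourceFieldRef instead of FieldRef. A minimal sketch, assuming the v1 import from this file plus k8s.io/apimachinery/pkg/api/resource; the divisor value is illustrative and controls the unit the limit is reported in.
func sketchCPULimitFile() v1.DownwardAPIVolumeFile {
	return v1.DownwardAPIVolumeFile{
		Path: "cpu_limit",
		ResourceFieldRef: &v1.ResourceFieldSelector{
			ContainerName: "client-container",
			Resource:      "limits.cpu",
			Divisor:       resource.MustParse("1m"), // report the limit in millicores
		},
	}
}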
/*
Testname: projected-downwardapi-volume-memory-limit
Description: Ensure that downward API can provide container's memory
limit through DownwardAPIVolumeFiles in a projected volume.
Release : v1.9
Testname: Projected Volume, DownwardAPI, memory limits
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the memory limits from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -1030,9 +1027,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-downwardapi-volume-cpu-request
Description: Ensure that downward API can provide container's CPU
request through DownwardAPIVolumeFiles in a projected volume.
Release : v1.9
Testname: Projected Volume, DownwardAPI, CPU request
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the cpu request from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -1044,9 +1041,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-downwardapi-volume-memory-request
Description: Ensure that downward API can provide container's memory
request through DownwardAPIVolumeFiles in a projected volume.
Release : v1.9
Testname: Projected Volume, DownwardAPI, memory request
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the memory request from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide container's memory request [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -1058,10 +1055,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-downwardapi-volume-default-cpu
Description: Ensure that downward API can provide default node
allocatable value for CPU through DownwardAPIVolumeFiles if CPU limit
is not specified for a container in a projected volume.
Release : v1.9
Testname: Projected Volume, DownwardAPI, CPU limit, node allocatable
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The CPU and memory resources for requests and limits are NOT specified for the container. Pod MUST be able to read the default cpu limits from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -1071,10 +1067,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-downwardapi-volume-default-memory
Description: Ensure that downward API can provide default node
allocatable value for memory through DownwardAPIVolumeFiles if memory
limit is not specified for a container in a projected volume.
Release : v1.9
Testname: Projected Volume, DownwardAPI, memory limit, node allocatable
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The CPU and memory resources for requests and limits are NOT specified for the container. Pod MUST be able to read the default memory limits from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -1085,9 +1080,9 @@ var _ = Describe("[sig-storage] Projected", func() {
// Test multiple projections
/*
Testname: projected-configmap-secret-same-dir
Description: This test projects a secret and configmap into the same
directory to ensure projection is working as intended.
Release : v1.9
Testname: Projected Volume, multiple projections
Description: A Pod is created with a projected volume source for secrets, configMap and downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the secrets, configMap values and the cpu and memory limits as well as cpu and memory requests from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should project all components that make up the projection API [Projection][NodeConformance]", func() {
var err error
@@ -1126,7 +1121,7 @@ var _ = Describe("[sig-storage] Projected", func() {
pod.Spec.Containers = []v1.Container{
{
Name: "projected-all-volume-test",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "cat /all/podname && cat /all/secret-data && cat /all/configmap-data"},
VolumeMounts: []v1.VolumeMount{
{
@@ -1186,7 +1181,7 @@ func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int
Containers: []v1.Container{
{
Name: "projected-secret-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/projected-secret-volume/data-1",
"--file_mode=/etc/projected-secret-volume/data-1"},
@@ -1272,7 +1267,7 @@ func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) {
Containers: []v1.Container{
{
Name: "projected-secret-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/projected-secret-volume/new-path-data-1",
"--file_mode=/etc/projected-secret-volume/new-path-data-1"},
@@ -1349,7 +1344,7 @@ func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup
Containers: []v1.Container{
{
Name: "projected-configmap-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/projected-configmap-volume/data-1",
"--file_mode=/etc/projected-configmap-volume/data-1"},
@@ -1440,7 +1435,7 @@ func doProjectedConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup in
Containers: []v1.Container{
{
Name: "projected-configmap-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{"--file_content=/etc/projected-configmap-volume/path/to/data-2",
"--file_mode=/etc/projected-configmap-volume/path/to/data-2"},
VolumeMounts: []v1.VolumeMount{
@@ -1490,7 +1485,7 @@ func projectedDownwardAPIVolumePodForModeTest(name, filePath string, itemMode, d
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--file_mode=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
@@ -1516,7 +1511,7 @@ func projectedDownwardAPIVolumePodForUpdateTest(name string, labels, annotations
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", "--retry_time=1200", "--file_content_in_loop=" + filePath},
VolumeMounts: []v1.VolumeMount{
{

386
vendor/k8s.io/kubernetes/test/e2e/common/runtime.go generated vendored Normal file
View File

@@ -0,0 +1,386 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"path"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
gomegatypes "github.com/onsi/gomega/types"
)
var _ = framework.KubeDescribe("Container Runtime", func() {
f := framework.NewDefaultFramework("container-runtime")
Describe("blackbox test", func() {
Context("when starting a container that exits", func() {
It("should run with the expected status [NodeConformance]", func() {
restartCountVolumeName := "restart-count"
restartCountVolumePath := "/restart-count"
testContainer := v1.Container{
Image: framework.BusyBoxImage,
VolumeMounts: []v1.VolumeMount{
{
MountPath: restartCountVolumePath,
Name: restartCountVolumeName,
},
},
}
testVolumes := []v1.Volume{
{
Name: restartCountVolumeName,
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory},
},
},
}
testCases := []struct {
Name string
RestartPolicy v1.RestartPolicy
Phase v1.PodPhase
State ContainerState
RestartCount int32
Ready bool
}{
{"terminate-cmd-rpa", v1.RestartPolicyAlways, v1.PodRunning, ContainerStateRunning, 2, true},
{"terminate-cmd-rpof", v1.RestartPolicyOnFailure, v1.PodSucceeded, ContainerStateTerminated, 1, false},
{"terminate-cmd-rpn", v1.RestartPolicyNever, v1.PodFailed, ContainerStateTerminated, 0, false},
}
for _, testCase := range testCases {
// The container fails on the 1st run, succeeds on the 2nd run, then runs forever
cmdScripts := `
f=%s
count=$(echo 'hello' >> $f ; wc -l $f | awk {'print $1'})
if [ $count -eq 1 ]; then
exit 1
fi
if [ $count -eq 2 ]; then
exit 0
fi
while true; do sleep 1; done
`
tmpCmd := fmt.Sprintf(cmdScripts, path.Join(restartCountVolumePath, "restartCount"))
testContainer.Name = testCase.Name
testContainer.Command = []string{"sh", "-c", tmpCmd}
terminateContainer := ConformanceContainer{
PodClient: f.PodClient(),
Container: testContainer,
RestartPolicy: testCase.RestartPolicy,
Volumes: testVolumes,
PodSecurityContext: &v1.PodSecurityContext{
SELinuxOptions: &v1.SELinuxOptions{
Level: "s0",
},
},
}
terminateContainer.Create()
defer terminateContainer.Delete()
By(fmt.Sprintf("Container '%s': should get the expected 'RestartCount'", testContainer.Name))
Eventually(func() (int32, error) {
status, err := terminateContainer.GetStatus()
return status.RestartCount, err
}, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(Equal(testCase.RestartCount))
By(fmt.Sprintf("Container '%s': should get the expected 'Phase'", testContainer.Name))
Eventually(terminateContainer.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(Equal(testCase.Phase))
By(fmt.Sprintf("Container '%s': should get the expected 'Ready' condition", testContainer.Name))
Expect(terminateContainer.IsReady()).Should(Equal(testCase.Ready))
status, err := terminateContainer.GetStatus()
Expect(err).ShouldNot(HaveOccurred())
By(fmt.Sprintf("Container '%s': should get the expected 'State'", testContainer.Name))
Expect(GetContainerState(status.State)).To(Equal(testCase.State))
By(fmt.Sprintf("Container '%s': should be possible to delete [NodeConformance]", testContainer.Name))
Expect(terminateContainer.Delete()).To(Succeed())
Eventually(terminateContainer.Present, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(BeFalse())
}
})
rootUser := int64(0)
nonRootUser := int64(10000)
for _, testCase := range []struct {
name string
container v1.Container
phase v1.PodPhase
message gomegatypes.GomegaMatcher
}{
{
name: "if TerminationMessagePath is set [NodeConformance]",
container: v1.Container{
Image: framework.BusyBoxImage,
Command: []string{"/bin/sh", "-c"},
Args: []string{"/bin/echo -n DONE > /dev/termination-log"},
TerminationMessagePath: "/dev/termination-log",
SecurityContext: &v1.SecurityContext{
RunAsUser: &rootUser,
},
},
phase: v1.PodSucceeded,
message: Equal("DONE"),
},
{
name: "if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance]",
container: v1.Container{
Image: framework.BusyBoxImage,
Command: []string{"/bin/sh", "-c"},
Args: []string{"/bin/echo -n DONE > /dev/termination-custom-log"},
TerminationMessagePath: "/dev/termination-custom-log",
SecurityContext: &v1.SecurityContext{
RunAsUser: &nonRootUser,
},
},
phase: v1.PodSucceeded,
message: Equal("DONE"),
},
{
name: "from log output if TerminationMessagePolicy FallbackToLogOnError is set [NodeConformance]",
container: v1.Container{
Image: framework.BusyBoxImage,
Command: []string{"/bin/sh", "-c"},
Args: []string{"/bin/echo -n DONE; /bin/false"},
TerminationMessagePath: "/dev/termination-log",
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
},
phase: v1.PodFailed,
message: Equal("DONE\n"),
},
{
name: "as empty when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set [NodeConformance]",
container: v1.Container{
Image: framework.BusyBoxImage,
Command: []string{"/bin/sh", "-c"},
Args: []string{"/bin/echo DONE; /bin/true"},
TerminationMessagePath: "/dev/termination-log",
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
},
phase: v1.PodSucceeded,
message: Equal(""),
},
{
name: "from file when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set [NodeConformance]",
container: v1.Container{
Image: framework.BusyBoxImage,
Command: []string{"/bin/sh", "-c"},
Args: []string{"/bin/echo -n OK > /dev/termination-log; /bin/echo DONE; /bin/true"},
TerminationMessagePath: "/dev/termination-log",
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
},
phase: v1.PodSucceeded,
message: Equal("OK"),
},
} {
It(fmt.Sprintf("should report termination message %s", testCase.name), func() {
testCase.container.Name = "termination-message-container"
c := ConformanceContainer{
PodClient: f.PodClient(),
Container: testCase.container,
RestartPolicy: v1.RestartPolicyNever,
}
By("create the container")
c.Create()
defer c.Delete()
By(fmt.Sprintf("wait for the container to reach %s", testCase.phase))
Eventually(c.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(Equal(testCase.phase))
By("get the container status")
status, err := c.GetStatus()
Expect(err).NotTo(HaveOccurred())
By("the container should be terminated")
Expect(GetContainerState(status.State)).To(Equal(ContainerStateTerminated))
By("the termination message should be set")
Expect(status.State.Terminated.Message).Should(testCase.message)
By("delete the container")
Expect(c.Delete()).To(Succeed())
})
}
})
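A compact sketch of the container shape the termination-message cases above exercise, assuming the v1 and framework imports from this file; the name and command are illustrative.
func sketchTerminationMessageContainer() v1.Container {
	return v1.Container{
		Name:    "termination-sketch",
		Image:   framework.BusyBoxImage,
		Command: []string{"/bin/sh", "-c", "/bin/echo -n DONE; /bin/false"},
		// With FallbackToLogsOnError, if the container fails and the
		// termination-log file is empty, the message is backfilled from
		// the tail of the container's log.
		TerminationMessagePath:   "/dev/termination-log",
		TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
	}
}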
Context("when running a container with a new image", func() {
// The service account only has pull permission
auth := `
{
"auths": {
"https://gcr.io": {
"auth": "X2pzb25fa2V5OnsKICAidHlwZSI6ICJzZXJ2aWNlX2FjY291bnQiLAogICJwcm9qZWN0X2lkIjogImF1dGhlbnRpY2F0ZWQtaW1hZ2UtcHVsbGluZyIsCiAgInByaXZhdGVfa2V5X2lkIjogImI5ZjJhNjY0YWE5YjIwNDg0Y2MxNTg2MDYzZmVmZGExOTIyNGFjM2IiLAogICJwcml2YXRlX2tleSI6ICItLS0tLUJFR0lOIFBSSVZBVEUgS0VZLS0tLS1cbk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzdTSG5LVEVFaVlMamZcbkpmQVBHbUozd3JCY2VJNTBKS0xxS21GWE5RL3REWGJRK2g5YVl4aldJTDhEeDBKZTc0bVovS01uV2dYRjVLWlNcbm9BNktuSU85Yi9SY1NlV2VpSXRSekkzL1lYVitPNkNjcmpKSXl4anFWam5mVzJpM3NhMzd0OUE5VEZkbGZycm5cbjR6UkpiOWl4eU1YNGJMdHFGR3ZCMDNOSWl0QTNzVlo1ODhrb1FBZmgzSmhhQmVnTWorWjRSYko0aGVpQlFUMDNcbnZVbzViRWFQZVQ5RE16bHdzZWFQV2dydDZOME9VRGNBRTl4bGNJek11MjUzUG4vSzgySFpydEx4akd2UkhNVXhcbng0ZjhwSnhmQ3h4QlN3Z1NORit3OWpkbXR2b0wwRmE3ZGducFJlODZWRDY2ejNZenJqNHlLRXRqc2hLZHl5VWRcbkl5cVhoN1JSQWdNQkFBRUNnZ0VBT3pzZHdaeENVVlFUeEFka2wvSTVTRFVidi9NazRwaWZxYjJEa2FnbmhFcG9cbjFJajJsNGlWMTByOS9uenJnY2p5VlBBd3pZWk1JeDFBZVF0RDdoUzRHWmFweXZKWUc3NkZpWFpQUm9DVlB6b3VcbmZyOGRDaWFwbDV0enJDOWx2QXNHd29DTTdJWVRjZmNWdDdjRTEyRDNRS3NGNlo3QjJ6ZmdLS251WVBmK0NFNlRcbmNNMHkwaCtYRS9kMERvSERoVy96YU1yWEhqOFRvd2V1eXRrYmJzNGYvOUZqOVBuU2dET1lQd2xhbFZUcitGUWFcbkpSd1ZqVmxYcEZBUW14M0Jyd25rWnQzQ2lXV2lGM2QrSGk5RXRVYnRWclcxYjZnK1JRT0licWFtcis4YlJuZFhcbjZWZ3FCQWtKWjhSVnlkeFVQMGQxMUdqdU9QRHhCbkhCbmM0UW9rSXJFUUtCZ1FEMUNlaWN1ZGhXdGc0K2dTeGJcbnplanh0VjFONDFtZHVjQnpvMmp5b1dHbzNQVDh3ckJPL3lRRTM0cU9WSi9pZCs4SThoWjRvSWh1K0pBMDBzNmdcblRuSXErdi9kL1RFalk4MW5rWmlDa21SUFdiWHhhWXR4UjIxS1BYckxOTlFKS2ttOHRkeVh5UHFsOE1veUdmQ1dcbjJ2aVBKS05iNkhabnY5Q3lqZEo5ZzJMRG5RS0JnUUREcVN2eURtaGViOTIzSW96NGxlZ01SK205Z2xYVWdTS2dcbkVzZlllbVJmbU5XQitDN3ZhSXlVUm1ZNU55TXhmQlZXc3dXRldLYXhjK0krYnFzZmx6elZZdFpwMThNR2pzTURcbmZlZWZBWDZCWk1zVXQ3Qmw3WjlWSjg1bnRFZHFBQ0xwWitaLzN0SVJWdWdDV1pRMWhrbmxHa0dUMDI0SkVFKytcbk55SDFnM2QzUlFLQmdRQ1J2MXdKWkkwbVBsRklva0tGTkh1YTBUcDNLb1JTU1hzTURTVk9NK2xIckcxWHJtRjZcbkMwNGNTKzQ0N0dMUkxHOFVUaEpKbTRxckh0Ti9aK2dZOTYvMm1xYjRIakpORDM3TVhKQnZFYTN5ZUxTOHEvK1JcbjJGOU1LamRRaU5LWnhQcG84VzhOSlREWTVOa1BaZGh4a2pzSHdVNGRTNjZwMVRESUU0MGd0TFpaRFFLQmdGaldcbktyblFpTnEzOS9iNm5QOFJNVGJDUUFKbmR3anhTUU5kQTVmcW1rQTlhRk9HbCtqamsxQ1BWa0tNSWxLSmdEYkpcbk9heDl2OUc2Ui9NSTFIR1hmV3QxWU56VnRocjRIdHNyQTB0U3BsbWhwZ05XRTZWejZuQURqdGZQSnMyZUdqdlhcbmpQUnArdjhjY21MK3dTZzhQTGprM3ZsN2VlNXJsWWxNQndNdUdjUHhBb0dBZWRueGJXMVJMbVZubEFpSEx1L0xcbmxtZkF3RFdtRWlJMFVnK1BMbm9Pdk81dFE1ZDRXMS94RU44bFA0cWtzcGtmZk1Rbk5oNFNZR0VlQlQzMlpxQ1RcbkpSZ2YwWGpveXZ2dXA5eFhqTWtYcnBZL3ljMXpmcVRaQzBNTzkvMVVjMWJSR2RaMmR5M2xSNU5XYXA3T1h5Zk9cblBQcE5Gb1BUWGd2M3FDcW5sTEhyR3pNPVxuLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLVxuIiwKICAiY2xpZW50X2VtYWlsIjogImltYWdlLXB1bGxpbmdAYXV0aGVudGljYXRlZC1pbWFnZS1wdWxsaW5nLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwKICAiY2xpZW50X2lkIjogIjExMzc5NzkxNDUzMDA3MzI3ODcxMiIsCiAgImF1dGhfdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi9hdXRoIiwKICAidG9rZW5fdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi90b2tlbiIsCiAgImF1dGhfcHJvdmlkZXJfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9vYXV0aDIvdjEvY2VydHMiLAogICJjbGllbnRfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9yb2JvdC92MS9tZXRhZGF0YS94NTA5L2ltYWdlLXB1bGxpbmclNDBhdXRoZW50aWNhdGVkLWltYWdlLXB1bGxpbmcuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iCn0=",
"email": "image-pulling@authenticated-image-pulling.iam.gserviceaccount.com"
}
}
}`
secret := &v1.Secret{
Data: map[string][]byte{v1.DockerConfigJsonKey: []byte(auth)},
Type: v1.SecretTypeDockerConfigJson,
}
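For orientation, this is roughly how a pod consumes such a dockerconfigjson secret; a hedged sketch assuming the v1 import from this file plus metav1, with illustrative names (the test itself wires this through ConformanceContainer instead).
func sketchPodWithPullSecret(secretName string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "image-pull-sketch"},
		Spec: v1.PodSpec{
			// The kubelet presents this secret to the registry when pulling.
			ImagePullSecrets: []v1.LocalObjectReference{{Name: secretName}},
			Containers: []v1.Container{{
				Name:            "puller",
				Image:           "gcr.io/authenticated-image-pulling/alpine:3.1",
				ImagePullPolicy: v1.PullAlways,
			}},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}
}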
// The following images are not added to NodeImageWhiteList: because this test
// exercises image pulling, these images don't need to be prepulled. The ImagePullPolicy
// is v1.PullAlways, so the pull won't be blocked by the framework image white list check.
for _, testCase := range []struct {
description string
image string
secret bool
phase v1.PodPhase
waiting bool
}{
{
description: "should not be able to pull image from invalid registry",
image: "invalid.com/invalid/alpine:3.1",
phase: v1.PodPending,
waiting: true,
},
{
description: "should not be able to pull non-existing image from gcr.io",
image: "k8s.gcr.io/invalid-image:invalid-tag",
phase: v1.PodPending,
waiting: true,
},
{
description: "should be able to pull image from gcr.io",
image: "k8s.gcr.io/alpine-with-bash:1.0",
phase: v1.PodRunning,
waiting: false,
},
{
description: "should be able to pull image from docker hub",
image: "alpine:3.1",
phase: v1.PodRunning,
waiting: false,
},
{
description: "should not be able to pull from private registry without secret",
image: "gcr.io/authenticated-image-pulling/alpine:3.1",
phase: v1.PodPending,
waiting: true,
},
{
description: "should be able to pull from private registry with secret",
image: "gcr.io/authenticated-image-pulling/alpine:3.1",
secret: true,
phase: v1.PodRunning,
waiting: false,
},
} {
testCase := testCase
It(testCase.description+" [NodeConformance]", func() {
name := "image-pull-test"
command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"}
container := ConformanceContainer{
PodClient: f.PodClient(),
Container: v1.Container{
Name: name,
Image: testCase.image,
Command: command,
// PullAlways makes sure that the image will always be pulled even if it is present before the test.
ImagePullPolicy: v1.PullAlways,
},
RestartPolicy: v1.RestartPolicyNever,
}
if testCase.secret {
secret.Name = "image-pull-secret-" + string(uuid.NewUUID())
By("create image pull secret")
_, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
Expect(err).NotTo(HaveOccurred())
defer f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil)
container.ImagePullSecrets = []string{secret.Name}
}
// checkContainerStatus checks whether the container status matches expectation.
checkContainerStatus := func() error {
status, err := container.GetStatus()
if err != nil {
return fmt.Errorf("failed to get container status: %v", err)
}
// We need to check container state first. The default pod status is Pending. If we check
// pod phase first, and the expected pod phase is Pending, the container status may not
// even show up when we check it.
// Check container state
if !testCase.waiting {
if status.State.Running == nil {
return fmt.Errorf("expected container state: Running, got: %q",
GetContainerState(status.State))
}
}
if testCase.waiting {
if status.State.Waiting == nil {
return fmt.Errorf("expected container state: Waiting, got: %q",
GetContainerState(status.State))
}
reason := status.State.Waiting.Reason
if reason != images.ErrImagePull.Error() &&
reason != images.ErrImagePullBackOff.Error() {
return fmt.Errorf("unexpected waiting reason: %q", reason)
}
}
// Check pod phase
phase, err := container.GetPhase()
if err != nil {
return fmt.Errorf("failed to get pod phase: %v", err)
}
if phase != testCase.phase {
return fmt.Errorf("expected pod phase: %q, got: %q", testCase.phase, phase)
}
return nil
}
// The image registry is not stable, which sometimes causes the test to fail. Add retry mechanism to make this
// less flaky.
const flakeRetry = 3
for i := 1; i <= flakeRetry; i++ {
var err error
By("create the container")
container.Create()
By("check the container status")
for start := time.Now(); time.Since(start) < ContainerStatusRetryTimeout; time.Sleep(ContainerStatusPollInterval) {
if err = checkContainerStatus(); err == nil {
break
}
}
By("delete the container")
container.Delete()
if err == nil {
break
}
if i < flakeRetry {
framework.Logf("No.%d attempt failed: %v, retrying...", i, err)
} else {
framework.Failf("All %d attempts failed: %v", flakeRetry, err)
}
}
})
}
})
})
})

View File

@@ -23,6 +23,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
)
@@ -31,9 +32,9 @@ var _ = Describe("[sig-api-machinery] Secrets", func() {
f := framework.NewDefaultFramework("secrets")
/*
Testname: secret-env-vars
Description: Ensure that secret can be consumed via environment
variables.
Release : v1.9
Testname: Secrets, pod environment field
Description: Create a secret. Create a Pod with a Container that declares an environment variable which references the created secret to extract a key value. Pod MUST have an environment variable that contains the proper value for the key from the secret.
*/
framework.ConformanceIt("should be consumable from pods in env vars [NodeConformance]", func() {
name := "secret-test-" + string(uuid.NewUUID())
@@ -53,7 +54,7 @@ var _ = Describe("[sig-api-machinery] Secrets", func() {
Containers: []v1.Container{
{
Name: "secret-env-test",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
Env: []v1.EnvVar{
{
@@ -80,9 +81,9 @@ var _ = Describe("[sig-api-machinery] Secrets", func() {
})
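The env-var plumbing under test is a SecretKeyRef (and, in the next test, an EnvFrom source). A minimal sketch, assuming the v1 import from this file; the variable and key names are illustrative.
func sketchSecretEnv(secretName string) []v1.EnvVar {
	return []v1.EnvVar{{
		Name: "SECRET_DATA",
		ValueFrom: &v1.EnvVarSource{
			SecretKeyRef: &v1.SecretKeySelector{
				LocalObjectReference: v1.LocalObjectReference{Name: secretName},
				Key:                  "data-1", // the key whose value lands in SECRET_DATA
			},
		},
	}}
}
// The EnvFrom variant maps every key in the secret into the environment:
//   EnvFrom: []v1.EnvFromSource{{SecretRef: &v1.SecretEnvSource{
//       LocalObjectReference: v1.LocalObjectReference{Name: secretName}}}}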
/*
Testname: secret-configmaps-source
Description: Ensure that secret can be consumed via source of a set
of ConfigMaps.
Release : v1.9
Testname: Secrets, pod environment from source
Description: Create a secret. Create a Pod with a Container that declares an environment variable using EnvFrom, which references the created secret to extract a key value. Pod MUST have an environment variable that contains the proper value for the key from the secret.
*/
framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func() {
name := "secret-test-" + string(uuid.NewUUID())
@@ -101,7 +102,7 @@ var _ = Describe("[sig-api-machinery] Secrets", func() {
Containers: []v1.Container{
{
Name: "env-test",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
EnvFrom: []v1.EnvFromSource{
{

View File

@@ -25,6 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -34,18 +35,18 @@ var _ = Describe("[sig-storage] Secrets", func() {
f := framework.NewDefaultFramework("secrets")
/*
Testname: secret-volume-mount-without-mapping
Description: Ensure that secret can be mounted without mapping to a
pod volume.
Release : v1.9
Testname: Secrets Volume, default
Description: Create a secret. Create a Pod with secret volume source configured into the container. Pod MUST be able to read the secret from the mounted volume from the container runtime and the file mode of the secret MUST be -rw-r--r-- by default.
*/
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
doSecretE2EWithoutMapping(f, nil /* default mode */, "secret-test-"+string(uuid.NewUUID()), nil, nil)
})
/*
Testname: secret-volume-mount-without-mapping-default-mode
Description: Ensure that secret can be mounted without mapping to a
pod volume in default mode.
Release : v1.9
Testname: Secrets Volume, volume mode 0400
Description: Create a secret. Create a Pod with secret volume source configured into the container with file mode set to 0400. Pod MUST be able to read the secret from the mounted volume from the container runtime and the file mode of the secret MUST be -r--------.
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [NodeConformance]", func() {
defaultMode := int32(0400)
@@ -53,9 +54,9 @@ var _ = Describe("[sig-storage] Secrets", func() {
})
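The defaultMode cases reduce to a pointer on the secret volume source. A hedged sketch, assuming the v1 import from this file; the volume name is illustrative.
func sketchSecretVolume(secretName string) v1.Volume {
	defaultMode := int32(0400)
	return v1.Volume{
		Name: "secret-volume",
		VolumeSource: v1.VolumeSource{
			Secret: &v1.SecretVolumeSource{
				SecretName:  secretName,
				DefaultMode: &defaultMode, // 0400 -> files appear as -r--------
			},
		},
	}
}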
/*
Testname: secret-volume-mount-without-mapping-non-root-default-mode-fsgroup
Description: Ensure that secret can be mounted without mapping to a pod
volume as non-root in default mode with fsGroup set.
Release : v1.9
Testname: Secrets Volume, volume mode 0440, fsGroup 1001 and uid 1000
Description: Create a secret. Create a Pod with secret volume source configured into the container with file mode set to 0440 as a non-root user with uid 1000 and fsGroup id 1001. Pod MUST be able to read the secret from the mounted volume from the container runtime and the file mode of the secret MUST be -r--r-----.
*/
framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance]", func() {
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
@@ -65,25 +66,30 @@ var _ = Describe("[sig-storage] Secrets", func() {
})
/*
Testname: secret-volume-mount-with-mapping
Description: Ensure that secret can be mounted with mapping to a pod
volume.
Release : v1.9
Testname: Secrets Volume, mapping
Description: Create a secret. Create a Pod with secret volume source configured into the container with a custom path. Pod MUST be able to read the secret from the mounted volume from the specified custom path. The file mode of the secret MUST be -rw-r--r-- by default.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
doSecretE2EWithMapping(f, nil)
})
/*
Testname: secret-volume-mount-with-mapping-item-mode
Description: Ensure that secret can be mounted with mapping to a pod
volume in item mode.
Release : v1.9
Testname: Secrets Volume, mapping, volume mode 0400
Description: Create a secret. Create a Pod with secret volume source configured into the container with a custom path and file mode set to 0400. Pod MUST be able to read the secret from the mounted volume from the specified custom path. The file mode of the secret MUST be -r--------.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [NodeConformance]", func() {
mode := int32(0400)
doSecretE2EWithMapping(f, &mode)
})
It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func() {
/*
Release : v1.12
Testname: Secrets Volume, volume mode default, secret with same name in different namespace
Description: Create a secret with the same name in two namespaces. Create a Pod with secret volume source configured into the container. Pod MUST be able to read the secrets from the mounted volume from the container runtime, and MUST see only the secret associated with the namespace where the Pod is created. The file mode of the secret MUST be -rw-r--r-- by default.
*/
framework.ConformanceIt("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func() {
var (
namespace2 *v1.Namespace
err error
@@ -105,8 +111,9 @@ var _ = Describe("[sig-storage] Secrets", func() {
})
/*
Testname: secret-multiple-volume-mounts
Description: Ensure that secret can be mounted to multiple pod volumes.
Release : v1.9
Testname: Secrets Volume, mapping multiple volume paths
Description: Create a secret. Create a Pod with two secret volume sources configured into the container in to two different custom paths. Pod MUST be able to read the secret from the both the mounted volumes from the two specified custom paths.
*/
framework.ConformanceIt("should be consumable in multiple volumes in a pod [NodeConformance]", func() {
// This test ensures that the same secret can be mounted in multiple
@@ -153,7 +160,7 @@ var _ = Describe("[sig-storage] Secrets", func() {
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/secret-volume/data-1",
"--file_mode=/etc/secret-volume/data-1"},
@@ -182,9 +189,9 @@ var _ = Describe("[sig-storage] Secrets", func() {
})
/*
Testname: secret-mounted-volume-optional-update-change
Description: Ensure that optional update change to secret can be
reflected on a mounted volume.
Release : v1.9
Testname: Secrets Volume, create, update and delete
Description: Create a Pod with three containers with secrets volume sources, namely a create, update and delete container. The create container, when started, MUST NOT see the secret; the update and delete containers MUST be created with a secret value. Create the secret in the create container: the Pod MUST be able to read the secret from the create container. Update the secret in the update container: the Pod MUST be able to read the updated secret value. Delete the secret in the delete container: the Pod MUST fail to read the secret from the delete container.
*/
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
@@ -279,7 +286,7 @@ var _ = Describe("[sig-storage] Secrets", func() {
Containers: []v1.Container{
{
Name: deleteContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/delete/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -291,7 +298,7 @@ var _ = Describe("[sig-storage] Secrets", func() {
},
{
Name: updateContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/update/data-3"},
VolumeMounts: []v1.VolumeMount{
{
@@ -303,7 +310,7 @@ var _ = Describe("[sig-storage] Secrets", func() {
},
{
Name: createContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -406,7 +413,7 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/secret-volume/data-1",
"--file_mode=/etc/secret-volume/data-1"},
@@ -483,7 +490,7 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/secret-volume/new-path-data-1",
"--file_mode=/etc/secret-volume/new-path-data-1"},

View File

@@ -0,0 +1,266 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"strings"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
)
var _ = framework.KubeDescribe("Security Context", func() {
f := framework.NewDefaultFramework("security-context-test")
var podClient *framework.PodClient
BeforeEach(func() {
podClient = f.PodClient()
})
Context("When creating a container with runAsUser", func() {
makeUserPod := func(podName, image string, command []string, userid int64) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: image,
Name: podName,
Command: command,
SecurityContext: &v1.SecurityContext{
RunAsUser: &userid,
},
},
},
},
}
}
createAndWaitUserPod := func(userid int64) {
podName := fmt.Sprintf("busybox-user-%d-%s", userid, uuid.NewUUID())
podClient.Create(makeUserPod(podName,
framework.BusyBoxImage,
[]string{"sh", "-c", fmt.Sprintf("test $(id -u) -eq %d", userid)},
userid,
))
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
}
/*
Release : v1.12
Testname: Security Context: runAsUser (id:65534)
Description: A container created with the runAsUser option, passing an id (id: 65534), uses that
given id when running the container.
*/
It("should run the container with uid 65534 [NodeConformance]", func() {
createAndWaitUserPod(65534)
})
/*
Release : v1.12
Testname: Security Context: runAsUser (id:0)
Description: A container created with the runAsUser option, passing an id (id: 0), uses that
given id when running the container.
*/
It("should run the container with uid 0 [NodeConformance]", func() {
createAndWaitUserPod(0)
})
})
Context("When creating a pod with readOnlyRootFilesystem", func() {
makeUserPod := func(podName, image string, command []string, readOnlyRootFilesystem bool) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: image,
Name: podName,
Command: command,
SecurityContext: &v1.SecurityContext{
ReadOnlyRootFilesystem: &readOnlyRootFilesystem,
},
},
},
},
}
}
createAndWaitUserPod := func(readOnlyRootFilesystem bool) string {
podName := fmt.Sprintf("busybox-readonly-%v-%s", readOnlyRootFilesystem, uuid.NewUUID())
podClient.Create(makeUserPod(podName,
framework.BusyBoxImage,
[]string{"sh", "-c", "touch checkfile"},
readOnlyRootFilesystem,
))
if readOnlyRootFilesystem {
podClient.WaitForFailure(podName, framework.PodStartTimeout)
} else {
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
}
return podName
}
/*
Release : v1.12
Testname: Security Context: readOnlyRootFilesystem=true.
Description: When a container is configured with readOnlyRootFilesystem set to true, write operations to the root filesystem are not allowed.
*/
It("should run the container with readonly rootfs when readOnlyRootFilesystem=true [NodeConformance]", func() {
createAndWaitUserPod(true)
})
/*
Release : v1.12
Testname: Security Context: readOnlyRootFilesystem=false.
Description: When a container is configured with readOnlyRootFilesystem set to false, write operations to the root filesystem are allowed.
*/
It("should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance]", func() {
createAndWaitUserPod(false)
})
})
Context("When creating a pod with privileged", func() {
makeUserPod := func(podName, image string, command []string, privileged bool) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: image,
Name: podName,
Command: command,
SecurityContext: &v1.SecurityContext{
Privileged: &privileged,
},
},
},
},
}
}
createAndWaitUserPod := func(privileged bool) string {
podName := fmt.Sprintf("busybox-privileged-%v-%s", privileged, uuid.NewUUID())
podClient.Create(makeUserPod(podName,
framework.BusyBoxImage,
[]string{"sh", "-c", "ip link add dummy0 type dummy || true"},
privileged,
))
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
return podName
}
It("should run the container as unprivileged when false [NodeConformance]", func() {
podName := createAndWaitUserPod(false)
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
if err != nil {
framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
}
framework.Logf("Got logs for pod %q: %q", podName, logs)
if !strings.Contains(logs, "Operation not permitted") {
framework.Failf("unprivileged container shouldn't be able to create dummy device")
}
})
})
Context("when creating containers with AllowPrivilegeEscalation", func() {
makeAllowPrivilegeEscalationPod := func(podName string, allowPrivilegeEscalation *bool, uid int64) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: imageutils.GetE2EImage(imageutils.Nonewprivs),
Name: podName,
SecurityContext: &v1.SecurityContext{
AllowPrivilegeEscalation: allowPrivilegeEscalation,
RunAsUser: &uid,
},
},
},
},
}
}
createAndMatchOutput := func(podName, output string, allowPrivilegeEscalation *bool, uid int64) error {
podClient.Create(makeAllowPrivilegeEscalationPod(podName,
allowPrivilegeEscalation,
uid,
))
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
return podClient.MatchContainerOutput(podName, podName, output)
}
/*
Testname: allowPrivilegeEscalation unset and uid != 0.
Description: Leaving allowPrivilegeEscalation unset allows the privilege escalation operation.
A container is configured with allowPrivilegeEscalation not specified (nil) and a given uid which is not 0.
When the container is run, it runs using uid=0.
*/
It("should allow privilege escalation when not explicitly set and uid != 0 [NodeConformance]", func() {
podName := "alpine-nnp-nil-" + string(uuid.NewUUID())
if err := createAndMatchOutput(podName, "Effective uid: 0", nil, 1000); err != nil {
framework.Failf("Match output for pod %q failed: %v", podName, err)
}
})
/*
Testname: allowPrivilegeEscalation=false.
Description: Setting allowPrivilegeEscalation to false does not allow the privilege escalation operation.
A container is configured with allowPrivilegeEscalation=false and a given uid (1000) which is not 0.
When the container is run, it runs using uid=1000.
*/
It("should not allow privilege escalation when false [NodeConformance]", func() {
podName := "alpine-nnp-false-" + string(uuid.NewUUID())
apeFalse := false
if err := createAndMatchOutput(podName, "Effective uid: 1000", &apeFalse, 1000); err != nil {
framework.Failf("Match output for pod %q failed: %v", podName, err)
}
})
/*
Testname: allowPrivilegeEscalation=true.
Description: Setting allowPrivilegeEscalation to true allows the privilege escalation operation.
A container is configured with allowPrivilegeEscalation=true and a given uid (1000) which is not 0.
When the container is run, it runs using uid=0 (making use of the privilege escalation).
*/
It("should allow privilege escalation when true [NodeConformance]", func() {
podName := "alpine-nnp-true-" + string(uuid.NewUUID())
apeTrue := true
if err := createAndMatchOutput(podName, "Effective uid: 0", &apeTrue, 1000); err != nil {
framework.Failf("Match output for pod %q failed: %v", podName, err)
}
})
})
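The knob these three cases toggle is a *bool on the container's SecurityContext. A minimal sketch, assuming the v1 import from this file; the function name is illustrative.
func sketchNoPrivilegeEscalation(uid int64) *v1.SecurityContext {
	ape := false
	return &v1.SecurityContext{
		RunAsUser: &uid,
		// false sets no_new_privs on the container process, so setuid
		// binaries cannot raise the effective uid back to 0.
		AllowPrivilegeEscalation: &ape,
	}
}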
})

View File

@@ -22,6 +22,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/kubelet/sysctl"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -42,7 +43,7 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
Containers: []v1.Container{
{
Name: "test-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
},
},
RestartPolicy: v1.RestartPolicyNever,

View File

@@ -0,0 +1,98 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"time"
batch "k8s.io/api/batch/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/util/slice"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const dummyFinalizer = "k8s.io/dummy-finalizer"
var _ = framework.KubeDescribe("TTLAfterFinished", func() {
f := framework.NewDefaultFramework("ttlafterfinished")
alphaFeatureStr := "[Feature:TTLAfterFinished]"
It(fmt.Sprintf("Job should be deleted once it finishes after TTL seconds %s", alphaFeatureStr), func() {
testFinishedJob(f)
})
})
func cleanupJob(f *framework.Framework, job *batch.Job) {
ns := f.Namespace.Name
c := f.ClientSet
framework.Logf("Remove the Job's dummy finalizer; the Job should be deleted cascadingly")
removeFinalizerFunc := func(j *batch.Job) {
j.ObjectMeta.Finalizers = slice.RemoveString(j.ObjectMeta.Finalizers, dummyFinalizer, nil)
}
_, err := framework.UpdateJobWithRetries(c, ns, job.Name, removeFinalizerFunc)
Expect(err).NotTo(HaveOccurred())
framework.WaitForJobGone(c, ns, job.Name, wait.ForeverTestTimeout)
err = framework.WaitForAllJobPodsGone(c, ns, job.Name)
Expect(err).NotTo(HaveOccurred())
}
func testFinishedJob(f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet
parallelism := int32(1)
completions := int32(1)
backoffLimit := int32(2)
ttl := int32(10)
job := framework.NewTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job.Spec.TTLSecondsAfterFinished = &ttl
job.ObjectMeta.Finalizers = []string{dummyFinalizer}
defer cleanupJob(f, job)
framework.Logf("Create a Job %s/%s with TTL", job.Namespace, job.Name)
job, err := framework.CreateJob(c, ns, job)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Wait for the Job to finish")
err = framework.WaitForJobFinish(c, ns, job.Name)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Wait for TTL after finished controller to delete the Job")
err = framework.WaitForJobDeleting(c, ns, job.Name)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Check Job's deletionTimestamp and compare with the time when the Job finished")
job, err = framework.GetJob(c, ns, job.Name)
Expect(err).NotTo(HaveOccurred())
finishTime := framework.JobFinishTime(job)
finishTimeUTC := finishTime.UTC()
Expect(finishTime.IsZero()).NotTo(BeTrue())
deleteAtUTC := job.ObjectMeta.DeletionTimestamp.UTC()
Expect(deleteAtUTC).NotTo(BeNil())
expireAtUTC := finishTimeUTC.Add(time.Duration(ttl) * time.Second)
Expect(deleteAtUTC.Before(expireAtUTC)).To(BeFalse())
}
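The TTL mechanism under test is a single field on the JobSpec (alpha behind the TTLAfterFinished feature gate at this point). A hedged sketch of a Job carrying it, assuming the batch/v1, core/v1 and framework imports from this file plus metav1 (not currently imported here); the names are illustrative.
func sketchTTLJob(ttlSeconds int32) *batch.Job {
	return &batch.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "ttl-sketch"},
		Spec: batch.JobSpec{
			// Once the Job finishes, the TTL controller deletes it after this many seconds.
			TTLSecondsAfterFinished: &ttlSeconds,
			Template: v1.PodTemplateSpec{
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					Containers: []v1.Container{{
						Name:    "work",
						Image:   framework.BusyBoxImage,
						Command: []string{"true"},
					}},
				},
			},
		},
	}
}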

View File

@@ -40,11 +40,6 @@ const (
NodeE2E Suite = "node e2e"
)
var (
mountImage = imageutils.GetE2EImage(imageutils.Mounttest)
busyboxImage = "busybox"
)
var CurrentSuite Suite
// CommonImageWhiteList is the list of images used in common test. These images should be prepulled
@@ -52,20 +47,20 @@ var CurrentSuite Suite
// only used by node e2e test.
// TODO(random-liu): Change the image puller pod to use similar mechanism.
var CommonImageWhiteList = sets.NewString(
"busybox",
imageutils.GetE2EImage(imageutils.BusyBox),
imageutils.GetE2EImage(imageutils.EntrypointTester),
imageutils.GetE2EImage(imageutils.IpcUtils),
imageutils.GetE2EImage(imageutils.Liveness),
imageutils.GetE2EImage(imageutils.Mounttest),
imageutils.GetE2EImage(imageutils.MounttestUser),
imageutils.GetE2EImage(imageutils.Netexec),
imageutils.GetE2EImage(imageutils.NginxSlim),
imageutils.GetE2EImage(imageutils.Nginx),
imageutils.GetE2EImage(imageutils.ServeHostname),
imageutils.GetE2EImage(imageutils.TestWebserver),
imageutils.GetE2EImage(imageutils.Hostexec),
imageutils.GetE2EImage(imageutils.VolumeNFSServer),
imageutils.GetE2EImage(imageutils.VolumeGlusterServer),
imageutils.GetE2EImage(imageutils.E2ENet),
imageutils.GetE2EImage(imageutils.Net),
)
func svcByName(name string, port int) *v1.Service {

View File

@@ -62,7 +62,7 @@ var _ = Describe("[sig-storage] GCP Volumes", func() {
var c clientset.Interface
BeforeEach(func() {
framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu")
framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu", "custom")
namespace = f.Namespace
c = f.ClientSet

View File

@@ -75,10 +75,6 @@ func setupProviderConfig() error {
managedZones = []string{zone}
}
gceAlphaFeatureGate := gcecloud.NewAlphaFeatureGate([]string{
gcecloud.AlphaFeatureNetworkEndpointGroup,
})
gceCloud, err := gcecloud.CreateGCECloud(&gcecloud.CloudConfig{
ApiEndpoint: framework.TestContext.CloudConfig.ApiEndpoint,
ProjectID: framework.TestContext.CloudConfig.ProjectID,
@@ -91,7 +87,8 @@ func setupProviderConfig() error {
NodeInstancePrefix: "",
TokenSource: nil,
UseMetadataServer: false,
AlphaFeatureGate: gceAlphaFeatureGate})
AlphaFeatureGate: gcecloud.NewAlphaFeatureGate([]string{}),
})
if err != nil {
return fmt.Errorf("Error building GCE/GKE provider: %v", err)
@@ -186,41 +183,15 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
// #41007. To avoid those pods preventing the whole test runs (and just
// wasting the whole run), we allow for some not-ready pods (with the
// number equal to the number of allowed not-ready nodes).
if err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, framework.ImagePullerLabels); err != nil {
if err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
framework.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf)
runKubernetesServiceTestContainer(c, metav1.NamespaceDefault)
framework.Failf("Error waiting for all pods to be running and ready: %v", err)
}
if err := framework.WaitForPodsSuccess(c, metav1.NamespaceSystem, framework.ImagePullerLabels, framework.ImagePrePullingTimeout); err != nil {
// There is no guarantee that the image pulling will succeed in 3 minutes
// and we don't even run the image puller on all platforms (including GKE).
// We wait for it so we get an indication of failures in the logs, and to
// maximize benefit of image pre-pulling.
framework.Logf("WARNING: Image pulling pods failed to enter success in %v: %v", framework.ImagePrePullingTimeout, err)
}
// Dump the output of the nethealth containers only once per run
if framework.TestContext.DumpLogsOnFailure {
logFunc := framework.Logf
if framework.TestContext.ReportDir != "" {
filePath := path.Join(framework.TestContext.ReportDir, "nethealth.txt")
file, err := os.Create(filePath)
if err != nil {
framework.Logf("Failed to create a file with network health data %v: %v\nPrinting to stdout", filePath, err)
} else {
defer file.Close()
if err = file.Chmod(0644); err != nil {
framework.Logf("Failed to chmod to 644 of %v: %v", filePath, err)
}
logFunc = framework.GetLogToFileFunc(file)
framework.Logf("Dumping network health container logs from all nodes to file %v", filePath)
}
} else {
framework.Logf("Dumping network health container logs from all nodes...")
}
framework.LogContainersInPodsWithLabels(c, metav1.NamespaceSystem, framework.ImagePullerLabels, "nethealth", logFunc)
if err := framework.WaitForDaemonSets(c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil {
framework.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
}
// Log the version of the server and this client.

View File

@@ -15,6 +15,7 @@ go_library(
"deployment_util.go",
"exec_util.go",
"firewall_util.go",
"flake_reporting_util.go",
"framework.go",
"get-kubemark-resource-usage.go",
"google_compute.go",
@@ -58,10 +59,12 @@ go_library(
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/job:go_default_library",
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/controller/service:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/apis/config:go_default_library",
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
"//pkg/kubelet/dockershim/metrics:go_default_library",
"//pkg/kubelet/events:go_default_library",
@@ -80,6 +83,58 @@ go_library(
"//pkg/util/taints:go_default_library",
"//pkg/util/version:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/apps/v1beta2:go_default_library",
"//staging/src/k8s.io/api/authorization/v1beta1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/version:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/discovery/cached:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/restmapper:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
"//test/e2e/framework/ginkgowrapper:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/manifest:go_default_library",
@@ -103,56 +158,6 @@ go_library(
"//vendor/golang.org/x/net/websocket:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",
"//vendor/google.golang.org/api/googleapi:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
"//vendor/k8s.io/api/authorization/v1beta1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/version:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/discovery/cached:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/restmapper:go_default_library",
"//vendor/k8s.io/client-go/scale:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//vendor/k8s.io/client-go/tools/remotecommand:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)

View File

@@ -17,7 +17,7 @@ limitations under the License.
package framework
import (
"fmt"
"github.com/golang/glog"
"sync"
"time"
@@ -62,7 +62,7 @@ func WaitForNamedAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviews
// GKE doesn't enable the SAR endpoint. Without this endpoint, we cannot determine if the policy engine
// has adjusted as expected. In this case, simply wait one second and hope it's up to date
if apierrors.IsNotFound(err) {
fmt.Printf("SubjectAccessReview endpoint is missing\n")
glog.Info("SubjectAccessReview endpoint is missing")
time.Sleep(1 * time.Second)
return true, nil
}
@@ -94,7 +94,7 @@ func BindClusterRole(c v1beta1rbac.ClusterRoleBindingsGetter, clusterRole, ns st
// if we failed, don't fail the entire test because it may still work. RBAC may simply be disabled.
if err != nil {
fmt.Printf("Error binding clusterrole/%s for %q for %v\n", clusterRole, ns, subjects)
glog.Errorf("Error binding clusterrole/%s for %q for %v\n", clusterRole, ns, subjects)
}
}
@@ -124,7 +124,7 @@ func bindInNamespace(c v1beta1rbac.RoleBindingsGetter, roleType, role, ns string
// if we failed, don't fail the entire test because it may still work. RBAC may simply be disabled.
if err != nil {
fmt.Printf("Error binding %s/%s into %q for %v\n", roleType, role, ns, subjects)
glog.Errorf("Error binding %s/%s into %q for %v\n", roleType, role, ns, subjects)
}
}
@@ -140,7 +140,7 @@ func IsRBACEnabled(f *Framework) bool {
Logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err)
isRBACEnabled = false
} else if crs == nil || len(crs.Items) == 0 {
Logf("No ClusteRoles found; assuming RBAC is disabled.")
Logf("No ClusterRoles found; assuming RBAC is disabled.")
isRBACEnabled = false
} else {
Logf("Found ClusterRoles; assuming RBAC is enabled.")

View File

@@ -21,7 +21,7 @@ import (
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
crdclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/testserver"
"k8s.io/apiextensions-apiserver/test/integration/fixtures"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
@@ -76,7 +76,7 @@ func CreateTestCRD(f *Framework) (*TestCrd, error) {
crd := newCRDForTest(testcrd)
// create the CRD and wait for the resource to be recognized and available.
crd, err = testserver.CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient)
crd, err = fixtures.CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient)
if err != nil {
Failf("failed to create CustomResourceDefinition: %v", err)
return nil, err
@@ -89,7 +89,7 @@ func CreateTestCRD(f *Framework) (*TestCrd, error) {
testcrd.Crd = crd
testcrd.DynamicClient = resourceClient
testcrd.CleanUp = func() error {
err := testserver.DeleteCustomResourceDefinition(crd, apiExtensionClient)
err := fixtures.DeleteCustomResourceDefinition(crd, apiExtensionClient)
if err != nil {
Failf("failed to delete CustomResourceDefinition(%s): %v", name, err)
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package framework
import (
"context"
"fmt"
"time"
@@ -30,9 +31,11 @@ import (
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
watchtools "k8s.io/client-go/tools/watch"
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*apps.Deployment, error) {
@@ -172,7 +175,9 @@ func WatchRecreateDeployment(c clientset.Interface, d *apps.Deployment) error {
d.Generation <= d.Status.ObservedGeneration, nil
}
_, err = watch.Until(2*time.Minute, w, condition)
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, w, condition)
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
}
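The same migration recurs throughout this commit wherever the deprecated watch.Until helper was used; a minimal sketch of the pattern, using the same names as the surrounding code:

	// Before: _, err = watch.Until(2*time.Minute, w, condition)
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()
	_, err = watchtools.UntilWithoutRetry(ctx, w, condition)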
@@ -255,7 +260,7 @@ func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector ma
Containers: []v1.Container{
{
Name: "write-pod",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
SecurityContext: &v1.SecurityContext{

View File

@@ -54,7 +54,7 @@ func ConstructFirewallForLBService(svc *v1.Service, nodeTag string) *compute.Fir
Failf("can not construct firewall rule for non-loadbalancer type service")
}
fw := compute.Firewall{}
fw.Name = MakeFirewallNameForLBService(cloudprovider.GetLoadBalancerName(svc))
fw.Name = MakeFirewallNameForLBService(cloudprovider.DefaultLoadBalancerName(svc))
fw.TargetTags = []string{nodeTag}
if svc.Spec.LoadBalancerSourceRanges == nil {
fw.SourceRanges = []string{"0.0.0.0/0"}
@@ -80,7 +80,7 @@ func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service,
Failf("can not construct firewall rule for non-loadbalancer type service")
}
fw := compute.Firewall{}
fw.Name = MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.GetLoadBalancerName(svc), isNodesHealthCheck)
fw.Name = MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), isNodesHealthCheck)
fw.TargetTags = []string{nodeTag}
fw.SourceRanges = gcecloud.LoadBalancerSrcRanges()
healthCheckPort := gcecloud.GetNodesHealthCheckPort()

View File

@@ -0,0 +1,91 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bytes"
"fmt"
"sync"
)
type FlakeReport struct {
lock sync.RWMutex
Flakes []string `json:"flakes"`
FlakeCount int `json:"flakeCount"`
}
func NewFlakeReport() *FlakeReport {
return &FlakeReport{
Flakes: []string{},
}
}
func buildDescription(optionalDescription ...interface{}) string {
switch len(optionalDescription) {
case 0:
return ""
default:
return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...)
}
}
// RecordFlakeIfError records the error (if non-nil) as a flake along with an optional description.
// This can be used as a replacement for framework.ExpectNoError() for non-critical errors that can
// be considered 'flakes', to avoid causing failures in tests.
func (f *FlakeReport) RecordFlakeIfError(err error, optionalDescription ...interface{}) {
if err == nil {
return
}
msg := fmt.Sprintf("Unexpected error occurred: %v", err)
desc := buildDescription(optionalDescription...) // spread the args; passing the slice itself would break the string assertion in buildDescription
if desc != "" {
msg = fmt.Sprintf("%v (Description: %v)", msg, desc)
}
Logf("%s", msg)
f.lock.Lock()
defer f.lock.Unlock()
f.Flakes = append(f.Flakes, msg)
f.FlakeCount++
}
func (f *FlakeReport) GetFlakeCount() int {
f.lock.RLock()
defer f.lock.RUnlock()
return f.FlakeCount
}
func (f *FlakeReport) PrintHumanReadable() string {
f.lock.RLock()
defer f.lock.RUnlock()
buf := bytes.Buffer{}
buf.WriteString(fmt.Sprintf("FlakeCount: %v\n", f.FlakeCount))
buf.WriteString("Flakes:\n")
for _, flake := range f.Flakes {
buf.WriteString(fmt.Sprintf("%v\n", flake))
}
return buf.String()
}
func (f *FlakeReport) PrintJSON() string {
f.lock.RLock()
defer f.lock.RUnlock()
return PrettyPrintJSON(f)
}
func (f *FlakeReport) SummaryKind() string {
return "FlakeReport"
}
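A minimal usage sketch for the new FlakeReport (the cleanup call is a hypothetical stand-in for any non-critical, possibly flaky operation):

	flakes := NewFlakeReport()
	err := doOptionalCleanup() // hypothetical flaky operation
	flakes.RecordFlakeIfError(err, "optional cleanup of %q", "test-ns")
	if flakes.GetFlakeCount() > 0 {
		Logf("%s", flakes.PrintHumanReadable())
	}

In practice tests go through Framework.RecordFlakeIfError (added later in this commit), which feeds the same report into the framework's TestSummaries.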

View File

@@ -21,12 +21,12 @@ import (
"bytes"
"fmt"
"os"
"path"
"strings"
"sync"
"time"
"k8s.io/api/core/v1"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -42,6 +42,7 @@ import (
"k8s.io/client-go/restmapper"
scaleclient "k8s.io/client-go/scale"
"k8s.io/client-go/tools/clientcmd"
csi "k8s.io/csi-api/pkg/client/clientset/versioned"
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
@@ -67,6 +68,8 @@ type Framework struct {
ClientSet clientset.Interface
KubemarkExternalClusterClientSet clientset.Interface
APIExtensionsClientSet apiextensionsclient.Interface
CSIClientSet csi.Interface
InternalClientset *internalclientset.Clientset
AggregatorClient *aggregatorclient.Clientset
@@ -90,6 +93,9 @@ type Framework struct {
logsSizeCloseChannel chan bool
logsSizeVerifier *LogsSizeVerifier
// Flaky operation failures in an e2e test can be captured through this.
flakeReport *FlakeReport
// To make sure that this framework cleans up after itself, no matter what,
// we install a Cleanup action before each test and clear it after. If we
// should abort, the AfterSuite hook should run all Cleanup actions.
@@ -152,6 +158,15 @@ func (f *Framework) BeforeEach() {
if f.ClientSet == nil {
By("Creating a kubernetes client")
config, err := LoadConfig()
Expect(err).NotTo(HaveOccurred()) // check the error before dereferencing config below
testDesc := CurrentGinkgoTestDescription()
if len(testDesc.ComponentTexts) > 0 {
componentTexts := strings.Join(testDesc.ComponentTexts, " ")
config.UserAgent = fmt.Sprintf(
"%v -- %v",
rest.DefaultKubernetesUserAgent(),
componentTexts)
}
config.QPS = f.Options.ClientQPS
config.Burst = f.Options.ClientBurst
@@ -163,12 +178,19 @@ func (f *Framework) BeforeEach() {
}
f.ClientSet, err = clientset.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
f.APIExtensionsClientSet, err = apiextensionsclient.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
f.InternalClientset, err = internalclientset.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
f.AggregatorClient, err = aggregatorclient.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
f.DynamicClient, err = dynamic.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
// csi.storage.k8s.io is based on CRD, which is served only as JSON
jsonConfig := *config // copy the config so the JSON content type does not leak into the other clients above
jsonConfig.ContentType = "application/json"
f.CSIClientSet, err = csi.NewForConfig(&jsonConfig)
Expect(err).NotTo(HaveOccurred())
// create scales getter, set GroupVersion and NegotiatedSerializer to default values
// as they are required when creating a REST client.
@@ -210,7 +232,7 @@ func (f *Framework) BeforeEach() {
}
if !f.SkipNamespaceCreation {
By("Building a namespace api object")
By(fmt.Sprintf("Building a namespace api object, basename %s", f.BaseName))
namespace, err := f.CreateNamespace(f.BaseName, map[string]string{
"e2e-framework": f.BaseName,
})
@@ -269,6 +291,8 @@ func (f *Framework) BeforeEach() {
}
}
f.flakeReport = NewFlakeReport()
}
// AfterEach deletes the namespace, after reading its events.
@@ -326,25 +350,6 @@ func (f *Framework) AfterEach() {
if !f.SkipNamespaceCreation {
DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name)
}
logFunc := Logf
if TestContext.ReportDir != "" {
filePath := path.Join(TestContext.ReportDir, "image-puller.txt")
file, err := os.Create(filePath)
if err != nil {
By(fmt.Sprintf("Failed to create a file with image-puller data %v: %v\nPrinting to stdout", filePath, err))
} else {
By(fmt.Sprintf("Dumping a list of prepulled images on each node to file %v", filePath))
defer file.Close()
if err = file.Chmod(0644); err != nil {
Logf("Failed to chmod to 644 of %v: %v", filePath, err)
}
logFunc = GetLogToFileFunc(file)
}
} else {
By("Dumping a list of prepulled images on each node...")
}
LogContainersInPodsWithLabels(f.ClientSet, metav1.NamespaceSystem, ImagePullerLabels, "image-puller", logFunc)
}
if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" && f.gatherer != nil {
@@ -382,6 +387,12 @@ func (f *Framework) AfterEach() {
close(f.kubemarkControllerCloseChannel)
}
// Report any flakes that were observed in the e2e test and reset.
if f.flakeReport != nil && f.flakeReport.GetFlakeCount() > 0 {
f.TestSummaries = append(f.TestSummaries, f.flakeReport)
f.flakeReport = nil
}
PrintSummaries(f.TestSummaries, f.BaseName)
// Check whether all nodes are ready after the test.
@@ -409,6 +420,10 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (
return ns, err
}
func (f *Framework) RecordFlakeIfError(err error, optionalDescription ...interface{}) {
f.flakeReport.RecordFlakeIfError(err, optionalDescription...)
}
// AddNamespacesToDelete adds one or more namespaces to be deleted when the test
// completes.
func (f *Framework) AddNamespacesToDelete(namespaces ...*v1.Namespace) {
@@ -533,7 +548,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
return nil
} else {
return []v1.ServicePort{{
Protocol: "TCP",
Protocol: v1.ProtocolTCP,
Port: int32(svcPort),
TargetPort: intstr.FromInt(contPort),
}}

View File

@@ -21,6 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"github.com/golang/glog"
. "github.com/onsi/gomega"
)
@@ -50,13 +51,13 @@ func NumberOfNVIDIAGPUs(node *v1.Node) int64 {
}
// NVIDIADevicePlugin returns the official Google Device Plugin pod for NVIDIA GPU in GKE
func NVIDIADevicePlugin(ns string) *v1.Pod {
func NVIDIADevicePlugin() *v1.Pod {
ds, err := DsFromManifest(GPUDevicePluginDSYAML)
Expect(err).NotTo(HaveOccurred())
p := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "device-plugin-nvidia-gpu-" + string(uuid.NewUUID()),
Namespace: ns,
Namespace: metav1.NamespaceSystem,
},
Spec: ds.Spec.Template.Spec,
@@ -69,7 +70,16 @@ func NVIDIADevicePlugin(ns string) *v1.Pod {
func GetGPUDevicePluginImage() string {
ds, err := DsFromManifest(GPUDevicePluginDSYAML)
if err != nil || ds == nil || len(ds.Spec.Template.Spec.Containers) < 1 {
if err != nil {
glog.Errorf("Failed to parse the device plugin image: %v", err)
return ""
}
if ds == nil {
glog.Errorf("Failed to parse the device plugin image: the extracted DaemonSet is nil")
return ""
}
if len(ds.Spec.Template.Spec.Containers) < 1 {
glog.Errorf("Failed to parse the device plugin image: cannot extract the container from YAML")
return ""
}
return ds.Spec.Template.Spec.Containers[0].Image

View File

@@ -121,8 +121,9 @@ const (
// a single character of padding.
nameLenLimit = 62
NEGAnnotation = "alpha.cloud.google.com/load-balancer-neg"
NEGUpdateTimeout = 2 * time.Minute
NEGAnnotation = "cloud.google.com/neg"
NEGStatusAnnotation = "cloud.google.com/neg-status"
NEGUpdateTimeout = 2 * time.Minute
InstanceGroupAnnotation = "ingress.gcp.kubernetes.io/instance-groups"
@@ -164,6 +165,16 @@ type IngressConformanceTests struct {
ExitLog string
}
// NegStatus contains the names and zones of the Network Endpoint Group
// resources associated with this service.
// Needs to be consistent with the NEG internal structs in ingress-gce.
type NegStatus struct {
// NetworkEndpointGroups is the mapping from service port to NEG
// resource: the key is the service port, the value is the NEG resource name.
NetworkEndpointGroups map[int32]string `json:"network_endpoint_groups,omitempty"`
Zones []string `json:"zones,omitempty"`
}
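// Illustrative only (values invented): an annotation payload that unmarshals
// into NegStatus might look like
//   {"network_endpoint_groups":{"80":"k8s1-0123abcd-ns-svc-80-feedbeef"},"zones":["us-central1-a"]}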
// CreateIngressComformanceTests generates a slice of sequential test cases:
// a simple http ingress, ingress with HTTPS, ingress HTTPS with a modified hostname,
// ingress https with a modified URLMap
@@ -940,7 +951,7 @@ func (cont *GCEIngressController) backendMode(svcPorts map[string]v1.ServicePort
bsMatch := &compute.BackendService{}
// Non-NEG BackendServices are named with the Nodeport in the name.
// NEG BackendServices' names contain a sha256 hash of a string.
negString := strings.Join([]string{uid, cont.Ns, svcName, sp.TargetPort.String()}, ";")
negString := strings.Join([]string{uid, cont.Ns, svcName, fmt.Sprintf("%v", sp.Port)}, ";")
negHash := fmt.Sprintf("%x", sha256.Sum256([]byte(negString)))[:8]
for _, bs := range beList {
if strings.Contains(bs.Name, strconv.Itoa(int(sp.NodePort))) ||

View File

@@ -27,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
jobutil "k8s.io/kubernetes/pkg/controller/job"
)
const (
@@ -181,8 +182,8 @@ func WaitForAllJobPodsRunning(c clientset.Interface, ns, jobName string, paralle
})
}
// WaitForJobFinish uses c to wait for compeletions to complete for the Job jobName in namespace ns.
func WaitForJobFinish(c clientset.Interface, ns, jobName string, completions int32) error {
// WaitForJobComplete uses c to wait for completions to complete for the Job jobName in namespace ns.
func WaitForJobComplete(c clientset.Interface, ns, jobName string, completions int32) error {
return wait.Poll(Poll, JobTimeout, func() (bool, error) {
curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil {
@@ -192,6 +193,17 @@ func WaitForJobFinish(c clientset.Interface, ns, jobName string, completions int
})
}
// WaitForJobFinish uses c to wait for the Job jobName in namespace ns to finish (either Failed or Complete).
func WaitForJobFinish(c clientset.Interface, ns, jobName string) error {
return wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil {
return false, err
}
return jobutil.IsJobFinished(curr), nil
})
}
// WaitForJobFailure uses c to wait for up to timeout for the Job named jobName in namespace ns to fail.
func WaitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.Duration, reason string) error {
return wait.Poll(Poll, timeout, func() (bool, error) {
@@ -239,6 +251,18 @@ func CheckForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parall
return count == parallelism, nil
}
// WaitForAllJobPodsGone waits for all pods for the Job named jobName in namespace ns
// to be deleted.
func WaitForAllJobPodsGone(c clientset.Interface, ns, jobName string) error {
return wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
pods, err := GetJobPods(c, ns, jobName)
if err != nil {
return false, err
}
return len(pods.Items) == 0, nil
})
}
func newBool(val bool) *bool {
p := new(bool)
*p = val
@@ -250,7 +274,7 @@ type updateJobFunc func(*batch.Job)
func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateJobFunc) (job *batch.Job, err error) {
jobs := c.BatchV1().Jobs(namespace)
var updateErr error
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
pollErr := wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
if job, err = jobs.Get(name, metav1.GetOptions{}); err != nil {
return false, err
}
@@ -268,3 +292,25 @@ func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUp
}
return job, pollErr
}
// WaitForJobDeleting uses c to wait for the Job jobName in namespace ns to have
// a non-nil deletionTimestamp (i.e. being deleted).
func WaitForJobDeleting(c clientset.Interface, ns, jobName string) error {
return wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil {
return false, err
}
return curr.ObjectMeta.DeletionTimestamp != nil, nil
})
}
func JobFinishTime(finishedJob *batch.Job) metav1.Time {
var finishTime metav1.Time
for _, c := range finishedJob.Status.Conditions {
if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == v1.ConditionTrue {
return c.LastTransitionTime
}
}
return finishTime
}
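A hedged sketch of how the new job helpers compose (the wrapper name is invented; c, ns and jobName come from the caller):

	// waitAndReportJobFinish waits for the Job to reach a terminal state
	// (Complete or Failed), then logs when it finished.
	func waitAndReportJobFinish(c clientset.Interface, ns, jobName string) error {
		if err := WaitForJobFinish(c, ns, jobName); err != nil {
			return err
		}
		job, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		Logf("Job %q finished at %v", jobName, JobFinishTime(job))
		return nil
	}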

View File

@@ -492,7 +492,7 @@ type usageDataPerContainer struct {
}
func GetKubeletHeapStats(c clientset.Interface, nodeName string) (string, error) {
client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap")
client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap", ports.KubeletPort)
if err != nil {
return "", err
}

View File

@@ -21,12 +21,12 @@ go_library(
"//pkg/apis/core:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/util/system:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/common/expfmt:go_default_library",
"//vendor/github.com/prometheus/common/model:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)

View File

@@ -23,9 +23,11 @@ import (
"fmt"
"io"
"math"
"reflect"
"sort"
"strconv"
"strings"
"sync"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -232,6 +234,143 @@ func (l *SchedulingMetrics) PrintJSON() string {
return PrettyPrintJSON(l)
}
type Histogram struct {
Labels map[string]string `json:"labels"`
Buckets map[string]int `json:"buckets"`
}
type HistogramVec []Histogram
func newHistogram(labels map[string]string) *Histogram {
return &Histogram{
Labels: labels,
Buckets: make(map[string]int),
}
}
type EtcdMetrics struct {
BackendCommitDuration HistogramVec `json:"backendCommitDuration"`
SnapshotSaveTotalDuration HistogramVec `json:"snapshotSaveTotalDuration"`
PeerRoundTripTime HistogramVec `json:"peerRoundTripTime"`
WalFsyncDuration HistogramVec `json:"walFsyncDuration"`
MaxDatabaseSize float64 `json:"maxDatabaseSize"`
}
func newEtcdMetrics() *EtcdMetrics {
return &EtcdMetrics{
BackendCommitDuration: make(HistogramVec, 0),
SnapshotSaveTotalDuration: make(HistogramVec, 0),
PeerRoundTripTime: make(HistogramVec, 0),
WalFsyncDuration: make(HistogramVec, 0),
}
}
func (l *EtcdMetrics) SummaryKind() string {
return "EtcdMetrics"
}
func (l *EtcdMetrics) PrintHumanReadable() string {
return PrettyPrintJSON(l)
}
func (l *EtcdMetrics) PrintJSON() string {
return PrettyPrintJSON(l)
}
type EtcdMetricsCollector struct {
stopCh chan struct{}
wg *sync.WaitGroup
metrics *EtcdMetrics
}
func NewEtcdMetricsCollector() *EtcdMetricsCollector {
return &EtcdMetricsCollector{
stopCh: make(chan struct{}),
wg: &sync.WaitGroup{},
metrics: newEtcdMetrics(),
}
}
func getEtcdMetrics() ([]*model.Sample, error) {
// Etcd metrics are only exposed on localhost, so we fetch them over SSH.
if TestContext.Provider == "gke" {
Logf("Not grabbing scheduler metrics through master SSH: unsupported for gke")
return nil, nil
}
cmd := "curl http://localhost:2379/metrics"
sshResult, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
if err != nil || sshResult.Code != 0 {
return nil, fmt.Errorf("unexpected error (code: %d) in ssh connection to master: %#v", sshResult.Code, err)
}
data := sshResult.Stdout
return extractMetricSamples(data)
}
func getEtcdDatabaseSize() (float64, error) {
samples, err := getEtcdMetrics()
if err != nil {
return 0, err
}
for _, sample := range samples {
if sample.Metric[model.MetricNameLabel] == "etcd_debugging_mvcc_db_total_size_in_bytes" {
return float64(sample.Value), nil
}
}
return 0, fmt.Errorf("Couldn't find etcd database size metric")
}
// StartCollecting starts to collect etcd db size metric periodically
// and updates MaxDatabaseSize accordingly.
func (mc *EtcdMetricsCollector) StartCollecting(interval time.Duration) {
mc.wg.Add(1)
go func() {
defer mc.wg.Done()
for {
select {
case <-time.After(interval):
dbSize, err := getEtcdDatabaseSize()
if err != nil {
Logf("Failed to collect etcd database size")
continue
}
mc.metrics.MaxDatabaseSize = math.Max(mc.metrics.MaxDatabaseSize, dbSize)
case <-mc.stopCh:
return
}
}
}()
}
func (mc *EtcdMetricsCollector) StopAndSummarize() error {
close(mc.stopCh)
mc.wg.Wait()
// Do some one-off collection of metrics.
samples, err := getEtcdMetrics()
if err != nil {
return err
}
for _, sample := range samples {
switch sample.Metric[model.MetricNameLabel] {
case "etcd_disk_backend_commit_duration_seconds_bucket":
convertSampleToBucket(sample, &mc.metrics.BackendCommitDuration)
case "etcd_debugging_snap_save_total_duration_seconds_bucket":
convertSampleToBucket(sample, &mc.metrics.SnapshotSaveTotalDuration)
case "etcd_disk_wal_fsync_duration_seconds_bucket":
convertSampleToBucket(sample, &mc.metrics.WalFsyncDuration)
case "etcd_network_peer_round_trip_time_seconds_bucket":
convertSampleToBucket(sample, &mc.metrics.PeerRoundTripTime)
}
}
return nil
}
func (mc *EtcdMetricsCollector) GetMetrics() *EtcdMetrics {
return mc.metrics
}
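// Sketch of the intended collector lifecycle (the 30s interval is an
// arbitrary example, not a prescribed value):
//
//	mc := NewEtcdMetricsCollector()
//	mc.StartCollecting(30 * time.Second) // polls etcd db size until stopped
//	// ... run the measured workload ...
//	if err := mc.StopAndSummarize(); err != nil { // stops polling, scrapes histograms once
//		return err
//	}
//	summary := mc.GetMetrics()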
type SaturationTime struct {
TimeToSaturate time.Duration `json:"timeToSaturate"`
NumberOfNodes int `json:"numberOfNodes"`
@@ -500,6 +639,9 @@ func sendRestRequestToScheduler(c clientset.Interface, op string) (string, error
func getSchedulingLatency(c clientset.Interface) (*SchedulingMetrics, error) {
result := SchedulingMetrics{}
data, err := sendRestRequestToScheduler(c, "GET")
if err != nil {
return nil, err
}
samples, err := extractMetricSamples(data)
if err != nil {
@@ -546,12 +688,33 @@ func VerifySchedulerLatency(c clientset.Interface) (*SchedulingMetrics, error) {
func ResetSchedulerMetrics(c clientset.Interface) error {
responseText, err := sendRestRequestToScheduler(c, "DELETE")
if err != nil || responseText != "metrics reset\n" {
if err != nil {
return fmt.Errorf("Unexpected response: %q", responseText)
}
return nil
}
func convertSampleToBucket(sample *model.Sample, h *HistogramVec) {
labels := make(map[string]string)
for k, v := range sample.Metric {
if k != "le" {
labels[string(k)] = string(v)
}
}
var hist *Histogram
for i := range *h {
if reflect.DeepEqual(labels, (*h)[i].Labels) {
hist = &((*h)[i])
break
}
}
if hist == nil {
hist = newHistogram(labels)
*h = append(*h, *hist)
}
hist.Buckets[string(sample.Metric["le"])] = int(sample.Value)
}
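// Illustrative example: the Prometheus sample
//   etcd_disk_wal_fsync_duration_seconds_bucket{le="0.512"} 42
// lands in the HistogramVec entry whose Labels equal the sample's non-"le"
// labels, as Buckets["0.512"] = 42. (Appending *hist copies the struct, but
// Labels and Buckets are maps, so the copy and hist share them.)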
func PrettyPrintJSON(metrics interface{}) string {
output := &bytes.Buffer{}
if err := json.NewEncoder(output).Encode(metrics); err != nil {

View File

@@ -948,7 +948,10 @@ func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout
// This function executes commands on a node so it will work only for some
// environments.
func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1.Node, testFunc func()) {
host := GetNodeExternalIP(node)
host, err := GetNodeExternalIP(node)
if err != nil {
Failf("Error getting node external ip : %v", err)
}
master := GetMasterAddress(c)
By(fmt.Sprintf("block network traffic from node %s to the master", node.Name))
defer func() {

View File

@@ -63,7 +63,7 @@ func etcdUpgradeGCE(target_storage, target_version string) error {
os.Environ(),
"TEST_ETCD_VERSION="+target_version,
"STORAGE_BACKEND="+target_storage,
"TEST_ETCD_IMAGE=3.2.18-0")
"TEST_ETCD_IMAGE=3.2.24-1")
_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-l", "-M")
return err
@@ -103,7 +103,7 @@ func masterUpgradeGCE(rawV string, enableKubeProxyDaemonSet bool) error {
env = append(env,
"TEST_ETCD_VERSION="+TestContext.EtcdUpgradeVersion,
"STORAGE_BACKEND="+TestContext.EtcdUpgradeStorage,
"TEST_ETCD_IMAGE=3.2.18-0")
"TEST_ETCD_IMAGE=3.2.24-1")
} else {
// In e2e tests, we skip the confirmation prompt about
// implicit etcd upgrades to simulate the user entering "y".

View File

@@ -30,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/kubernetes/pkg/api/legacyscheme"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/sysctl"
@@ -259,3 +260,9 @@ func (c *PodClient) MatchContainerOutput(name string, containerName string, expe
}
return nil
}
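// PodIsReady returns whether the named pod in the client's namespace has its
// Ready condition set to true.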
func (c *PodClient) PodIsReady(name string) bool {
pod, err := c.Get(name, metav1.GetOptions{})
ExpectNoError(err)
return podutil.IsPodReady(pod)
}

View File

@@ -61,25 +61,54 @@ func checkProfileGatheringPrerequisites() error {
return nil
}
func gatherProfileOfKind(profileBaseName, kind string) error {
func getPortForComponent(componentName string) (int, error) {
switch componentName {
case "kube-apiserver":
return 8080, nil
case "kube-scheduler":
return 10251, nil
case "kube-controller-manager":
return 10252, nil
}
return -1, fmt.Errorf("Port for component %v unknown", componentName)
}
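// (Assumed defaults of this era: the apiserver's --insecure-port 8080 and the
// scheduler/controller-manager localhost HTTP ports 10251/10252, which the
// SSH'd curl below relies on.)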
// Gathers profiles from a master component through SSH. Example usages:
// - gatherProfile("kube-apiserver", "someTest", "heap")
// - gatherProfile("kube-scheduler", "someTest", "profile")
// - gatherProfile("kube-controller-manager", "someTest", "profile?seconds=20")
//
// We don't export this method but wrappers around it (see below).
func gatherProfile(componentName, profileBaseName, profileKind string) error {
if err := checkProfileGatheringPrerequisites(); err != nil {
return fmt.Errorf("Profile gathering pre-requisite failed: %v", err)
}
profilePort, err := getPortForComponent(componentName)
if err != nil {
return fmt.Errorf("Profile gathering failed finding component port: %v", err)
}
if profileBaseName == "" {
profileBaseName = time.Now().Format(time.RFC3339)
}
// Get the profile data over SSH.
getCommand := fmt.Sprintf("curl -s localhost:8080/debug/pprof/%s", kind)
getCommand := fmt.Sprintf("curl -s localhost:%v/debug/pprof/%s", profilePort, profileKind)
sshResult, err := SSH(getCommand, GetMasterHost()+":22", TestContext.Provider)
if err != nil {
return fmt.Errorf("Failed to execute curl command on master through SSH: %v", err)
}
var profilePrefix string
profilePrefix := componentName
switch {
case kind == "heap":
profilePrefix = "ApiserverMemoryProfile_"
case strings.HasPrefix(kind, "profile"):
profilePrefix = "ApiserverCPUProfile_"
case profileKind == "heap":
profilePrefix += "_MemoryProfile_"
case strings.HasPrefix(profileKind, "profile"):
profilePrefix += "_CPUProfile_"
default:
return fmt.Errorf("Unknown profile kind provided: %s", kind)
return fmt.Errorf("Unknown profile kind provided: %s", profileKind)
}
// Write the data to a file.
// Write the profile data to a file.
rawprofilePath := path.Join(getProfilesDirectoryPath(), profilePrefix+profileBaseName+".pprof")
rawprofile, err := os.Create(rawprofilePath)
if err != nil {
@@ -97,12 +126,12 @@ func gatherProfileOfKind(profileBaseName, kind string) error {
var cmd *exec.Cmd
switch {
// TODO: Support other profile kinds if needed (e.g inuse_space, alloc_objects, mutex, etc)
case kind == "heap":
case profileKind == "heap":
cmd = exec.Command("go", "tool", "pprof", "-pdf", "-symbolize=none", "--alloc_space", rawprofile.Name())
case strings.HasPrefix(kind, "profile"):
case strings.HasPrefix(profileKind, "profile"):
cmd = exec.Command("go", "tool", "pprof", "-pdf", "-symbolize=none", rawprofile.Name())
default:
return fmt.Errorf("Unknown profile kind provided: %s", kind)
return fmt.Errorf("Unknown profile kind provided: %s", profileKind)
}
outfilePath := path.Join(getProfilesDirectoryPath(), profilePrefix+profileBaseName+".pdf")
outfile, err := os.Create(outfilePath)
@@ -124,67 +153,53 @@ func gatherProfileOfKind(profileBaseName, kind string) error {
// finish before the parent goroutine itself finishes, we accept a sync.WaitGroup
// argument in these functions. Typically you would use the following pattern:
//
// func TestFooBar() {
// func TestFoo() {
// var wg sync.WaitGroup
// wg.Add(3)
// go framework.GatherApiserverCPUProfile(&wg, "doing_foo")
// go framework.GatherApiserverMemoryProfile(&wg, "doing_foo")
// go framework.GatherCPUProfile("kube-apiserver", "before_foo", &wg)
// go framework.GatherMemoryProfile("kube-apiserver", "before_foo", &wg)
// <<<< some code doing foo >>>>>>
// go framework.GatherApiserverCPUProfile(&wg, "doing_bar")
// <<<< some code doing bar >>>>>>
// go framework.GatherCPUProfile("kube-scheduler", "after_foo", &wg)
// wg.Wait()
// }
//
// If you do not wish to exercise the waiting logic, pass a nil value for the
// waitgroup argument instead. However, then you would be responsible for ensuring
// that the function finishes.
// that the function finishes. There's also a polling-based gatherer utility for
// CPU profiles available below.
func GatherApiserverCPUProfile(wg *sync.WaitGroup, profileBaseName string) {
GatherApiserverCPUProfileForNSeconds(wg, profileBaseName, DefaultCPUProfileSeconds)
func GatherCPUProfile(componentName string, profileBaseName string, wg *sync.WaitGroup) {
GatherCPUProfileForSeconds(componentName, profileBaseName, DefaultCPUProfileSeconds, wg)
}
func GatherApiserverCPUProfileForNSeconds(wg *sync.WaitGroup, profileBaseName string, n int) {
func GatherCPUProfileForSeconds(componentName string, profileBaseName string, seconds int, wg *sync.WaitGroup) {
if wg != nil {
defer wg.Done()
}
if err := checkProfileGatheringPrerequisites(); err != nil {
Logf("Profile gathering pre-requisite failed: %v", err)
return
}
if profileBaseName == "" {
profileBaseName = time.Now().Format(time.RFC3339)
}
if err := gatherProfileOfKind(profileBaseName, fmt.Sprintf("profile?seconds=%v", n)); err != nil {
Logf("Failed to gather apiserver CPU profile: %v", err)
if err := gatherProfile(componentName, profileBaseName, fmt.Sprintf("profile?seconds=%v", seconds)); err != nil {
Logf("Failed to gather %v CPU profile: %v", componentName, err)
}
}
func GatherApiserverMemoryProfile(wg *sync.WaitGroup, profileBaseName string) {
func GatherMemoryProfile(componentName string, profileBaseName string, wg *sync.WaitGroup) {
if wg != nil {
defer wg.Done()
}
if err := checkProfileGatheringPrerequisites(); err != nil {
Logf("Profile gathering pre-requisite failed: %v", err)
return
}
if profileBaseName == "" {
profileBaseName = time.Now().Format(time.RFC3339)
}
if err := gatherProfileOfKind(profileBaseName, "heap"); err != nil {
Logf("Failed to gather apiserver memory profile: %v", err)
if err := gatherProfile(componentName, profileBaseName, "heap"); err != nil {
Logf("Failed to gather %v memory profile: %v", componentName, err)
}
}
// StartApiserverCPUProfileGatherer is a polling-based gatherer of the apiserver's
// CPU profile. It takes the delay b/w consecutive gatherings as an argument and
// StartCPUProfileGatherer performs polling-based gathering of the component's CPU
// profile. It takes the interval b/w consecutive gatherings as an argument and
// starts the gathering goroutine. To stop the gatherer, close the returned channel.
func StartApiserverCPUProfileGatherer(delay time.Duration) chan struct{} {
func StartCPUProfileGatherer(componentName string, profileBaseName string, interval time.Duration) chan struct{} {
stopCh := make(chan struct{})
go func() {
for {
select {
case <-time.After(delay):
GatherApiserverCPUProfile(nil, "")
case <-time.After(interval):
GatherCPUProfile(componentName, profileBaseName+"_"+time.Now().Format(time.RFC3339), nil)
case <-stopCh:
return
}

View File

@@ -48,6 +48,12 @@ const (
VolumeSelectorKey = "e2e-pv-pool"
)
var (
// Common selinux labels
SELinuxLabel = &v1.SELinuxOptions{
Level: "s0:c0,c1"}
)
// Map of all PVs used in the multi pv-pvc tests. The key is the PV's name, which is
// guaranteed to be unique. The value is {} (empty struct) since we're only interested
// in the PV's name and if it is present. We must always Get the pv object before
@@ -501,22 +507,28 @@ func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error {
// Deletes the passed-in pod and waits for the pod to be terminated. Resilient to the pod
// not existing.
func DeletePodWithWait(f *Framework, c clientset.Interface, pod *v1.Pod) error {
const maxWait = 5 * time.Minute
if pod == nil {
return nil
}
Logf("Deleting pod %q in namespace %q", pod.Name, pod.Namespace)
err := c.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil)
return DeletePodWithWaitByName(f, c, pod.GetName(), pod.GetNamespace())
}
// Deletes the named and namespaced pod and waits for the pod to be terminated. Resilient to the pod
// not existing.
func DeletePodWithWaitByName(f *Framework, c clientset.Interface, podName, podNamespace string) error {
const maxWait = 5 * time.Minute
Logf("Deleting pod %q in namespace %q", podName, podNamespace)
err := c.CoreV1().Pods(podNamespace).Delete(podName, nil)
if err != nil {
if apierrs.IsNotFound(err) {
return nil // assume pod was already deleted
}
return fmt.Errorf("pod Delete API error: %v", err)
}
Logf("Wait up to %v for pod %q to be fully deleted", maxWait, pod.Name)
err = f.WaitForPodNotFound(pod.Name, maxWait)
Logf("Wait up to %v for pod %q to be fully deleted", maxWait, podName)
err = f.WaitForPodNotFound(podName, maxWait)
if err != nil {
return fmt.Errorf("pod %q was not deleted: %v", pod.Name, err)
return fmt.Errorf("pod %q was not deleted: %v", podName, err)
}
return nil
}
@@ -716,7 +728,7 @@ func createPD(zone string) (string, error) {
}
tags := map[string]string{}
err = gceCloud.CreateDisk(pdName, gcecloud.DiskTypeSSD, zone, 10 /* sizeGb */, tags)
err = gceCloud.CreateDisk(pdName, gcecloud.DiskTypeStandard, zone, 2 /* sizeGb */, tags)
if err != nil {
return "", err
}
@@ -1000,11 +1012,19 @@ func CreateNginxPod(client clientset.Interface, namespace string, nodeSelector m
// create security pod with given claims
func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, timeout time.Duration) (*v1.Pod, error) {
return CreateSecPodWithNodeName(client, namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup, "", timeout)
}
// create security pod with given claims, pinned to the specified node
func CreateSecPodWithNodeName(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, nodeName string, timeout time.Duration) (*v1.Pod, error) {
pod := MakeSecPod(namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup)
// Set nodeName before creation; assigning it after the pod exists has no effect.
pod.Spec.NodeName = nodeName
pod, err := client.CoreV1().Pods(namespace).Create(pod)
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
}
// Waiting for pod to be running
err = WaitTimeoutForPodRunningInNamespace(client, pod.Name, namespace, timeout)
if err != nil {

View File

@@ -216,6 +216,14 @@ func WaitForReplicationControllerwithSelector(c clientset.Interface, namespace s
return nil
}
// trimDockerRegistry trims a leading docker.io/library/ from the image name.
// A community Docker install reports image names without that registry prefix,
// while other runtimes (or Docker installed via the RHEL extras repo) include it,
// so trimming keeps the image comparison below consistent across environments.
func trimDockerRegistry(imagename string) string {
imagename = strings.Replace(imagename, "docker.io/", "", 1)
return strings.Replace(imagename, "library/", "", 1)
}
// validatorFn is the function which individual tests will implement.
// We may want it to return more than just an error, at some point.
type validatorFn func(c clientset.Interface, podID string) error
@@ -227,6 +235,7 @@ type validatorFn func(c clientset.Interface, podID string) error
// "testname": which gets bubbled up to the logging/failure messages if errors happen.
// "validator" function: This function is given a podID and a client, and it can do some specific validations that way.
func ValidateController(c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {
containerImage = trimDockerRegistry(containerImage)
getPodsTemplate := "--template={{range.items}}{{.metadata.name}} {{end}}"
// NB: kubectl adds the "exists" function to the standard template functions.
// This lets us check to see if the "running" entry exists for each of the containers
@@ -258,6 +267,7 @@ waitLoop:
}
currentImage := RunKubectlOrDie("get", "pods", podID, "-o", "template", getImageTemplate, fmt.Sprintf("--namespace=%v", ns))
currentImage = trimDockerRegistry(currentImage)
if currentImage != containerImage {
Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
continue waitLoop

View File

@@ -254,7 +254,7 @@ func (j *ServiceTestJig) CreateOnlyLocalNodePortService(namespace, serviceName s
svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeNodePort
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
svc.Spec.Ports = []v1.ServicePort{{Protocol: "TCP", Port: 80}}
svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: 80}}
})
if createPod {
@@ -1414,7 +1414,7 @@ func CreateServiceSpec(serviceName, externalName string, isHeadless bool, select
headlessService.Spec.ExternalName = externalName
} else {
headlessService.Spec.Ports = []v1.ServicePort{
{Port: 80, Name: "http", Protocol: "TCP"},
{Port: 80, Name: "http", Protocol: v1.ProtocolTCP},
}
}
if isHeadless {

View File

@@ -64,7 +64,7 @@ func CreateStatefulSetService(name string, labels map[string]string) *v1.Service
},
}
headlessService.Spec.Ports = []v1.ServicePort{
{Port: 80, Name: "http", Protocol: "TCP"},
{Port: 80, Name: "http", Protocol: v1.ProtocolTCP},
}
headlessService.Spec.ClusterIP = "None"
return headlessService
@@ -810,7 +810,7 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
VolumeMounts: mounts,
},
},

View File

@@ -31,7 +31,7 @@ import (
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubemark"
)
@@ -99,6 +99,8 @@ type TestContextType struct {
OutputPrintType string
// NodeSchedulableTimeout is the timeout for waiting for all nodes to be schedulable.
NodeSchedulableTimeout time.Duration
// SystemDaemonsetStartupTimeout is the timeout for waiting for all system daemonsets to be ready.
SystemDaemonsetStartupTimeout time.Duration
// CreateTestingNS is responsible for creating namespace used for executing e2e tests.
// It accepts namespace base name, which will be prepended with e2e prefix, kube client
// and labels to be applied to a namespace.
@@ -277,6 +279,7 @@ func RegisterClusterFlags() {
flag.IntVar(&TestContext.MinStartupPods, "minStartupPods", 0, "The number of pods which we need to see in 'Running' state with a 'Ready' condition of true, before we try running tests. This is useful in any cluster which needs some base pod-based services running before it can be used.")
flag.DurationVar(&TestContext.SystemPodsStartupTimeout, "system-pods-startup-timeout", 10*time.Minute, "Timeout for waiting for all system pods to be running before starting tests.")
flag.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 30*time.Minute, "Timeout for waiting for all nodes to be schedulable.")
flag.DurationVar(&TestContext.SystemDaemonsetStartupTimeout, "system-daemonsets-startup-timeout", 5*time.Minute, "Timeout for waiting for all system daemonsets to be ready.")
flag.StringVar(&TestContext.UpgradeTarget, "upgrade-target", "ci/latest", "Version to upgrade to (e.g. 'release/stable', 'release/latest', 'ci/latest', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.")
flag.StringVar(&TestContext.EtcdUpgradeStorage, "etcd-upgrade-storage", "", "The storage version to upgrade to (either 'etcdv2' or 'etcdv3') if doing an etcd upgrade test.")
flag.StringVar(&TestContext.EtcdUpgradeVersion, "etcd-upgrade-version", "", "The etcd binary version to upgrade to (e.g., '3.0.14', '2.3.7') if doing an etcd upgrade test.")
@@ -339,7 +342,7 @@ func createKubeConfig(clientCfg *restclient.Config) *clientcmdapi.Config {
config := clientcmdapi.NewConfig()
credentials := clientcmdapi.NewAuthInfo()
credentials.Token = clientCfg.BearerToken
credentials.TokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token"
credentials.ClientCertificate = clientCfg.TLSClientConfig.CertFile
if len(credentials.ClientCertificate) == 0 {
credentials.ClientCertificateData = clientCfg.TLSClientConfig.CertData

View File

@@ -66,15 +66,15 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
utilyaml "k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/apimachinery/pkg/watch"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
scaleclient "k8s.io/client-go/scale"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/kubernetes/pkg/api/legacyscheme"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
@@ -87,6 +87,7 @@ import (
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/controller"
nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
"k8s.io/kubernetes/pkg/controller/service"
"k8s.io/kubernetes/pkg/features"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/kubelet/util/format"
@@ -200,19 +201,10 @@ const (
// ssh port
sshPort = "22"
// ImagePrePullingTimeout is the time we wait for the e2e-image-puller
// static pods to pull the list of seeded images. If they don't pull
// images within this time we simply log their output and carry on
// with the tests.
ImagePrePullingTimeout = 5 * time.Minute
)
var (
BusyBoxImage = "busybox"
// Label allocated to the image puller static pod that runs on each node
// before e2es.
ImagePullerLabels = map[string]string{"name": "e2e-image-puller"}
BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
// For parsing Kubectl version for version-skewed testing.
gitVersionRegexp = regexp.MustCompile("GitVersion:\"(v.+?)\"")
@@ -250,11 +242,6 @@ func GetServerArchitecture(c clientset.Interface) string {
return arch
}
// GetPauseImageName fetches the pause image name for the same architecture as the apiserver.
func GetPauseImageName(c clientset.Interface) string {
return imageutils.GetE2EImageWithArch(imageutils.Pause, GetServerArchitecture(c))
}
func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) {
return request.Resource("services").SubResource("proxy"), nil
}
@@ -272,7 +259,7 @@ type ContainerFailures struct {
func GetMasterHost() string {
masterUrl, err := url.Parse(TestContext.Host)
ExpectNoError(err)
return masterUrl.Host
return masterUrl.Hostname()
}
func nowStamp() string {
@@ -638,7 +625,7 @@ func WaitForPodsSuccess(c clientset.Interface, ns string, successPodLabels map[s
//
// If ignoreLabels is not empty, pods matching this selector are ignored.
func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
ignoreSelector := labels.SelectorFromSet(ignoreLabels)
ignoreSelector := labels.SelectorFromSet(map[string]string{})
start := time.Now()
Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
timeout, minPods, ns)
@@ -738,6 +725,40 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
return nil
}
// WaitForDaemonSets waits for all daemonsets in the given namespace to be ready
// (defined as all but 'allowedNotReadyNodes' pods associated with each
// daemonset being ready).
func WaitForDaemonSets(c clientset.Interface, ns string, allowedNotReadyNodes int32, timeout time.Duration) error {
start := time.Now()
Logf("Waiting up to %v for all daemonsets in namespace '%s' to start",
timeout, ns)
return wait.PollImmediate(Poll, timeout, func() (bool, error) {
dsList, err := c.AppsV1().DaemonSets(ns).List(metav1.ListOptions{})
if err != nil {
Logf("Error getting daemonsets in namespace: '%s': %v", ns, err)
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
var notReadyDaemonSets []string
for _, ds := range dsList.Items {
Logf("%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)", ds.Status.NumberReady, ds.Status.DesiredNumberScheduled, ns, ds.ObjectMeta.Name, int(time.Since(start).Seconds()))
if ds.Status.DesiredNumberScheduled-ds.Status.NumberReady > allowedNotReadyNodes {
notReadyDaemonSets = append(notReadyDaemonSets, ds.ObjectMeta.Name)
}
}
if len(notReadyDaemonSets) > 0 {
Logf("there are not ready daemonsets: %v", notReadyDaemonSets)
return false, nil
}
return true, nil
})
}
func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string, logFunc func(format string, args ...interface{})) {
for _, container := range pod.Spec.Containers {
if strings.Contains(container.Name, containerNameSubstr) {
@@ -862,7 +883,9 @@ func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountN
if err != nil {
return err
}
_, err = watch.Until(timeout, w, conditions.ServiceAccountHasSecrets)
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, w, conditions.ServiceAccountHasSecrets)
return err
}
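Editor's note: this watch.Until to watchtools.UntilWithoutRetry migration repeats throughout the commit; the general shape, sketched with a placeholder condition cond:

// Old: _, err = watch.Until(timeout, w, cond)
// New: scope the wait with a cancellable context rather than a bare duration.
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
defer cancel() // release the context even on early return
_, err = watchtools.UntilWithoutRetry(ctx, w, cond)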
@@ -1578,7 +1601,9 @@ func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.D
if err != nil {
return err
}
_, err = watch.Until(timeout, w, func(event watch.Event) (bool, error) {
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
switch event.Type {
case watch.Deleted:
return false, apierrs.NewNotFound(schema.GroupResource{Resource: "replicationcontrollers"}, "")
@@ -2621,6 +2646,8 @@ func GetReadyNodesIncludingTaintedOrDie(c clientset.Interface) (nodes *v1.NodeLi
return nodes
}
// WaitForAllNodesSchedulable waits up to timeout for all
// (but TestContext.AllowedNotReadyNodes) nodes to become schedulable.
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)
@@ -2643,7 +2670,13 @@ func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) er
}
for i := range nodes.Items {
node := &nodes.Items[i]
if !isNodeSchedulable(node) {
if _, hasMasterRoleLabel := node.ObjectMeta.Labels[service.LabelNodeRoleMaster]; hasMasterRoleLabel {
// Kops clusters have masters with spec.unschedulable = false and
// node-role.kubernetes.io/master NoSchedule taint.
// Don't wait for them.
continue
}
if !isNodeSchedulable(node) || !isNodeUntainted(node) {
notSchedulable = append(notSchedulable, node)
}
}
@@ -2659,10 +2692,11 @@ func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) er
if len(nodes.Items) >= largeClusterThreshold && attempt%10 == 0 {
Logf("Unschedulable nodes:")
for i := range notSchedulable {
Logf("-> %s Ready=%t Network=%t",
Logf("-> %s Ready=%t Network=%t Taints=%v",
notSchedulable[i].Name,
IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeReady, true),
IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeNetworkUnavailable, false))
IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeNetworkUnavailable, false),
notSchedulable[i].Spec.Taints)
}
Logf("================================")
}
@@ -4119,7 +4153,9 @@ func CheckNodesReady(c clientset.Interface, size int, timeout time.Duration) ([]
// Filter out not-ready nodes.
FilterNodes(nodes, func(node v1.Node) bool {
return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
nodeReady := IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
networkReady := IsNodeConditionUnset(&node, v1.NodeNetworkUnavailable) || IsNodeConditionSetAsExpected(&node, v1.NodeNetworkUnavailable, false)
return nodeReady && networkReady
})
numReady := len(nodes.Items)
@@ -4492,7 +4528,7 @@ func isElementOf(podUID types.UID, pods *v1.PodList) bool {
const proxyTimeout = 2 * time.Minute
// NodeProxyRequest performs a get on a node proxy endpoint given the nodename and rest client.
func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.Result, error) {
func NodeProxyRequest(c clientset.Interface, node, endpoint string, port int) (restclient.Result, error) {
// proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
// This will leak a goroutine if proxy hangs. #22165
var result restclient.Result
@@ -4501,7 +4537,7 @@ func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.
result = c.CoreV1().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
Name(fmt.Sprintf("%v:%v", node, port)).
Suffix(endpoint).
Do()
@@ -4529,7 +4565,7 @@ func GetKubeletRunningPods(c clientset.Interface, node string) (*v1.PodList, err
func getKubeletPods(c clientset.Interface, node, resource string) (*v1.PodList, error) {
result := &v1.PodList{}
client, err := NodeProxyRequest(c, node, resource)
client, err := NodeProxyRequest(c, node, resource, ports.KubeletPort)
if err != nil {
return &v1.PodList{}, err
}
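Editor's note: callers now pass the port explicitly instead of relying on the hardwired kubelet port, so other node endpoints can be proxied too; a hypothetical call using the default kubelet port:

// Fetch /pods through the node proxy on the kubelet's default port (10250).
result, err := framework.NodeProxyRequest(c, nodeName, "pods", ports.KubeletPort)
if err != nil {
	framework.Failf("node proxy request to %s failed: %v", nodeName, err)
}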
@@ -4798,7 +4834,8 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeList) {
nodes := &v1.NodeList{}
masters := sets.NewString()
all, _ := c.CoreV1().Nodes().List(metav1.ListOptions{})
all, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
ExpectNoError(err)
for _, n := range all.Items {
if system.IsMasterNode(n.Name) {
masters.Insert(n.Name)
@@ -4842,8 +4879,8 @@ func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testut
func (p *E2ETestNodePreparer) PrepareNodes() error {
nodes := GetReadySchedulableNodesOrDie(p.client)
numTemplates := 0
for k := range p.countToStrategy {
numTemplates += k
for _, v := range p.countToStrategy {
numTemplates += v.Count
}
if numTemplates > len(nodes.Items) {
return fmt.Errorf("Can't prepare Nodes. Got more templates than existing Nodes.")
@@ -4999,7 +5036,7 @@ func GetMasterAddress(c clientset.Interface) string {
// GetNodeExternalIP returns node external IP concatenated with port 22 for ssh
// e.g. 1.2.3.4:22
func GetNodeExternalIP(node *v1.Node) string {
func GetNodeExternalIP(node *v1.Node) (string, error) {
Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
@@ -5009,9 +5046,26 @@ func GetNodeExternalIP(node *v1.Node) string {
}
}
if host == "" {
Failf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
return "", fmt.Errorf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
}
return host
return host, nil
}
// GetNodeInternalIP returns node internal IP concatenated with port 22 for ssh
func GetNodeInternalIP(node *v1.Node) (string, error) {
host := ""
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeInternalIP {
if address.Address != "" {
host = net.JoinHostPort(address.Address, sshPort)
break
}
}
}
if host == "" {
return "", fmt.Errorf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
}
return host, nil
}
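Editor's note: both address helpers now return an error instead of failing the test themselves, leaving the policy to the caller; a hypothetical call site with a fallback:

host, err := framework.GetNodeExternalIP(node)
if err != nil {
	// Fall back to the internal address when the node has no external IP.
	host, err = framework.GetNodeInternalIP(node)
}
framework.ExpectNoError(err)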
// SimpleGET executes a get on the given url, returns error if non-200 returned.
@@ -5076,7 +5130,7 @@ func (f *Framework) NewTestPod(name string, requests v1.ResourceList, limits v1.
Containers: []v1.Container{
{
Name: "pause",
Image: GetPauseImageName(f.ClientSet),
Image: imageutils.GetPauseImageName(),
Resources: v1.ResourceRequirements{
Requests: requests,
Limits: limits,

View File

@@ -41,16 +41,17 @@ package framework
import (
"fmt"
"strconv"
"time"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
clientset "k8s.io/client-go/kubernetes"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/golang/glog"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -66,7 +67,7 @@ const (
TiB int64 = 1024 * GiB
// Waiting period for volume server (Ceph, ...) to initialize itself.
VolumeServerPodStartupSleep = 20 * time.Second
VolumeServerPodStartupTimeout = 3 * time.Minute
// Waiting period for pod to be cleaned up and unmount its volumes so we
// don't tear down containers with NFS/Ceph/Gluster server too early.
@@ -92,6 +93,8 @@ type VolumeTestConfig struct {
// map <host (source) path> -> <container (dst.) path>
// if <host (source) path> is empty, mount a tmpfs emptydir
ServerVolumes map[string]string
// Message to wait for before starting clients
ServerReadyMessage string
// Wait for the pod to terminate successfully
// False indicates that the pod is long running
WaitForCompletion bool
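Editor's note: the new field lets StartVolumeServer gate on a server log line instead of a fixed sleep (see the RBD server change below); a hypothetical config using it:

config := framework.VolumeTestConfig{
	Namespace:          ns,
	Prefix:             "nfs",
	ServerImage:        imageutils.GetE2EImage(imageutils.VolumeNFSServer),
	ServerPorts:        []int{2049},
	ServerVolumes:      map[string]string{"": "/exports"},
	ServerReadyMessage: "NFS started", // StartVolumeServer waits for this line in the log
}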
@@ -114,11 +117,12 @@ type VolumeTest struct {
// NFS-specific wrapper for CreateStorageServer.
func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
config = VolumeTestConfig{
Namespace: namespace,
Prefix: "nfs",
ServerImage: imageutils.GetE2EImage(imageutils.VolumeNFSServer),
ServerPorts: []int{2049},
ServerVolumes: map[string]string{"": "/exports"},
Namespace: namespace,
Prefix: "nfs",
ServerImage: imageutils.GetE2EImage(imageutils.VolumeNFSServer),
ServerPorts: []int{2049},
ServerVolumes: map[string]string{"": "/exports"},
ServerReadyMessage: "NFS started",
}
if len(args) > 0 {
config.ServerArgs = args
@@ -180,6 +184,7 @@ func NewISCSIServer(cs clientset.Interface, namespace string) (config VolumeTest
// iSCSI container needs to insert modules from the host
"/lib/modules": "/lib/modules",
},
ServerReadyMessage: "Configuration restored from /etc/target/saveconfig.json",
}
pod, ip = CreateStorageServer(cs, config)
return config, pod, ip
@@ -195,16 +200,9 @@ func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestCo
ServerVolumes: map[string]string{
"/lib/modules": "/lib/modules",
},
ServerReadyMessage: "Ceph is ready",
}
pod, ip = CreateStorageServer(cs, config)
// Ceph server container needs some time to start. Tests continue working if
// this sleep is removed, however kubelet logs (and kubectl describe
// <client pod>) would be cluttered with error messages about non-existing
// image.
Logf("sleeping a bit to give ceph server time to initialize")
time.Sleep(VolumeServerPodStartupSleep)
// create secrets for the server
secret = &v1.Secret{
TypeMeta: metav1.TypeMeta{
@@ -350,40 +348,52 @@ func StartVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.
ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err)
}
}
if config.ServerReadyMessage != "" {
_, err := LookForStringInLog(pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout)
ExpectNoError(err, "Failed to find %q in pod logs: %s", config.ServerReadyMessage, err)
}
return pod
}
// Wrapper of cleanup function for volume server without secret created by specific CreateStorageServer function.
func CleanUpVolumeServer(f *Framework, serverPod *v1.Pod) {
CleanUpVolumeServerWithSecret(f, serverPod, nil)
}
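Editor's note: a hypothetical pairing with one of the server constructors, deferring cleanup as soon as the server pod exists:

config, serverPod, serverIP := framework.NewNFSServer(cs, ns.Name, nil)
defer framework.CleanUpVolumeServer(f, serverPod)
// config and serverIP are then fed to the client pods under test.
_, _ = config, serverIP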
// Wrapper of cleanup function for volume server with secret created by specific CreateStorageServer function.
func CleanUpVolumeServerWithSecret(f *Framework, serverPod *v1.Pod, secret *v1.Secret) {
cs := f.ClientSet
ns := f.Namespace
if secret != nil {
Logf("Deleting server secret %q...", secret.Name)
err := cs.CoreV1().Secrets(ns.Name).Delete(secret.Name, &metav1.DeleteOptions{})
if err != nil {
Logf("Delete secret failed: %v", err)
}
}
Logf("Deleting server pod %q...", serverPod.Name)
err := DeletePodWithWait(f, cs, serverPod)
if err != nil {
Logf("Server pod delete failed: %v", err)
}
}
// Clean both server and client pods.
func VolumeTestCleanup(f *Framework, config VolumeTestConfig) {
By(fmt.Sprint("cleaning the environment after ", config.Prefix))
defer GinkgoRecover()
client := f.ClientSet
podClient := client.CoreV1().Pods(config.Namespace)
cs := f.ClientSet
err := podClient.Delete(config.Prefix+"-client", nil)
if err != nil {
// Log the error before failing test: if the test has already failed,
// framework.ExpectNoError() won't print anything to logs!
glog.Warningf("Failed to delete client pod: %v", err)
ExpectNoError(err, "Failed to delete client pod: %v", err)
}
err := DeletePodWithWaitByName(f, cs, config.Prefix+"-client", config.Namespace)
Expect(err).To(BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-client", config.Namespace)
if config.ServerImage != "" {
if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) {
ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
}
// See issue #24100.
// Prevent umount errors by making sure the client pod exits cleanly *before* the volume server pod exits.
By("sleeping a bit so kubelet can unmount and detach the volume")
time.Sleep(PodCleanupTimeout)
err = podClient.Delete(config.Prefix+"-server", nil)
if err != nil {
glog.Warningf("Failed to delete server pod: %v", err)
ExpectNoError(err, "Failed to delete server pod: %v", err)
}
err := DeletePodWithWaitByName(f, cs, config.Prefix+"-server", config.Namespace)
Expect(err).To(BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-server", config.Namespace)
}
}
@@ -435,9 +445,7 @@ func TestVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGro
}
podsNamespacer := client.CoreV1().Pods(config.Namespace)
if fsGroup != nil {
clientPod.Spec.SecurityContext.FSGroup = fsGroup
}
clientPod.Spec.SecurityContext.FSGroup = fsGroup
for i, test := range tests {
volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i)
@@ -476,6 +484,8 @@ func TestVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGro
func InjectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.VolumeSource, content string) {
By(fmt.Sprint("starting ", config.Prefix, " injector"))
podClient := client.CoreV1().Pods(config.Namespace)
podName := fmt.Sprintf("%s-injector-%s", config.Prefix, rand.String(4))
volMountName := fmt.Sprintf("%s-volume-%s", config.Prefix, rand.String(4))
injectPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
@@ -483,7 +493,7 @@ func InjectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.V
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-injector",
Name: podName,
Labels: map[string]string{
"role": config.Prefix + "-injector",
},
@@ -497,7 +507,7 @@ func InjectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.V
Args: []string{"-c", "echo '" + content + "' > /mnt/index.html && chmod o+rX /mnt /mnt/index.html"},
VolumeMounts: []v1.VolumeMount{
{
Name: config.Prefix + "-volume",
Name: volMountName,
MountPath: "/mnt",
},
},
@@ -511,16 +521,17 @@ func InjectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.V
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: config.Prefix + "-volume",
Name: volMountName,
VolumeSource: volume,
},
},
NodeName: config.ClientNodeName,
NodeSelector: config.NodeSelector,
},
}
defer func() {
podClient.Delete(config.Prefix+"-injector", nil)
podClient.Delete(podName, nil)
}()
injectPod, err := podClient.Create(injectPod)

View File

@@ -13,13 +13,14 @@ go_library(
],
importpath = "k8s.io/kubernetes/test/e2e/instrumentation/logging",
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/instrumentation/common:go_default_library",
"//test/e2e/instrumentation/logging/elasticsearch:go_default_library",
"//test/e2e/instrumentation/logging/stackdriver:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
],
)

View File

@@ -15,15 +15,15 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/instrumentation/logging/elasticsearch",
deps = [
"//pkg/apis/core:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/instrumentation/common:go_default_library",
"//test/e2e/instrumentation/logging/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
],
)

View File

@@ -28,6 +28,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
imageutils "k8s.io/kubernetes/test/utils/image"
)
var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disruptive]", func() {
@@ -69,11 +70,12 @@ var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disrupti
wg.Add(scale)
for i := 0; i < scale; i++ {
go func() {
defer wg.Done()
defer GinkgoRecover()
wave := fmt.Sprintf("wave%v", strconv.Itoa(i))
framework.Logf("Starting logging soak, wave = %v", wave)
RunLogPodsWithSleepOf(f, kbRateInSeconds, wave, totalLogTime)
framework.Logf("Completed logging soak, wave %v", i)
wg.Done()
}()
// Niceness.
time.Sleep(millisecondsBetweenWaves)
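Editor's note: the reordering is an instance of a general pattern: mark the WaitGroup done and recover from panics via defer, so a panicking wave can neither deadlock wg.Wait() nor take down the whole suite. In miniature (runWave is a placeholder):

wg.Add(1)
go func() {
	defer wg.Done()       // runs even if the body panics
	defer GinkgoRecover() // deferred last, so it recovers first and reports the failure
	runWave()
}()
wg.Wait()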
@@ -100,7 +102,7 @@ func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname
return v1.PodSpec{
Containers: []v1.Container{{
Name: "logging-soak",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Args: []string{
"/bin/sh",
"-c",

View File

@@ -14,6 +14,9 @@ go_library(
],
importpath = "k8s.io/kubernetes/test/e2e/instrumentation/logging/stackdriver",
deps = [
"//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/instrumentation/common:go_default_library",
"//test/e2e/instrumentation/logging/utils:go_default_library",
@@ -21,9 +24,6 @@ go_library(
"//vendor/golang.org/x/oauth2/google:go_default_library",
"//vendor/google.golang.org/api/logging/v2beta1:go_default_library",
"//vendor/google.golang.org/api/pubsub/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
],
)

View File

@@ -42,7 +42,7 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
framework.SkipUnlessProviderIs("gce", "gke")
})
ginkgo.It("should ingest logs", func() {
ginkgo.It("should ingest logs [Feature:StackdriverLogging]", func() {
withLogProviderForScope(f, podsScope, func(p *sdLogProvider) {
ginkgo.By("Checking ingesting text logs", func() {
pod, err := utils.StartAndReturnSelf(utils.NewRepeatingLoggingPod("synthlogger-1", "hey"), f)
@@ -138,7 +138,7 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
})
})
ginkgo.It("should ingest events", func() {
ginkgo.It("should ingest events [Feature:StackdriverLogging]", func() {
eventCreationInterval := 10 * time.Second
withLogProviderForScope(f, eventsScope, func(p *sdLogProvider) {

View File

@@ -18,15 +18,15 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/instrumentation/logging/utils",
deps = [
"//pkg/apis/core:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/util/integer:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/util/integer:go_default_library",
],
)

View File

@@ -177,7 +177,7 @@ func (p *execLoggingPod) Start(f *framework.Framework) error {
Containers: []api_v1.Container{
{
Name: loggingContainerName,
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: p.cmd,
Resources: api_v1.ResourceRequirements{
Requests: api_v1.ResourceList{

View File

@@ -12,7 +12,6 @@ go_library(
"cadvisor.go",
"custom_metrics_deployments.go",
"custom_metrics_stackdriver.go",
"influxdb.go",
"metrics_grabber.go",
"prometheus.go",
"stackdriver.go",
@@ -20,31 +19,32 @@ go_library(
],
importpath = "k8s.io/kubernetes/test/e2e/instrumentation/monitoring",
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/selection:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/custom_metrics:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/external_metrics:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/instrumentation/common:go_default_library",
"//test/e2e/scheduling:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/influxdata/influxdb/client/v2:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/prometheus/common/model:go_default_library",
"//vendor/golang.org/x/oauth2/google:go_default_library",
"//vendor/google.golang.org/api/monitoring/v3:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/selection:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/metrics/pkg/client/custom_metrics:go_default_library",
"//vendor/k8s.io/metrics/pkg/client/external_metrics:go_default_library",
],
)

View File

@@ -74,9 +74,6 @@ func testStackdriverAcceleratorMonitoring(f *framework.Framework) {
scheduling.SetupNVIDIAGPUNode(f, false)
// TODO: remove this after cAdvisor race is fixed.
time.Sleep(time.Minute)
f.PodClient().Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: rcName,

View File

@@ -173,7 +173,7 @@ func stackdriverExporterContainerSpec(name string, namespace string, metricName
}
// PrometheusExporterDeployment is a Deployment of simple application with two containers
// one exposing a metric in prometheus fromat and second a prometheus-to-sd container
// one exposing a metric in prometheus format and second a prometheus-to-sd container
// that scrapes the metric and pushes it to stackdriver.
func PrometheusExporterDeployment(name, namespace string, replicas int32, metricValue int64) *extensions.Deployment {
return &extensions.Deployment{
@@ -211,7 +211,7 @@ func prometheusExporterPodSpec(metricName string, metricValue int64, port int32)
},
{
Name: "prometheus-to-sd",
Image: "k8s.gcr.io/prometheus-to-sd:v0.2.3",
Image: "k8s.gcr.io/prometheus-to-sd:v0.3.1",
ImagePullPolicy: corev1.PullPolicy("Always"),
Command: []string{"/monitor", fmt.Sprintf("--source=:http://localhost:%d", port),
"--stackdriver-prefix=custom.googleapis.com", "--pod-id=$(POD_ID)", "--namespace-id=$(POD_NAMESPACE)"},

View File

@@ -29,11 +29,13 @@ import (
gcm "google.golang.org/api/monitoring/v3"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/client-go/discovery"
"k8s.io/kubernetes/test/e2e/framework"
cmv1beta1 "k8s.io/metrics/pkg/apis/custom_metrics/v1beta1"
customclient "k8s.io/metrics/pkg/client/custom_metrics"
externalclient "k8s.io/metrics/pkg/client/external_metrics"
)
@@ -57,8 +59,10 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
if err != nil {
framework.Failf("Failed to load config: %s", err)
}
customMetricsClient := customclient.NewForConfigOrDie(config)
discoveryClient := discovery.NewDiscoveryClientForConfigOrDie(config)
apiVersionsGetter := customclient.NewAvailableAPIsGetter(discoveryClient)
restMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{cmv1beta1.SchemeGroupVersion})
customMetricsClient := customclient.NewForConfig(config, restMapper, apiVersionsGetter)
testCustomMetrics(f, kubeClient, customMetricsClient, discoveryClient, AdapterForOldResourceModel)
})
@@ -68,8 +72,10 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
if err != nil {
framework.Failf("Failed to load config: %s", err)
}
customMetricsClient := customclient.NewForConfigOrDie(config)
discoveryClient := discovery.NewDiscoveryClientForConfigOrDie(config)
apiVersionsGetter := customclient.NewAvailableAPIsGetter(discoveryClient)
restMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{cmv1beta1.SchemeGroupVersion})
customMetricsClient := customclient.NewForConfig(config, restMapper, apiVersionsGetter)
testCustomMetrics(f, kubeClient, customMetricsClient, discoveryClient, AdapterForNewResourceModel)
})
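Editor's note: both It blocks construct the client identically; consolidated into one sketch with the imports this commit uses (newCustomMetricsClient is a hypothetical helper name):

package monitoring

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/rest"
	cmv1beta1 "k8s.io/metrics/pkg/apis/custom_metrics/v1beta1"
	customclient "k8s.io/metrics/pkg/client/custom_metrics"
)

func newCustomMetricsClient(config *rest.Config) customclient.CustomMetricsClient {
	discoveryClient := discovery.NewDiscoveryClientForConfigOrDie(config)
	apiVersionsGetter := customclient.NewAvailableAPIsGetter(discoveryClient)
	restMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{cmv1beta1.SchemeGroupVersion})
	return customclient.NewForConfig(config, restMapper, apiVersionsGetter)
}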
@@ -109,7 +115,10 @@ func testCustomMetrics(f *framework.Framework, kubeClient clientset.Interface, c
defer CleanupAdapter(adapterDeployment)
_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(HPAPermissions)
defer kubeClient.RbacV1().ClusterRoleBindings().Delete("custom-metrics-reader", &metav1.DeleteOptions{})
if err != nil {
framework.Failf("Failed to create ClusterRoleBindings: %v", err)
}
defer kubeClient.RbacV1().ClusterRoleBindings().Delete(HPAPermissions.Name, &metav1.DeleteOptions{})
// Run application that exports the metric
_, err = createSDExporterPods(f, kubeClient)
@@ -153,7 +162,10 @@ func testExternalMetrics(f *framework.Framework, kubeClient clientset.Interface,
defer CleanupAdapter(AdapterForOldResourceModel)
_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(HPAPermissions)
defer kubeClient.RbacV1().ClusterRoleBindings().Delete("custom-metrics-reader", &metav1.DeleteOptions{})
if err != nil {
framework.Failf("Failed to create ClusterRoleBindings: %v", err)
}
defer kubeClient.RbacV1().ClusterRoleBindings().Delete(HPAPermissions.Name, &metav1.DeleteOptions{})
// Run application that exports the metric
pod, err := createSDExporterPods(f, kubeClient)
@@ -181,7 +193,7 @@ func verifyResponsesFromCustomMetricsAPI(f *framework.Framework, customMetricsCl
if !containsResource(resources.APIResources, "*/custom.googleapis.com|"+UnusedMetricName) {
framework.Failf("Metric '%s' expected but not received", UnusedMetricName)
}
value, err := customMetricsClient.NamespacedMetrics(f.Namespace.Name).GetForObject(schema.GroupKind{Group: "", Kind: "Pod"}, stackdriverExporterPod1, CustomMetricName)
value, err := customMetricsClient.NamespacedMetrics(f.Namespace.Name).GetForObject(schema.GroupKind{Group: "", Kind: "Pod"}, stackdriverExporterPod1, CustomMetricName, labels.NewSelector())
if err != nil {
framework.Failf("Failed query: %s", err)
}
@@ -192,7 +204,7 @@ func verifyResponsesFromCustomMetricsAPI(f *framework.Framework, customMetricsCl
if err != nil {
framework.Failf("Couldn't create a label filter")
}
values, err := customMetricsClient.NamespacedMetrics(f.Namespace.Name).GetForObjects(schema.GroupKind{Group: "", Kind: "Pod"}, labels.NewSelector().Add(*filter), CustomMetricName)
values, err := customMetricsClient.NamespacedMetrics(f.Namespace.Name).GetForObjects(schema.GroupKind{Group: "", Kind: "Pod"}, labels.NewSelector().Add(*filter), CustomMetricName, labels.NewSelector())
if err != nil {
framework.Failf("Failed query: %s", err)
}
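Editor's note: the extra trailing argument on GetForObject/GetForObjects is a selector over metric labels; passing labels.NewSelector(), which matches everything, preserves the old behavior. A sketch (ns, podName, and metricName are illustrative):

value, err := customMetricsClient.NamespacedMetrics(ns).
	GetForObject(schema.GroupKind{Group: "", Kind: "Pod"}, podName, metricName, labels.NewSelector())
if err != nil {
	framework.Failf("query failed: %v", err)
}
_ = value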

View File

@@ -1,323 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package monitoring
import (
"bytes"
"context"
"encoding/json"
"fmt"
"time"
influxdb "github.com/influxdata/influxdb/client/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
. "github.com/onsi/ginkgo"
)
var _ = instrumentation.SIGDescribe("Monitoring", func() {
f := framework.NewDefaultFramework("monitoring")
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce")
framework.SkipUnlessClusterMonitoringModeIs("influxdb")
})
It("should verify monitoring pods and all cluster nodes are available on influxdb using heapster [Feature:InfluxdbMonitoring]", func() {
testMonitoringUsingHeapsterInfluxdb(f.ClientSet)
})
})
const (
influxdbService = "monitoring-influxdb"
influxdbDatabaseName = "k8s"
podlistQuery = "show tag values from \"cpu/usage\" with key = pod_name"
nodelistQuery = "show tag values from \"cpu/usage\" with key = nodename"
sleepBetweenAttempts = 5 * time.Second
testTimeout = 5 * time.Minute
initializationTimeout = 5 * time.Minute
)
var (
rcLabels = []string{"heapster", "influxGrafana"}
expectedServices = map[string]bool{
influxdbService: false,
"monitoring-grafana": false,
}
)
// Query sends a command to the server and returns the Response
func Query(c clientset.Interface, query string) (*influxdb.Response, error) {
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
result, err := c.CoreV1().RESTClient().Get().
Context(ctx).
Namespace("kube-system").
Resource("services").
Name(influxdbService+":api").
SubResource("proxy").
Suffix("query").
Param("q", query).
Param("db", influxdbDatabaseName).
Param("epoch", "s").
Do().
Raw()
if err != nil {
if ctx.Err() != nil {
framework.Failf("Failed to query influx db: %v", err)
}
return nil, err
}
var response influxdb.Response
dec := json.NewDecoder(bytes.NewReader(result))
dec.UseNumber()
err = dec.Decode(&response)
if err != nil {
return nil, err
}
return &response, nil
}
func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string, error) {
expectedPods := []string{}
// Iterate over the labels that identify the replication controllers that we
// want to check. The rcLabels slice contains the values for the k8s-app key
// that identify the replication controllers that we want to check. Using a label
// rather than an explicit name is preferred because the names will typically have
// a version suffix e.g. heapster-monitoring-v1 and this will change after a rolling
// update e.g. to heapster-monitoring-v2. By using a label query we can check for the
// situation when a heapster-monitoring-v1 and heapster-monitoring-v2 replication controller
// is running (which would be an error except during a rolling update).
for _, rcLabel := range rcLabels {
selector := labels.Set{"k8s-app": rcLabel}.AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
deploymentList, err := c.ExtensionsV1beta1().Deployments(metav1.NamespaceSystem).List(options)
if err != nil {
return nil, err
}
rcList, err := c.CoreV1().ReplicationControllers(metav1.NamespaceSystem).List(options)
if err != nil {
return nil, err
}
psList, err := c.AppsV1().StatefulSets(metav1.NamespaceSystem).List(options)
if err != nil {
return nil, err
}
if (len(rcList.Items) + len(deploymentList.Items) + len(psList.Items)) != 1 {
return nil, fmt.Errorf("expected to find one replica for RC or deployment with label %s but got %d",
rcLabel, len(rcList.Items))
}
// Check all the replication controllers.
for _, rc := range rcList.Items {
selector := labels.Set(rc.Spec.Selector).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(options)
if err != nil {
return nil, err
}
for _, pod := range podList.Items {
if pod.DeletionTimestamp != nil {
continue
}
expectedPods = append(expectedPods, pod.Name)
}
}
// Do the same for all deployments.
for _, rc := range deploymentList.Items {
selector := labels.Set(rc.Spec.Selector.MatchLabels).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(options)
if err != nil {
return nil, err
}
for _, pod := range podList.Items {
if pod.DeletionTimestamp != nil {
continue
}
expectedPods = append(expectedPods, pod.Name)
}
}
// And for pet sets.
for _, ps := range psList.Items {
selector := labels.Set(ps.Spec.Selector.MatchLabels).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(options)
if err != nil {
return nil, err
}
for _, pod := range podList.Items {
if pod.DeletionTimestamp != nil {
continue
}
expectedPods = append(expectedPods, pod.Name)
}
}
}
return expectedPods, nil
}
func expectedServicesExist(c clientset.Interface) error {
serviceList, err := c.CoreV1().Services(metav1.NamespaceSystem).List(metav1.ListOptions{})
if err != nil {
return err
}
for _, service := range serviceList.Items {
if _, ok := expectedServices[service.Name]; ok {
expectedServices[service.Name] = true
}
}
for service, found := range expectedServices {
if !found {
return fmt.Errorf("Service %q not found", service)
}
}
return nil
}
func getAllNodesInCluster(c clientset.Interface) ([]string, error) {
// It should be OK to list unschedulable Nodes here.
nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return nil, err
}
result := []string{}
for _, node := range nodeList.Items {
result = append(result, node.Name)
}
return result, nil
}
func getInfluxdbData(c clientset.Interface, query string, tag string) (map[string]bool, error) {
response, err := Query(c, query)
if err != nil {
return nil, err
}
if len(response.Results) != 1 {
return nil, fmt.Errorf("expected only one result from Influxdb for query %q. Got %+v", query, response)
}
if len(response.Results[0].Series) != 1 {
return nil, fmt.Errorf("expected exactly one series for query %q.", query)
}
if len(response.Results[0].Series[0].Columns) != 2 {
framework.Failf("Expected two columns for query %q. Found %v", query, response.Results[0].Series[0].Columns)
}
result := map[string]bool{}
for _, value := range response.Results[0].Series[0].Values {
name := value[1].(string)
result[name] = true
}
return result, nil
}
func expectedItemsExist(expectedItems []string, actualItems map[string]bool) bool {
if len(actualItems) < len(expectedItems) {
return false
}
for _, item := range expectedItems {
if _, found := actualItems[item]; !found {
return false
}
}
return true
}
func validatePodsAndNodes(c clientset.Interface, expectedPods, expectedNodes []string) bool {
pods, err := getInfluxdbData(c, podlistQuery, "pod_id")
if err != nil {
// We don't fail the test here because the influxdb service might still not be running.
framework.Logf("failed to query list of pods from influxdb. Query: %q, Err: %v", podlistQuery, err)
return false
}
nodes, err := getInfluxdbData(c, nodelistQuery, "hostname")
if err != nil {
framework.Logf("failed to query list of nodes from influxdb. Query: %q, Err: %v", nodelistQuery, err)
return false
}
if !expectedItemsExist(expectedPods, pods) {
framework.Logf("failed to find all expected Pods.\nExpected: %v\nActual: %v", expectedPods, pods)
return false
}
if !expectedItemsExist(expectedNodes, nodes) {
framework.Logf("failed to find all expected Nodes.\nExpected: %v\nActual: %v", expectedNodes, nodes)
return false
}
return true
}
func testMonitoringUsingHeapsterInfluxdb(c clientset.Interface) {
// Check if heapster pods and services are up.
var expectedPods []string
rcErr := fmt.Errorf("failed to verify expected RCs within timeout")
serviceErr := fmt.Errorf("failed to verify expected services within timeout")
err := wait.PollImmediate(sleepBetweenAttempts, initializationTimeout, func() (bool, error) {
expectedPods, rcErr = verifyExpectedRcsExistAndGetExpectedPods(c)
if rcErr != nil {
framework.Logf("Waiting for expected RCs (got error: %v)", rcErr)
return false, nil
}
serviceErr = expectedServicesExist(c)
if serviceErr != nil {
framework.Logf("Waiting for expected services (got error: %v)", serviceErr)
return false, nil
}
return true, nil
})
if err != nil {
framework.ExpectNoError(rcErr)
framework.ExpectNoError(serviceErr)
framework.Failf("Failed to verify RCs and services within timeout: %v", err)
}
expectedNodes, err := getAllNodesInCluster(c)
framework.ExpectNoError(err)
startTime := time.Now()
for {
if validatePodsAndNodes(c, expectedPods, expectedNodes) {
return
}
if time.Since(startTime) >= testTimeout {
// temporary workaround to help debug issue #12765
printDebugInfo(c)
break
}
time.Sleep(sleepBetweenAttempts)
}
framework.Failf("monitoring using heapster and influxdb test failed")
}
func printDebugInfo(c clientset.Interface) {
set := labels.Set{"k8s-app": "heapster"}
options := metav1.ListOptions{LabelSelector: set.AsSelector().String()}
podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(options)
if err != nil {
framework.Logf("Error while listing pods %v", err)
return
}
for _, pod := range podList.Items {
framework.Logf("Kubectl output:\n%v",
framework.RunKubectlOrDie("log", pod.Name, "--namespace=kube-system", "--container=heapster"))
}
}

View File

@@ -238,7 +238,7 @@ func fetchPrometheusTargetDiscovery(c clientset.Interface) (TargetDiscovery, err
Raw()
var qres promTargetsResponse
if err != nil {
fmt.Printf(string(response))
framework.Logf(string(response))
return qres.Data, err
}
err = json.Unmarshal(response, &qres)
@@ -297,7 +297,7 @@ func queryPrometheus(c clientset.Interface, query string, start, end time.Time,
Do().
Raw()
if err != nil {
fmt.Printf(string(response))
framework.Logf(string(response))
return nil, err
}
var qres promQueryResponse

View File

@@ -16,6 +16,19 @@ go_library(
deps = [
"//pkg/controller:go_default_library",
"//pkg/kubectl/polymorphichelpers:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/generated:go_default_library",
"//test/e2e/scheduling:go_default_library",
@@ -26,19 +39,6 @@ go_library(
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/golang.org/x/net/websocket:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//vendor/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)

View File

@@ -26,7 +26,6 @@ import (
"io"
"io/ioutil"
"log"
"mime/multipart"
"net"
"net/http"
"net/http/httptest"
@@ -93,15 +92,15 @@ var (
nautilusImage = imageutils.GetE2EImage(imageutils.Nautilus)
kittenImage = imageutils.GetE2EImage(imageutils.Kitten)
redisImage = imageutils.GetE2EImage(imageutils.Redis)
nginxImage = imageutils.GetE2EImage(imageutils.NginxSlim)
busyboxImage = "busybox"
nginxImage = imageutils.GetE2EImage(imageutils.Nginx)
busyboxImage = imageutils.GetE2EImage(imageutils.BusyBox)
)
var testImages = struct {
GBFrontendImage string
PauseImage string
NginxSlimImage string
NginxSlimNewImage string
NginxImage string
NginxNewImage string
RedisImage string
GBRedisSlaveImage string
NautilusImage string
@@ -109,8 +108,8 @@ var testImages = struct {
}{
imageutils.GetE2EImage(imageutils.GBFrontend),
imageutils.GetE2EImage(imageutils.Pause),
imageutils.GetE2EImage(imageutils.NginxSlim),
imageutils.GetE2EImage(imageutils.NginxSlimNew),
imageutils.GetE2EImage(imageutils.Nginx),
imageutils.GetE2EImage(imageutils.NginxNew),
imageutils.GetE2EImage(imageutils.Redis),
imageutils.GetE2EImage(imageutils.GBRedisSlave),
imageutils.GetE2EImage(imageutils.Nautilus),
@@ -141,11 +140,11 @@ func substituteImageName(content string) string {
contentWithImageName := new(bytes.Buffer)
tmpl, err := template.New("imagemanifest").Parse(content)
if err != nil {
framework.Failf("Failed Parse the template:", err)
framework.Failf("Failed Parse the template: %v", err)
}
err = tmpl.Execute(contentWithImageName, testImages)
if err != nil {
framework.Failf("Failed executing template:", err)
framework.Failf("Failed executing template: %v", err)
}
return contentWithImageName.String()
}
@@ -183,8 +182,6 @@ var _ = SIGDescribe("Kubectl alpha client", func() {
ns = f.Namespace.Name
})
// Customized Wait / ForEach wrapper for this test. These demonstrate the
framework.KubeDescribe("Kubectl run CronJob", func() {
var nsFlag string
var cjName string
@@ -213,7 +210,7 @@ var _ = SIGDescribe("Kubectl alpha client", func() {
framework.Failf("Failed creating a CronJob with correct schedule %s", schedule)
}
containers := sj.Spec.JobTemplate.Spec.Template.Spec.Containers
if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
if checkContainersImage(containers, busyboxImage) {
framework.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers)
}
if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
@@ -255,10 +252,42 @@ var _ = SIGDescribe("Kubectl client", func() {
if err != nil || len(pods) < atLeast {
// TODO: Generalize integrating debug info into these tests so we always get debug info when we need it
framework.DumpAllNamespaceInfo(f.ClientSet, ns)
framework.Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
framework.Failf("Verified %d of %d pods , error: %v", len(pods), atLeast, err)
}
}
debugDiscovery := func() {
home := os.Getenv("HOME")
if len(home) == 0 {
framework.Logf("no $HOME envvar set")
return
}
cacheDir := filepath.Join(home, ".kube", "cache", "discovery")
err := filepath.Walk(cacheDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// only pay attention to $host_$port/v1/serverresources.json files
subpath := strings.TrimPrefix(path, cacheDir+string(filepath.Separator))
parts := filepath.SplitList(subpath)
if len(parts) != 3 || parts[1] != "v1" || parts[2] != "serverresources.json" {
return nil
}
framework.Logf("%s modified at %s (current time: %s)", path, info.ModTime(), time.Now())
data, readError := ioutil.ReadFile(path)
if readError != nil {
framework.Logf("%s error: %v", path, readError)
} else {
framework.Logf("%s content: %s", path, string(data))
}
return nil
})
framework.Logf("scanned %s for discovery docs: %v", home, err)
}
framework.KubeDescribe("Update Demo", func() {
var nautilus, kitten string
BeforeEach(func() {
@@ -291,9 +320,11 @@ var _ = SIGDescribe("Kubectl client", func() {
framework.RunKubectlOrDieInput(nautilus, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
By("scaling down the replication controller")
debugDiscovery()
framework.RunKubectlOrDie("scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns))
framework.ValidateController(c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
By("scaling up the replication controller")
debugDiscovery()
framework.RunKubectlOrDie("scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns))
framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
})
@@ -308,6 +339,7 @@ var _ = SIGDescribe("Kubectl client", func() {
framework.RunKubectlOrDieInput(string(nautilus[:]), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
By("rolling-update to new replication controller")
debugDiscovery()
framework.RunKubectlOrDieInput(string(kitten[:]), "rolling-update", "update-demo-nautilus", "--update-period=1s", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
framework.ValidateController(c, kittenImage, 2, "update-demo", updateDemoSelector, getUDData("kitten.jpg", ns), ns)
// Everything will hopefully be cleaned up when the namespace is deleted.
@@ -393,7 +425,7 @@ var _ = SIGDescribe("Kubectl client", func() {
defer closer.Close()
By("executing a command in the container with pseudo-interactive stdin")
execOutput = framework.NewKubectlCommand("exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "bash").
execOutput = framework.NewKubectlCommand("exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "sh").
WithStdinReader(r).
ExecOrDie()
if e, a := "hi", strings.TrimSpace(execOutput); e != a {
@@ -529,7 +561,7 @@ var _ = SIGDescribe("Kubectl client", func() {
ExecOrDie()
Expect(runOutput).ToNot(ContainSubstring("stdin closed"))
g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
runTestPod, _, err := polymorphichelpers.GetFirstPod(f.InternalClientset.Core(), ns, "run=run-test-3", 1*time.Minute, g)
runTestPod, _, err := polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g)
if err != nil {
os.Exit(1)
}
@@ -776,6 +808,7 @@ metadata:
By("scale set replicas to 3")
nginxDeploy := "nginx-deployment"
debugDiscovery()
framework.RunKubectlOrDie("scale", "deployment", nginxDeploy, "--replicas=3", nsFlag)
By("apply file doesn't have replicas but image changed")
@@ -783,7 +816,7 @@ metadata:
By("verify replicas still is 3 and image has been updated")
output = framework.RunKubectlOrDieInput(deployment3Yaml, "get", "-f", "-", nsFlag, "-o", "json")
requiredItems := []string{"\"replicas\": 3", imageutils.GetE2EImage(imageutils.NginxSlim)}
requiredItems := []string{"\"replicas\": 3", imageutils.GetE2EImage(imageutils.Nginx)}
for _, item := range requiredItems {
if !strings.Contains(output, item) {
framework.Failf("Missing %s in kubectl apply", item)
@@ -803,9 +836,6 @@ metadata:
output := framework.RunKubectlOrDie("cluster-info")
// Can't check exact strings due to terminal control commands (colors)
requiredItems := []string{"Kubernetes master", "is running at"}
if framework.ProviderIs("gce", "gke") {
requiredItems = append(requiredItems, "Heapster")
}
for _, item := range requiredItems {
if !strings.Contains(output, item) {
framework.Failf("Missing %s in kubectl cluster-info", item)
@@ -1236,7 +1266,7 @@ metadata:
framework.Failf("Failed getting rc %s: %v", rcName, err)
}
containers := rc.Spec.Template.Spec.Containers
if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
if checkContainersImage(containers, nginxImage) {
framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage)
}
@@ -1297,13 +1327,14 @@ metadata:
framework.Failf("Failed getting rc %s: %v", rcName, err)
}
containers := rc.Spec.Template.Spec.Containers
if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
if checkContainersImage(containers, nginxImage) {
framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage)
}
framework.WaitForRCToStabilize(c, ns, rcName, framework.PodStartTimeout)
By("rolling-update to same image controller")
debugDiscovery()
runKubectlRetryOrDie("rolling-update", rcName, "--update-period=1s", "--image="+nginxImage, "--image-pull-policy="+string(v1.PullIfNotPresent), nsFlag)
framework.ValidateController(c, nginxImage, 1, rcName, "run="+rcName, noOpValidatorFn, ns)
})
@@ -1346,7 +1377,7 @@ metadata:
framework.Failf("Failed getting deployment %s: %v", dName, err)
}
containers := d.Spec.Template.Spec.Containers
if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
if checkContainersImage(containers, nginxImage) {
framework.Failf("Failed creating deployment %s for 1 pod with expected image %s", dName, nginxImage)
}
@@ -1391,7 +1422,7 @@ metadata:
framework.Failf("Failed getting job %s: %v", jobName, err)
}
containers := job.Spec.Template.Spec.Containers
if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
if checkContainersImage(containers, nginxImage) {
framework.Failf("Failed creating job %s for 1 pod with expected image %s: %#v", jobName, nginxImage, containers)
}
if job.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
@@ -1428,7 +1459,7 @@ metadata:
framework.Failf("Failed creating a CronJob with correct schedule %s", schedule)
}
containers := cj.Spec.JobTemplate.Spec.Template.Spec.Containers
if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
if checkContainersImage(containers, busyboxImage) {
framework.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers)
}
if cj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
@@ -1464,7 +1495,7 @@ metadata:
framework.Failf("Failed getting pod %s: %v", podName, err)
}
containers := pod.Spec.Containers
if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
if checkContainersImage(containers, nginxImage) {
framework.Failf("Failed creating pod %s with expected image %s", podName, nginxImage)
}
if pod.Spec.RestartPolicy != v1.RestartPolicyNever {
@@ -1518,7 +1549,7 @@ metadata:
framework.Failf("Failed getting deployment %s: %v", podName, err)
}
containers := pod.Spec.Containers
if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
if checkContainersImage(containers, busyboxImage) {
framework.Failf("Failed creating pod with expected image %s", busyboxImage)
}
})
@@ -1834,6 +1865,10 @@ func checkKubectlOutputWithRetry(required [][]string, args ...string) {
return
}
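// checkContainersImage reports a mismatch: it returns true when containers does
// not hold exactly one container whose image is expectImage, so callers treat
// true as a failure.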
func checkContainersImage(containers []v1.Container, expectImage string) bool {
return containers == nil || len(containers) != 1 || containers[0].Image != expectImage
}
func getAPIVersions(apiEndpoint string) (*metav1.APIVersions, error) {
body, err := curl(apiEndpoint)
if err != nil {
@@ -1959,21 +1994,6 @@ type updateDemoData struct {
const applyTestLabel = "kubectl.kubernetes.io/apply-test"
func readBytesFromFile(filename string) []byte {
file, err := os.Open(filename)
if err != nil {
framework.Failf(err.Error())
}
defer file.Close()
data, err := ioutil.ReadAll(file)
if err != nil {
framework.Failf(err.Error())
}
return data
}
func readReplicationControllerFromString(contents string) *v1.ReplicationController {
rc := v1.ReplicationController{}
if err := yaml.Unmarshal([]byte(contents), &rc); err != nil {
@@ -2090,47 +2110,6 @@ func newBlockingReader(s string) (io.Reader, io.Closer, error) {
return r, w, nil
}
// newStreamingUpload creates a new http.Request that will stream POST
// a file to a URI.
func newStreamingUpload(filePath string) (*io.PipeReader, *multipart.Writer, error) {
file, err := os.Open(filePath)
if err != nil {
return nil, nil, err
}
defer file.Close()
r, w := io.Pipe()
postBodyWriter := multipart.NewWriter(w)
go streamingUpload(file, filepath.Base(filePath), postBodyWriter, w)
return r, postBodyWriter, err
}
// streamingUpload streams a file via a pipe through a multipart.Writer.
// Generally one should use newStreamingUpload instead of calling this directly.
func streamingUpload(file *os.File, fileName string, postBodyWriter *multipart.Writer, w *io.PipeWriter) {
defer GinkgoRecover()
defer file.Close()
defer w.Close()
// Set up the form file
fileWriter, err := postBodyWriter.CreateFormFile("file", fileName)
if err != nil {
framework.Failf("Unable to to write file at %s to buffer. Error: %s", fileName, err)
}
// Copy kubectl binary into the file writer
if _, err := io.Copy(fileWriter, file); err != nil {
framework.Failf("Unable to to copy file at %s into the file writer. Error: %s", fileName, err)
}
// Nothing more should be written to this instance of the postBodyWriter
if err := postBodyWriter.Close(); err != nil {
framework.Failf("Unable to close the writer for file upload. Error: %s", err)
}
}
func startLocalProxy() (srv *httptest.Server, logs *bytes.Buffer) {
logs = &bytes.Buffer{}
p := goproxy.NewProxyHttpServer()

View File

@@ -12,6 +12,7 @@ go_library(
"cluster_upgrade.go",
"framework.go",
"ha_master.go",
"kubelet_security.go",
"reboot.go",
"resize_nodes.go",
"restart.go",
@@ -20,7 +21,16 @@ go_library(
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/kubelet/pod:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/util/version:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/chaosmonkey:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
@@ -34,14 +44,6 @@ go_library(
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/golang.org/x/crypto/ssh:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)

Some files were not shown because too many files have changed in this diff.