Add generated file

This PR adds generated files under pkg/client and the vendor folder.
xing-yang
2018-07-12 10:55:15 -07:00
parent 36b1de0341
commit e213d1890d
17729 changed files with 5090889 additions and 0 deletions

vendor/k8s.io/kubernetes/test/e2e/apimachinery/BUILD (generated, vendored)

@@ -0,0 +1,92 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"aggregator.go",
"certs.go",
"chunking.go",
"crd_watch.go",
"custom_resource_definition.go",
"etcd_failure.go",
"framework.go",
"garbage_collector.go",
"generated_clientset.go",
"initializers.go",
"namespace.go",
"table_conversion.go",
"watch.go",
"webhook.go",
],
importpath = "k8s.io/kubernetes/test/e2e/apimachinery",
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/rbac:go_default_library",
"//pkg/printers:go_default_library",
"//pkg/util/version:go_default_library",
"//test/e2e/apps:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
"//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/authorization/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/util/cert:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
"//vendor/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/test/e2e/apimachinery/aggregator.go (generated, vendored)

@@ -0,0 +1,500 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apimachinery
import (
"crypto/rand"
"encoding/json"
"fmt"
"math/big"
"strings"
"time"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
unstructuredv1 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/client-go/discovery"
clientset "k8s.io/client-go/kubernetes"
apiregistrationv1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1"
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
rbacapi "k8s.io/kubernetes/pkg/apis/rbac"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
samplev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1"
. "github.com/onsi/ginkgo"
)
var serverAggregatorVersion = utilversion.MustParseSemantic("v1.7.0")
var _ = SIGDescribe("Aggregator", func() {
var ns string
var c clientset.Interface
var aggrclient *aggregatorclient.Clientset
// Within the same container, Ginkgo runs BeforeEach and AfterEach blocks
// in the order in which they are declared.
// We want cleanTest to happen before the namespace cleanup AfterEach
// inserted by NewDefaultFramework, so we put this AfterEach in front
// of NewDefaultFramework.
AfterEach(func() {
cleanTest(c, aggrclient, ns)
})
f := framework.NewDefaultFramework("aggregator")
// We want namespace initialization BeforeEach inserted by
// NewDefaultFramework to happen before this, so we put this BeforeEach
// after NewDefaultFramework.
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
aggrclient = f.AggregatorClient
})
It("Should be able to support the 1.7 Sample API Server using the current Aggregator", func() {
// Make sure the relevant provider supports the Aggregator
framework.SkipUnlessServerVersionGTE(serverAggregatorVersion, f.ClientSet.Discovery())
framework.SkipUnlessProviderIs("gce", "gke")
// Testing a 1.7 version of the sample-apiserver
TestSampleAPIServer(f, imageutils.GetE2EImage(imageutils.APIServer))
})
})
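// A sketch of the per-spec execution order these declarations produce
// (NewDefaultFramework registers its own BeforeEach/AfterEach at the point
// it is called):
//
//	framework BeforeEach (namespace creation)
//	local BeforeEach     (capture c, ns, aggrclient)
//	...spec body...
//	local AfterEach      (cleanTest)
//	framework AfterEach  (namespace cleanup)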
func cleanTest(client clientset.Interface, aggrclient *aggregatorclient.Clientset, namespace string) {
// delete the APIService first to avoid causing discovery errors
_ = aggrclient.ApiregistrationV1beta1().APIServices().Delete("v1alpha1.wardle.k8s.io", nil)
_ = client.AppsV1().Deployments(namespace).Delete("sample-apiserver", nil)
_ = client.CoreV1().Secrets(namespace).Delete("sample-apiserver-secret", nil)
_ = client.CoreV1().Services(namespace).Delete("sample-api", nil)
_ = client.CoreV1().ServiceAccounts(namespace).Delete("sample-apiserver", nil)
_ = client.RbacV1beta1().RoleBindings("kube-system").Delete("wardler-auth-reader", nil)
_ = client.RbacV1beta1().ClusterRoles().Delete("wardler", nil)
_ = client.RbacV1beta1().ClusterRoleBindings().Delete("wardler:"+namespace+":anonymous", nil)
}
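// The deletion errors above are deliberately discarded: cleanup is
// best-effort, and several of these objects will not exist if a test failed
// before creating them.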
// TestSampleAPIServer is a basic test that the sample-apiserver code from 1.7,
// compiled against 1.7, works on the current Aggregator/API-Server.
func TestSampleAPIServer(f *framework.Framework, image string) {
By("Registering the sample API server.")
client := f.ClientSet
restClient := client.Discovery().RESTClient()
iclient := f.InternalClientset
aggrclient := f.AggregatorClient
namespace := f.Namespace.Name
context := setupServerCert(namespace, "sample-api")
if framework.ProviderIs("gke") {
// kubectl create clusterrolebinding user-cluster-admin-binding --clusterrole=cluster-admin --user=user@domain.com
authenticated := rbacv1beta1.Subject{Kind: rbacv1beta1.GroupKind, Name: user.AllAuthenticated}
framework.BindClusterRole(client.RbacV1beta1(), "cluster-admin", namespace, authenticated)
}
// kubectl create -f namespace.yaml
// NOTE: aggregated apis should generally be set up in their own namespace. As the test framework is setting up a new namespace, we are just using that.
// kubectl create -f secret.yaml
secretName := "sample-apiserver-secret"
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
},
Type: v1.SecretTypeOpaque,
Data: map[string][]byte{
"tls.crt": context.cert,
"tls.key": context.key,
},
}
_, err := client.CoreV1().Secrets(namespace).Create(secret)
framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace)
// kubectl create -f deploy.yaml
deploymentName := "sample-apiserver-deployment"
etcdImage := "quay.io/coreos/etcd:v3.2.18"
podLabels := map[string]string{"app": "sample-apiserver", "apiserver": "true"}
replicas := int32(1)
zero := int64(0)
mounts := []v1.VolumeMount{
{
Name: "apiserver-certs",
ReadOnly: true,
MountPath: "/apiserver.local.config/certificates",
},
}
volumes := []v1.Volume{
{
Name: "apiserver-certs",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{SecretName: secretName},
},
},
}
containers := []v1.Container{
{
Name: "sample-apiserver",
VolumeMounts: mounts,
Args: []string{
"--etcd-servers=http://localhost:2379",
"--tls-cert-file=/apiserver.local.config/certificates/tls.crt",
"--tls-private-key-file=/apiserver.local.config/certificates/tls.key",
"--audit-log-path=-",
"--audit-log-maxage=0",
"--audit-log-maxbackup=0",
},
Image: image,
},
{
Name: "etcd",
Image: etcdImage,
},
}
d := &apps.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
Labels: podLabels,
},
Spec: apps.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: podLabels,
},
Strategy: apps.DeploymentStrategy{
Type: apps.RollingUpdateDeploymentStrategyType,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels,
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &zero,
Containers: containers,
Volumes: volumes,
},
},
},
}
deployment, err := client.AppsV1().Deployments(namespace).Create(d)
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
err = framework.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace)
err = framework.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", etcdImage)
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s to complete", etcdImage, deploymentName, namespace)
// kubectl create -f service.yaml
serviceLabels := map[string]string{"apiserver": "true"}
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: "sample-api",
Labels: map[string]string{"test": "aggregator"},
},
Spec: v1.ServiceSpec{
Selector: serviceLabels,
Ports: []v1.ServicePort{
{
Protocol: "TCP",
Port: 443,
TargetPort: intstr.FromInt(443),
},
},
},
}
_, err = client.CoreV1().Services(namespace).Create(service)
framework.ExpectNoError(err, "creating service %s in namespace %s", "sample-apiserver", namespace)
// kubectl create -f serviceAccount.yaml
sa := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "sample-apiserver"}}
_, err = client.CoreV1().ServiceAccounts(namespace).Create(sa)
framework.ExpectNoError(err, "creating service account %s in namespace %s", "sample-apiserver", namespace)
// kubectl create -f authDelegator.yaml
_, err = client.RbacV1beta1().ClusterRoleBindings().Create(&rbacv1beta1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "wardler:" + namespace + ":anonymous",
},
RoleRef: rbacv1beta1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "wardler",
},
Subjects: []rbacv1beta1.Subject{
{
APIGroup: "rbac.authorization.k8s.io",
Kind: "User",
Name: namespace + ":anonymous",
},
},
})
framework.ExpectNoError(err, "creating cluster role binding %s", "wardler:"+namespace+":anonymous")
// kubectl create -f role.yaml
resourceRule, err := rbacapi.NewRule("create", "delete", "deletecollection", "get", "list", "patch", "update", "watch").Groups("wardle.k8s.io").Resources("flunders").Rule()
framework.ExpectNoError(err, "creating cluster resource rule")
urlRule, err := rbacapi.NewRule("get").URLs("*").Rule()
framework.ExpectNoError(err, "creating cluster url rule")
err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
roleLabels := map[string]string{"kubernetes.io/bootstrapping": "wardle-default"}
role := rbacapi.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: "wardler",
Labels: roleLabels,
},
Rules: []rbacapi.PolicyRule{resourceRule, urlRule},
}
_, err = iclient.Rbac().ClusterRoles().Create(&role)
if err != nil {
return false, nil
}
return true, nil
})
framework.ExpectNoError(err, "creating cluster role wardler - may not have permissions")
// kubectl create -f auth-reader.yaml
_, err = client.RbacV1beta1().RoleBindings("kube-system").Create(&rbacv1beta1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "wardler-auth-reader",
Annotations: map[string]string{
rbacv1beta1.AutoUpdateAnnotationKey: "true",
},
},
RoleRef: rbacv1beta1.RoleRef{
APIGroup: "",
Kind: "Role",
Name: "extension-apiserver-authentication-reader",
},
Subjects: []rbacv1beta1.Subject{
{
Kind: "ServiceAccount",
Name: "default", // "sample-apiserver",
Namespace: namespace,
},
},
})
framework.ExpectNoError(err, "creating role binding %s:sample-apiserver to access configMap", namespace)
// Wait for the extension apiserver to be up and healthy
// kubectl get deployments -n <aggregated-api-namespace> && status == Running
// NOTE: aggregated apis should generally be set up in their own namespace (<aggregated-api-namespace>). As the test framework
// is setting up a new namespace, we are just using that.
err = framework.WaitForDeploymentComplete(client, deployment)
framework.ExpectNoError(err, "deploying extension apiserver in namespace %s", namespace)
// kubectl create -f apiservice.yaml
_, err = aggrclient.ApiregistrationV1beta1().APIServices().Create(&apiregistrationv1beta1.APIService{
ObjectMeta: metav1.ObjectMeta{Name: "v1alpha1.wardle.k8s.io"},
Spec: apiregistrationv1beta1.APIServiceSpec{
Service: &apiregistrationv1beta1.ServiceReference{
Namespace: namespace,
Name: "sample-api",
},
Group: "wardle.k8s.io",
Version: "v1alpha1",
CABundle: context.signingCert,
GroupPriorityMinimum: 2000,
VersionPriority: 200,
},
})
framework.ExpectNoError(err, "creating apiservice %s with namespace %s", "v1alpha1.wardle.k8s.io", namespace)
var (
currentAPIService *apiregistrationv1beta1.APIService
currentPods *v1.PodList
)
err = pollTimed(100*time.Millisecond, 60*time.Second, func() (bool, error) {
currentAPIService, _ = aggrclient.ApiregistrationV1beta1().APIServices().Get("v1alpha1.wardle.k8s.io", metav1.GetOptions{})
currentPods, _ = client.CoreV1().Pods(namespace).List(metav1.ListOptions{})
request := restClient.Get().AbsPath("/apis/wardle.k8s.io/v1alpha1/namespaces/default/flunders")
request.SetHeader("Accept", "application/json")
_, err := request.DoRaw()
if err != nil {
status, ok := err.(*apierrs.StatusError)
if !ok {
return false, err
}
if status.Status().Code == 503 {
return false, nil
}
if status.Status().Code == 404 && strings.HasPrefix(err.Error(), "the server could not find the requested resource") {
return false, nil
}
return false, err
}
return true, nil
}, "Waited %s for the sample-apiserver to be ready to handle requests.")
if err != nil {
currentAPIServiceJSON, _ := json.Marshal(currentAPIService)
framework.Logf("current APIService: %s", string(currentAPIServiceJSON))
currentPodsJSON, _ := json.Marshal(currentPods)
framework.Logf("current pods: %s", string(currentPodsJSON))
if currentPods != nil {
for _, pod := range currentPods.Items {
for _, container := range pod.Spec.Containers {
logs, err := framework.GetPodLogs(client, namespace, pod.Name, container.Name)
framework.Logf("logs of %s/%s (error: %v): %s", pod.Name, container.Name, err, logs)
}
}
}
}
framework.ExpectNoError(err, "gave up waiting for apiservice wardle to come up successfully")
flunderName := generateFlunderName("rest-flunder")
// kubectl create -f flunders-1.yaml -v 9
// curl -k -v -XPOST https://localhost/apis/wardle.k8s.io/v1alpha1/namespaces/default/flunders
// Request Body: {"apiVersion":"wardle.k8s.io/v1alpha1","kind":"Flunder","metadata":{"labels":{"sample-label":"true"},"name":"test-flunder","namespace":"default"}}
flunder := `{"apiVersion":"wardle.k8s.io/v1alpha1","kind":"Flunder","metadata":{"labels":{"sample-label":"true"},"name":"` + flunderName + `","namespace":"default"}}`
result := restClient.Post().AbsPath("/apis/wardle.k8s.io/v1alpha1/namespaces/default/flunders").Body([]byte(flunder)).Do()
framework.ExpectNoError(result.Error(), "creating a new flunders resource")
var statusCode int
result.StatusCode(&statusCode)
if statusCode != 201 {
framework.Failf("Flunders client creation response was status %d, not 201", statusCode)
}
pods, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{})
framework.ExpectNoError(err, "getting pods for flunders service")
// kubectl get flunders -v 9
// curl -k -v -XGET https://localhost/apis/wardle.k8s.io/v1alpha1/namespaces/default/flunders
contents, err := restClient.Get().AbsPath("/apis/wardle.k8s.io/v1alpha1/namespaces/default/flunders").SetHeader("Accept", "application/json").DoRaw()
framework.ExpectNoError(err, "attempting to get a newly created flunders resource")
var flundersList samplev1alpha1.FlunderList
err = json.Unmarshal(contents, &flundersList)
validateErrorWithDebugInfo(f, err, pods, "Error in unmarshalling %T response from server %s", contents, "/apis/wardle.k8s.io/v1alpha1")
if len(flundersList.Items) != 1 {
framework.Failf("failed to get back the correct flunders list %v", flundersList)
}
// kubectl delete flunder test-flunder -v 9
// curl -k -v -XDELETE https://35.193.112.40/apis/wardle.k8s.io/v1alpha1/namespaces/default/flunders/test-flunder
_, err = restClient.Delete().AbsPath("/apis/wardle.k8s.io/v1alpha1/namespaces/default/flunders/" + flunderName).DoRaw()
validateErrorWithDebugInfo(f, err, pods, "attempting to delete a newly created flunders(%v) resource", flundersList.Items)
// kubectl get flunders -v 9
// curl -k -v -XGET https://localhost/apis/wardle.k8s.io/v1alpha1/namespaces/default/flunders
contents, err = restClient.Get().AbsPath("/apis/wardle.k8s.io/v1alpha1/namespaces/default/flunders").SetHeader("Accept", "application/json").DoRaw()
framework.ExpectNoError(err, "confirming delete of a newly created flunders resource")
err = json.Unmarshal(contents, &flundersList)
validateErrorWithDebugInfo(f, err, pods, "Error in unmarshalling %T response from server %s", contents, "/apis/wardle.k8s.io/v1alpha1")
if len(flundersList.Items) != 0 {
framework.Failf("failed to get back the correct deleted flunders list %v", flundersList)
}
flunderName = generateFlunderName("dynamic-flunder")
// Rerun the Create/List/Delete tests using the Dynamic client.
resources, discoveryErr := client.Discovery().ServerPreferredNamespacedResources()
groupVersionResources, err := discovery.GroupVersionResources(resources)
framework.ExpectNoError(err, "getting group version resources for dynamic client")
gvr := schema.GroupVersionResource{Group: "wardle.k8s.io", Version: "v1alpha1", Resource: "flunders"}
_, ok := groupVersionResources[gvr]
if !ok {
framework.Failf("could not find group version resource for dynamic client and wardle/flunders (discovery error: %v, discovery results: %#v)", discoveryErr, groupVersionResources)
}
dynamicClient := f.DynamicClient.Resource(gvr).Namespace(namespace)
// kubectl create -f flunders-1.yaml
// Request Body: {"apiVersion":"wardle.k8s.io/v1alpha1","kind":"Flunder","metadata":{"labels":{"sample-label":"true"},"name":"test-flunder","namespace":"default"}}
testFlunder := samplev1alpha1.Flunder{
TypeMeta: metav1.TypeMeta{
Kind: "Flunder",
APIVersion: "wardle.k8s.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{Name: flunderName},
Spec: samplev1alpha1.FlunderSpec{},
}
jsonFlunder, err := json.Marshal(testFlunder)
framework.ExpectNoError(err, "marshalling test-flunder for create using dynamic client")
unstruct := &unstructuredv1.Unstructured{}
err = unstruct.UnmarshalJSON(jsonFlunder)
framework.ExpectNoError(err, "unmarshalling test-flunder as unstructured for create using dynamic client")
unstruct, err = dynamicClient.Create(unstruct)
framework.ExpectNoError(err, "listing flunders using dynamic client")
// kubectl get flunders
unstructuredList, err := dynamicClient.List(metav1.ListOptions{})
framework.ExpectNoError(err, "listing flunders using dynamic client")
if len(unstructuredList.Items) != 1 {
framework.Failf("failed to get back the correct flunders list %v from the dynamic client", unstructuredList)
}
// kubectl delete flunder test-flunder
err = dynamicClient.Delete(flunderName, &metav1.DeleteOptions{})
validateErrorWithDebugInfo(f, err, pods, "deleting flunders(%v) using dynamic client", unstructuredList.Items)
// kubectl get flunders
unstructuredList, err = dynamicClient.List(metav1.ListOptions{})
framework.ExpectNoError(err, "listing flunders using dynamic client")
if len(unstructuredList.Items) != 0 {
framework.Failf("failed to get back the correct deleted flunders list %v from the dynamic client", unstructuredList)
}
cleanTest(client, aggrclient, namespace)
}
// pollTimed will call Poll but time how long Poll actually took.
// It will then framework.Logf the msg with the duration of the Poll.
// It is assumed that msg contains one %s verb for the elapsed time.
func pollTimed(interval, timeout time.Duration, condition wait.ConditionFunc, msg string) error {
defer func(start time.Time, msg string) {
elapsed := time.Since(start)
framework.Logf(msg, elapsed)
}(time.Now(), msg)
return wait.Poll(interval, timeout, condition)
}
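// A hypothetical usage sketch (conditionMet is a stand-in, not part of this
// package): poll every 100ms for up to 30s, then log the elapsed time as,
// e.g., "Waited 1.2s for the condition.":
//
//	err := pollTimed(100*time.Millisecond, 30*time.Second, func() (bool, error) {
//		return conditionMet(), nil
//	}, "Waited %s for the condition.")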
func validateErrorWithDebugInfo(f *framework.Framework, err error, pods *v1.PodList, msg string, fields ...interface{}) {
if err != nil {
namespace := f.Namespace.Name
msg := fmt.Sprintf(msg, fields...)
msg += fmt.Sprintf(" but received unexpected error:\n%v", err)
client := f.ClientSet
ep, err := client.CoreV1().Endpoints(namespace).Get("sample-api", metav1.GetOptions{})
if err == nil {
msg += fmt.Sprintf("\nFound endpoints for sample-api:\n%v", ep)
}
pds, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{})
if err == nil {
msg += fmt.Sprintf("\nFound pods in %s:\n%v", namespace, pds)
msg += fmt.Sprintf("\nOriginal pods in %s:\n%v", namespace, pods)
}
framework.Failf(msg)
}
}
func generateFlunderName(base string) string {
id, err := rand.Int(rand.Reader, big.NewInt(2147483647))
if err != nil {
return base
}
return fmt.Sprintf("%s-%d", base, id)
}
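// For example, generateFlunderName("rest-flunder") yields a name such as
// "rest-flunder-1804289383": the suffix is a crypto/rand integer in
// [0, 2^31-1), making collisions between test runs unlikely.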

vendor/k8s.io/kubernetes/test/e2e/apimachinery/certs.go (generated, vendored)

@@ -0,0 +1,90 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apimachinery
import (
"crypto/x509"
"io/ioutil"
"os"
"k8s.io/client-go/util/cert"
"k8s.io/kubernetes/test/e2e/framework"
)
type certContext struct {
cert []byte
key []byte
signingCert []byte
}
// Set up the server cert. For example, user apiservers and admission webhooks
// can use the cert to prove their identity to the kube-apiserver.
func setupServerCert(namespaceName, serviceName string) *certContext {
certDir, err := ioutil.TempDir("", "test-e2e-server-cert")
if err != nil {
framework.Failf("Failed to create a temp dir for cert generation %v", err)
}
defer os.RemoveAll(certDir)
signingKey, err := cert.NewPrivateKey()
if err != nil {
framework.Failf("Failed to create CA private key %v", err)
}
signingCert, err := cert.NewSelfSignedCACert(cert.Config{CommonName: "e2e-server-cert-ca"}, signingKey)
if err != nil {
framework.Failf("Failed to create CA cert for apiserver %v", err)
}
caCertFile, err := ioutil.TempFile(certDir, "ca.crt")
if err != nil {
framework.Failf("Failed to create a temp file for ca cert generation %v", err)
}
if err := ioutil.WriteFile(caCertFile.Name(), cert.EncodeCertPEM(signingCert), 0644); err != nil {
framework.Failf("Failed to write CA cert %v", err)
}
key, err := cert.NewPrivateKey()
if err != nil {
framework.Failf("Failed to create private key for %v", err)
}
signedCert, err := cert.NewSignedCert(
cert.Config{
CommonName: serviceName + "." + namespaceName + ".svc",
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
},
key, signingCert, signingKey,
)
if err != nil {
framework.Failf("Failed to create cert%v", err)
}
certFile, err := ioutil.TempFile(certDir, "server.crt")
if err != nil {
framework.Failf("Failed to create a temp file for cert generation %v", err)
}
keyFile, err := ioutil.TempFile(certDir, "server.key")
if err != nil {
framework.Failf("Failed to create a temp file for key generation %v", err)
}
if err = ioutil.WriteFile(certFile.Name(), cert.EncodeCertPEM(signedCert), 0600); err != nil {
framework.Failf("Failed to write cert file %v", err)
}
if err = ioutil.WriteFile(keyFile.Name(), cert.EncodePrivateKeyPEM(key), 0600); err != nil {
framework.Failf("Failed to write key file %v", err)
}
return &certContext{
cert: cert.EncodeCertPEM(signedCert),
key: cert.EncodePrivateKeyPEM(key),
signingCert: cert.EncodeCertPEM(signingCert),
}
}
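// A minimal usage sketch, matching the aggregator test above: the cert/key
// pair feeds the serving-cert Secret, and the self-signed CA becomes the
// APIService's CABundle.
//
//	context := setupServerCert(namespace, "sample-api")
//	// Secret data:     context.cert, context.key
//	// APIService spec: CABundle: context.signingCert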

vendor/k8s.io/kubernetes/test/e2e/apimachinery/chunking.go (generated, vendored)

@@ -0,0 +1,104 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apimachinery
import (
"fmt"
"math/rand"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/test/e2e/framework"
)
const numberOfTotalResources = 400
var _ = SIGDescribe("Servers with support for API chunking", func() {
f := framework.NewDefaultFramework("chunking")
It("should return chunks of results for list calls", func() {
ns := f.Namespace.Name
c := f.ClientSet
client := c.CoreV1().PodTemplates(ns)
By("creating a large number of resources")
workqueue.Parallelize(20, numberOfTotalResources, func(i int) {
for tries := 3; tries >= 0; tries-- {
_, err := client.Create(&v1.PodTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("template-%04d", i),
},
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "test", Image: "test2"},
},
},
},
})
if err == nil {
return
}
framework.Logf("Got an error creating template %d: %v", i, err)
}
Fail("Unable to create template %d, exiting", i)
})
By("retrieving those results in paged fashion several times")
for i := 0; i < 3; i++ {
opts := metav1.ListOptions{}
found := 0
var lastRV string
for {
opts.Limit = int64(rand.Int31n(numberOfTotalResources/10) + 1)
list, err := client.List(opts)
Expect(err).ToNot(HaveOccurred())
framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
// TODO: kops PR job is still using etcd2, which prevents this feature from working. Remove this check when kops is upgraded to etcd3
if len(list.Items) > int(opts.Limit) {
framework.Skipf("ERROR: This cluster does not support chunking, which means it is running etcd2 and not supported.")
}
Expect(len(list.Items)).To(BeNumerically("<=", opts.Limit))
if len(lastRV) == 0 {
lastRV = list.ResourceVersion
}
Expect(list.ResourceVersion).To(Equal(lastRV))
for _, item := range list.Items {
Expect(item.Name).To(Equal(fmt.Sprintf("template-%04d", found)))
found++
}
if len(list.Continue) == 0 {
break
}
opts.Continue = list.Continue
}
Expect(found).To(BeNumerically("==", numberOfTotalResources))
}
By("retrieving those results all at once")
list, err := client.List(metav1.ListOptions{Limit: numberOfTotalResources + 1})
Expect(err).ToNot(HaveOccurred())
Expect(list.Items).To(HaveLen(numberOfTotalResources))
})
})
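// The chunked-list protocol exercised above, reduced to a sketch: ask for a
// page with Limit and follow the opaque Continue token until it is empty.
//
//	opts := metav1.ListOptions{Limit: 50}
//	for {
//		list, err := client.List(opts)
//		if err != nil {
//			break // real callers should handle the error
//		}
//		// ...consume list.Items...
//		if len(list.Continue) == 0 {
//			break
//		}
//		opts.Continue = list.Continue
//	}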

vendor/k8s.io/kubernetes/test/e2e/apimachinery/crd_watch.go (generated, vendored)

@@ -0,0 +1,166 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apimachinery
import (
"fmt"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/testserver"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = SIGDescribe("CustomResourceDefinition Watch", func() {
f := framework.NewDefaultFramework("crd-watch")
Context("CustomResourceDefinition Watch", func() {
/*
Testname: crd-watch
Description: Create a Custom Resource Definition and make sure
watches observe events on create/delete.
*/
It("watch on custom resource definition objects", func() {
framework.SkipUnlessServerVersionGTE(crdVersion, f.ClientSet.Discovery())
const (
watchCRNameA = "name1"
watchCRNameB = "name2"
)
config, err := framework.LoadConfig()
if err != nil {
framework.Failf("failed to load config: %v", err)
}
apiExtensionClient, err := clientset.NewForConfig(config)
if err != nil {
framework.Failf("failed to initialize apiExtensionClient: %v", err)
}
noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped)
noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, f.DynamicClient)
if err != nil {
framework.Failf("failed to create CustomResourceDefinition: %v", err)
}
defer func() {
err = testserver.DeleteCustomResourceDefinition(noxuDefinition, apiExtensionClient)
if err != nil {
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
}
}()
ns := ""
noxuResourceClient := newNamespacedCustomResourceClient(ns, f.DynamicClient, noxuDefinition)
watchA, err := watchCRWithName(noxuResourceClient, watchCRNameA)
Expect(err).NotTo(HaveOccurred())
watchB, err := watchCRWithName(noxuResourceClient, watchCRNameB)
Expect(err).NotTo(HaveOccurred())
testCrA := testserver.NewNoxuInstance(ns, watchCRNameA)
testCrB := testserver.NewNoxuInstance(ns, watchCRNameB)
By("Creating first CR ")
testCrA, err = instantiateCustomResource(testCrA, noxuResourceClient, noxuDefinition)
Expect(err).NotTo(HaveOccurred())
expectEvent(watchA, watch.Added, testCrA)
expectNoEvent(watchB, watch.Added, testCrA)
By("Creating second CR")
testCrB, err = instantiateCustomResource(testCrB, noxuResourceClient, noxuDefinition)
Expect(err).NotTo(HaveOccurred())
expectEvent(watchB, watch.Added, testCrB)
expectNoEvent(watchA, watch.Added, testCrB)
By("Deleting first CR")
err = deleteCustomResource(noxuResourceClient, watchCRNameA)
Expect(err).NotTo(HaveOccurred())
expectEvent(watchA, watch.Deleted, nil)
expectNoEvent(watchB, watch.Deleted, nil)
By("Deleting second CR")
err = deleteCustomResource(noxuResourceClient, watchCRNameB)
Expect(err).NotTo(HaveOccurred())
expectEvent(watchB, watch.Deleted, nil)
expectNoEvent(watchA, watch.Deleted, nil)
})
})
})
func watchCRWithName(crdResourceClient dynamic.ResourceInterface, name string) (watch.Interface, error) {
return crdResourceClient.Watch(
metav1.ListOptions{
FieldSelector: "metadata.name=" + name,
TimeoutSeconds: int64ptr(600),
},
)
}
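// Example (sketch): open a watch that only delivers events for the instance
// named "name1", then read a single event from it.
//
//	w, err := watchCRWithName(noxuResourceClient, "name1")
//	// handle err; defer w.Stop()
//	evt := <-w.ResultChan()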
func instantiateCustomResource(instanceToCreate *unstructured.Unstructured, client dynamic.ResourceInterface, definition *apiextensionsv1beta1.CustomResourceDefinition) (*unstructured.Unstructured, error) {
createdInstance, err := client.Create(instanceToCreate)
if err != nil {
return nil, err
}
createdObjectMeta, err := meta.Accessor(createdInstance)
if err != nil {
return nil, err
}
// it should have a UUID
if len(createdObjectMeta.GetUID()) == 0 {
return nil, fmt.Errorf("missing uuid: %#v", createdInstance)
}
createdTypeMeta, err := meta.TypeAccessor(createdInstance)
if err != nil {
return nil, err
}
if e, a := definition.Spec.Group+"/"+definition.Spec.Version, createdTypeMeta.GetAPIVersion(); e != a {
return nil, fmt.Errorf("expected %v, got %v", e, a)
}
if e, a := definition.Spec.Names.Kind, createdTypeMeta.GetKind(); e != a {
return nil, fmt.Errorf("expected %v, got %v", e, a)
}
return createdInstance, nil
}
func deleteCustomResource(client dynamic.ResourceInterface, name string) error {
return client.Delete(name, &metav1.DeleteOptions{})
}
func newNamespacedCustomResourceClient(ns string, client dynamic.Interface, crd *apiextensionsv1beta1.CustomResourceDefinition) dynamic.ResourceInterface {
gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: crd.Spec.Version, Resource: crd.Spec.Names.Plural}
if crd.Spec.Scope != apiextensionsv1beta1.ClusterScoped {
return client.Resource(gvr).Namespace(ns)
} else {
return client.Resource(gvr)
}
}

vendor/k8s.io/kubernetes/test/e2e/apimachinery/custom_resource_definition.go (generated, vendored)

@@ -0,0 +1,71 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apimachinery
import (
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/testserver"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
var crdVersion = utilversion.MustParseSemantic("v1.7.0")
var _ = SIGDescribe("CustomResourceDefinition resources", func() {
f := framework.NewDefaultFramework("custom-resource-definition")
Context("Simple CustomResourceDefinition", func() {
/*
Testname: crd-creation-test
Description: Create a random Custom Resource Definition and make sure
the API returns success.
*/
framework.ConformanceIt("creating/deleting custom resource definition objects works ", func() {
framework.SkipUnlessServerVersionGTE(crdVersion, f.ClientSet.Discovery())
config, err := framework.LoadConfig()
if err != nil {
framework.Failf("failed to load config: %v", err)
}
apiExtensionClient, err := clientset.NewForConfig(config)
if err != nil {
framework.Failf("failed to initialize apiExtensionClient: %v", err)
}
randomDefinition := testserver.NewRandomNameCustomResourceDefinition(v1beta1.ClusterScoped)
// Create the CRD and wait for the resource to be recognized and available.
randomDefinition, err = testserver.CreateNewCustomResourceDefinition(randomDefinition, apiExtensionClient, f.DynamicClient)
if err != nil {
framework.Failf("failed to create CustomResourceDefinition: %v", err)
}
defer func() {
err = testserver.DeleteCustomResourceDefinition(randomDefinition, apiExtensionClient)
if err != nil {
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
}
}()
})
})
})

vendor/k8s.io/kubernetes/test/e2e/apimachinery/etcd_failure.go (generated, vendored)

@@ -0,0 +1,141 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apimachinery
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/apps"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
f := framework.NewDefaultFramework("etcd-failure")
BeforeEach(func() {
// This test requires:
// - SSH
// - master access
// ... so the provider check should be identical to the intersection of
// providers that provide those capabilities.
framework.SkipUnlessProviderIs("gce")
Expect(framework.RunRC(testutils.RCConfig{
Client: f.ClientSet,
Name: "baz",
Namespace: f.Namespace.Name,
Image: imageutils.GetPauseImageName(),
Replicas: 1,
})).NotTo(HaveOccurred())
})
It("should recover from network partition with master", func() {
etcdFailTest(
f,
"sudo iptables -A INPUT -p tcp --destination-port 2379 -j DROP",
"sudo iptables -D INPUT -p tcp --destination-port 2379 -j DROP",
)
})
It("should recover from SIGKILL", func() {
etcdFailTest(
f,
"pgrep etcd | xargs -I {} sudo kill -9 {}",
"echo 'do nothing. monit should restart etcd.'",
)
})
})
func etcdFailTest(f *framework.Framework, failCommand, fixCommand string) {
doEtcdFailure(failCommand, fixCommand)
checkExistingRCRecovers(f)
apps.TestReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage)
}
// For this duration, etcd will be failed by executing failCommand on the master.
// After the duration, fixCommand is executed on the master, and we then assert
// that etcd and the kubernetes components recover.
const etcdFailureDuration = 20 * time.Second
func doEtcdFailure(failCommand, fixCommand string) {
By("failing etcd")
masterExec(failCommand)
time.Sleep(etcdFailureDuration)
masterExec(fixCommand)
}
func masterExec(cmd string) {
result, err := framework.SSH(cmd, framework.GetMasterHost()+":22", framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred())
if result.Code != 0 {
framework.LogSSHResult(result)
framework.Failf("master exec command returned non-zero")
}
}
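// masterExec is used above as, for example,
// masterExec("pgrep etcd | xargs -I {} sudo kill -9 {}"): the command runs on
// the master over SSH and any non-zero exit code fails the test.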
func checkExistingRCRecovers(f *framework.Framework) {
By("assert that the pre-existing replication controller recovers")
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
rcSelector := labels.Set{"name": "baz"}.AsSelector()
By("deleting pods from existing replication controller")
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
options := metav1.ListOptions{LabelSelector: rcSelector.String()}
pods, err := podClient.List(options)
if err != nil {
framework.Logf("apiserver returned error, as expected before recovery: %v", err)
return false, nil
}
if len(pods.Items) == 0 {
return false, nil
}
for _, pod := range pods.Items {
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
}
framework.Logf("apiserver has recovered")
return true, nil
}))
By("waiting for replication controller to recover")
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
options := metav1.ListOptions{LabelSelector: rcSelector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods.Items {
if pod.DeletionTimestamp == nil && podutil.IsPodReady(&pod) {
return true, nil
}
}
return false, nil
}))
}

vendor/k8s.io/kubernetes/test/e2e/apimachinery/framework.go (generated, vendored)

@@ -0,0 +1,23 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apimachinery
import "github.com/onsi/ginkgo"
func SIGDescribe(text string, body func()) bool {
return ginkgo.Describe("[sig-api-machinery] "+text, body)
}
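// For example, SIGDescribe("Aggregator", func() { ... }) registers specs
// under the name "[sig-api-machinery] Aggregator".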

vendor/k8s.io/kubernetes/test/e2e/apimachinery/garbage_collector.go (generated, vendored): diff suppressed because it is too large

vendor/k8s.io/kubernetes/test/e2e/apimachinery/generated_clientset.go (generated, vendored)

@@ -0,0 +1,327 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apimachinery
import (
"strconv"
"time"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)
func stagingClientPod(name, value string) v1.Pod {
return v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Ports: []v1.ContainerPort{{ContainerPort: 80}},
},
},
},
}
}
func testingPod(name, value string) v1.Pod {
return v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Ports: []v1.ContainerPort{{ContainerPort: 80}},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/index.html",
Port: intstr.FromInt(8080),
},
},
InitialDelaySeconds: 30,
},
},
},
},
}
}
func observeCreation(w watch.Interface) {
select {
case event := <-w.ResultChan():
if event.Type != watch.Added {
framework.Failf("Failed to observe the creation: %v", event)
}
case <-time.After(30 * time.Second):
framework.Failf("Timeout while waiting for observing the creation")
}
}
func observeObjectDeletion(w watch.Interface) (obj runtime.Object) {
// Log when we start so failures include the duration to failure; if we are
// not getting the full timeout for some reason, a watch failure is the likely cause.
framework.Logf("Starting to observe pod deletion")
deleted := false
timeout := false
timer := time.After(framework.DefaultPodDeletionTimeout)
for !deleted && !timeout {
select {
case event, normal := <-w.ResultChan():
if !normal {
framework.Failf("The channel was closed unexpectedly")
return
}
if event.Type == watch.Deleted {
obj = event.Object
deleted = true
}
case <-timer:
timeout = true
}
}
if !deleted {
framework.Failf("Failed to observe pod deletion")
}
return
}
func observerUpdate(w watch.Interface, expectedUpdate func(runtime.Object) bool) {
timer := time.After(30 * time.Second)
updated := false
timeout := false
for !updated && !timeout {
select {
case event := <-w.ResultChan():
if event.Type == watch.Modified {
if expectedUpdate(event.Object) {
updated = true
}
}
case <-timer:
timeout = true
}
}
if !updated {
framework.Failf("Failed to observe pod update")
}
}
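// Example (sketch): block until the watched pod reports a deletionTimestamp,
// as the clientset spec below does (with a grace-period check added).
//
//	observerUpdate(w, func(obj runtime.Object) bool {
//		return obj.(*v1.Pod).ObjectMeta.DeletionTimestamp != nil
//	})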
var _ = SIGDescribe("Generated clientset", func() {
f := framework.NewDefaultFramework("clientset")
It("should create pods, set the deletionTimestamp and deletionGracePeriodSeconds of the pod", func() {
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
By("constructing the pod")
name := "pod" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
podCopy := testingPod(name, value)
pod := &podCopy
By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})).String()
options := metav1.ListOptions{LabelSelector: selector}
pods, err := podClient.List(options)
if err != nil {
framework.Failf("Failed to query for pods: %v", err)
}
Expect(len(pods.Items)).To(Equal(0))
options = metav1.ListOptions{
LabelSelector: selector,
ResourceVersion: pods.ListMeta.ResourceVersion,
}
w, err := podClient.Watch(options)
if err != nil {
framework.Failf("Failed to set up watch: %v", err)
}
By("creating the pod")
pod, err = podClient.Create(pod)
if err != nil {
framework.Failf("Failed to create pod: %v", err)
}
By("verifying the pod is in kubernetes")
options = metav1.ListOptions{
LabelSelector: selector,
ResourceVersion: pod.ResourceVersion,
}
pods, err = podClient.List(options)
if err != nil {
framework.Failf("Failed to query for pods: %v", err)
}
Expect(len(pods.Items)).To(Equal(1))
By("verifying pod creation was observed")
observeCreation(w)
// We need to wait for the pod to be scheduled, otherwise the deletion
// will be carried out immediately rather than gracefully.
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("deleting the pod gracefully")
gracePeriod := int64(31)
if err := podClient.Delete(pod.Name, metav1.NewDeleteOptions(gracePeriod)); err != nil {
framework.Failf("Failed to delete pod: %v", err)
}
By("verifying the deletionTimestamp and deletionGracePeriodSeconds of the pod is set")
observerUpdate(w, func(obj runtime.Object) bool {
pod := obj.(*v1.Pod)
return pod.ObjectMeta.DeletionTimestamp != nil && *pod.ObjectMeta.DeletionGracePeriodSeconds == gracePeriod
})
})
})
func newTestingCronJob(name string, value string) *batchv1beta1.CronJob {
parallelism := int32(1)
completions := int32(1)
return &batchv1beta1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"time": value,
},
},
Spec: batchv1beta1.CronJobSpec{
Schedule: "*/1 * * * ?",
ConcurrencyPolicy: batchv1beta1.AllowConcurrent,
JobTemplate: batchv1beta1.JobTemplateSpec{
Spec: batchv1.JobSpec{
Parallelism: &parallelism,
Completions: &completions,
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyOnFailure,
Volumes: []v1.Volume{
{
Name: "data",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
Containers: []v1.Container{
{
Name: "c",
Image: "busybox",
VolumeMounts: []v1.VolumeMount{
{
MountPath: "/data",
Name: "data",
},
},
},
},
},
},
},
},
},
}
}
var _ = SIGDescribe("Generated clientset", func() {
f := framework.NewDefaultFramework("clientset")
BeforeEach(func() {
framework.SkipIfMissingResource(f.DynamicClient, CronJobGroupVersionResource, f.Namespace.Name)
})
It("should create v1beta1 cronJobs, delete cronJobs, watch cronJobs", func() {
cronJobClient := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name)
By("constructing the cronJob")
name := "cronjob" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
cronJob := newTestingCronJob(name, value)
By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})).String()
options := metav1.ListOptions{LabelSelector: selector}
cronJobs, err := cronJobClient.List(options)
if err != nil {
framework.Failf("Failed to query for cronJobs: %v", err)
}
Expect(len(cronJobs.Items)).To(Equal(0))
options = metav1.ListOptions{
LabelSelector: selector,
ResourceVersion: cronJobs.ListMeta.ResourceVersion,
}
w, err := cronJobClient.Watch(options)
if err != nil {
framework.Failf("Failed to set up watch: %v", err)
}
By("creating the cronJob")
cronJob, err = cronJobClient.Create(cronJob)
if err != nil {
framework.Failf("Failed to create cronJob: %v", err)
}
By("verifying the cronJob is in kubernetes")
options = metav1.ListOptions{
LabelSelector: selector,
ResourceVersion: cronJob.ResourceVersion,
}
cronJobs, err = cronJobClient.List(options)
if err != nil {
framework.Failf("Failed to query for cronJobs: %v", err)
}
Expect(len(cronJobs.Items)).To(Equal(1))
By("verifying cronJob creation was observed")
observeCreation(w)
By("deleting the cronJob")
// Use DeletePropagationBackground so the CronJob is really gone when the call returns.
propagationPolicy := metav1.DeletePropagationBackground
if err := cronJobClient.Delete(cronJob.Name, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil {
framework.Failf("Failed to delete cronJob: %v", err)
}
options = metav1.ListOptions{LabelSelector: selector}
cronJobs, err = cronJobClient.List(options)
if err != nil {
framework.Failf("Failed to list cronJobs to verify deletion: %v", err)
}
Expect(len(cronJobs.Items)).To(Equal(0))
})
})

vendor/k8s.io/kubernetes/test/e2e/apimachinery/initializers.go (generated, vendored)

@@ -0,0 +1,417 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apimachinery
import (
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/admissionregistration/v1alpha1"
"k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
clientretry "k8s.io/client-go/util/retry"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
f := framework.NewDefaultFramework("initializers")
// TODO: Add failure traps once we have JustAfterEach
// See https://github.com/onsi/ginkgo/issues/303
It("should be invisible to controllers by default", func() {
ns := f.Namespace.Name
c := f.ClientSet
podName := "uninitialized-pod"
framework.Logf("Creating pod %s", podName)
ch := make(chan struct{})
go func() {
_, err := c.CoreV1().Pods(ns).Create(newUninitializedPod(podName))
Expect(err).NotTo(HaveOccurred())
close(ch)
}()
// wait to ensure the scheduler does not act on an uninitialized pod
err := wait.PollImmediate(2*time.Second, 15*time.Second, func() (bool, error) {
p, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return false, nil
}
return false, err
}
return len(p.Spec.NodeName) > 0, nil
})
Expect(err).To(Equal(wait.ErrWaitTimeout))
// verify that we can update an initializing pod
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pod.Annotations = map[string]string{"update-1": "test"}
pod, err = c.CoreV1().Pods(ns).Update(pod)
Expect(err).NotTo(HaveOccurred())
// verify the list call filters out uninitialized pods
pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{IncludeUninitialized: true})
Expect(err).NotTo(HaveOccurred())
Expect(pods.Items).To(HaveLen(1))
pods, err = c.CoreV1().Pods(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(pods.Items).To(HaveLen(0))
// clear initializers
pod.Initializers = nil
pod, err = c.CoreV1().Pods(ns).Update(pod)
Expect(err).NotTo(HaveOccurred())
// pod should now start running
err = framework.WaitForPodRunningInNamespace(c, pod)
Expect(err).NotTo(HaveOccurred())
// ensure create call returns
<-ch
// verify that we cannot start the pod initializing again
pod, err = c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pod.Initializers = &metav1.Initializers{
Pending: []metav1.Initializer{{Name: "Other"}},
}
_, err = c.CoreV1().Pods(ns).Update(pod)
if !errors.IsInvalid(err) || !strings.Contains(err.Error(), "immutable") {
Fail(fmt.Sprintf("expected invalid error: %v", err))
}
})
It("should dynamically register and apply initializers to pods [Serial]", func() {
ns := f.Namespace.Name
c := f.ClientSet
podName := "uninitialized-pod"
framework.Logf("Creating pod %s", podName)
// create and register an initializer
initializerName := "pod.test.e2e.kubernetes.io"
initializerConfigName := "e2e-test-initializer"
_, err := c.AdmissionregistrationV1alpha1().InitializerConfigurations().Create(&v1alpha1.InitializerConfiguration{
ObjectMeta: metav1.ObjectMeta{Name: initializerConfigName},
Initializers: []v1alpha1.Initializer{
{
Name: initializerName,
Rules: []v1alpha1.Rule{
{APIGroups: []string{""}, APIVersions: []string{"*"}, Resources: []string{"pods"}},
},
},
},
})
if errors.IsNotFound(err) {
framework.Skipf("dynamic configuration of initializers requires the alpha admissionregistration.k8s.io group to be enabled")
}
Expect(err).NotTo(HaveOccurred())
// we must remove the initializer when the test is complete and ensure no pods are pending for that initializer
defer cleanupInitializer(c, initializerConfigName, initializerName)
// poller configuration is 1 second, wait at least that long
time.Sleep(3 * time.Second)
// run create that blocks
ch := make(chan struct{})
go func() {
defer close(ch)
_, err := c.CoreV1().Pods(ns).Create(newInitPod(podName))
Expect(err).NotTo(HaveOccurred())
}()
// wait until the pod shows up uninitialized
By("Waiting until the pod is visible to a client")
var pod *v1.Pod
err = wait.PollImmediate(2*time.Second, 15*time.Second, func() (bool, error) {
pod, err = c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{IncludeUninitialized: true})
if errors.IsNotFound(err) {
return false, nil
}
if err != nil {
return false, err
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
Expect(pod.Initializers).NotTo(BeNil())
Expect(pod.Initializers.Pending).To(HaveLen(1))
Expect(pod.Initializers.Pending[0].Name).To(Equal(initializerName))
// pretend we are an initializer
By("Completing initialization")
pod.Initializers = nil
pod, err = c.CoreV1().Pods(ns).Update(pod)
Expect(err).NotTo(HaveOccurred())
// ensure create call returns
<-ch
// pod should now start running
err = framework.WaitForPodRunningInNamespace(c, pod)
Expect(err).NotTo(HaveOccurred())
// bypass initialization by explicitly passing an empty pending list
By("Setting an empty initializer as an admin to bypass initialization")
podName = "preinitialized-pod"
pod = newUninitializedPod(podName)
pod.Initializers.Pending = nil
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
Expect(pod.Initializers).To(BeNil())
// bypass initialization for mirror pods
By("Creating a mirror pod that bypasses initialization")
podName = "mirror-pod"
pod = newInitPod(podName)
pod.Annotations = map[string]string{
v1.MirrorPodAnnotationKey: "true",
}
pod.Spec.NodeName = "node-does-not-yet-exist"
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
Expect(pod.Initializers).To(BeNil())
Expect(pod.Annotations[v1.MirrorPodAnnotationKey]).To(Equal("true"))
})
It("don't cause replicaset controller creating extra pods if the initializer is not handled [Serial]", func() {
ns := f.Namespace.Name
c := f.ClientSet
podName := "uninitialized-pod"
framework.Logf("Creating pod %s", podName)
// create and register an initializer, without setting up a controller to handle it.
initializerName := "pod.test.e2e.kubernetes.io"
initializerConfigName := "e2e-test-initializer"
_, err := c.AdmissionregistrationV1alpha1().InitializerConfigurations().Create(&v1alpha1.InitializerConfiguration{
ObjectMeta: metav1.ObjectMeta{Name: initializerConfigName},
Initializers: []v1alpha1.Initializer{
{
Name: initializerName,
Rules: []v1alpha1.Rule{
{APIGroups: []string{""}, APIVersions: []string{"*"}, Resources: []string{"pods"}},
},
},
},
})
if errors.IsNotFound(err) {
framework.Skipf("dynamic configuration of initializers requires the alpha admissionregistration.k8s.io group to be enabled")
}
Expect(err).NotTo(HaveOccurred())
// we must remove the initializer when the test is complete and ensure no pods are pending for that initializer
defer cleanupInitializer(c, initializerConfigName, initializerName)
// poller configuration is 1 second, wait at least that long
time.Sleep(3 * time.Second)
// create a replicaset
persistedRS, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(newReplicaset())
Expect(err).NotTo(HaveOccurred())
// wait for replicaset controller to confirm that it has handled the creation
err = waitForRSObservedGeneration(c, persistedRS.Namespace, persistedRS.Name, persistedRS.Generation)
Expect(err).NotTo(HaveOccurred())
// update the replicaset spec to trigger a resync
patch := []byte(`{"spec":{"minReadySeconds":5}}`)
persistedRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Patch(persistedRS.Name, types.StrategicMergePatchType, patch)
Expect(err).NotTo(HaveOccurred())
	// wait for replicaset controller to confirm that it has handled the spec update
err = waitForRSObservedGeneration(c, persistedRS.Namespace, persistedRS.Name, persistedRS.Generation)
Expect(err).NotTo(HaveOccurred())
	// verify that the replicaset controller doesn't create extra pods
selector, err := metav1.LabelSelectorAsSelector(persistedRS.Spec.Selector)
Expect(err).NotTo(HaveOccurred())
listOptions := metav1.ListOptions{
LabelSelector: selector.String(),
IncludeUninitialized: true,
}
pods, err := c.CoreV1().Pods(ns).List(listOptions)
Expect(err).NotTo(HaveOccurred())
Expect(len(pods.Items)).Should(Equal(1))
})
It("will be set to nil if a patch removes the last pending initializer", func() {
ns := f.Namespace.Name
c := f.ClientSet
podName := "to-be-patch-initialized-pod"
framework.Logf("Creating pod %s", podName)
// TODO: lower the timeout so that the server responds faster.
_, err := c.CoreV1().Pods(ns).Create(newUninitializedPod(podName))
if err != nil && !errors.IsTimeout(err) {
framework.Failf("expect err to be timeout error, got %v", err)
}
uninitializedPod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(uninitializedPod.Initializers).NotTo(BeNil())
Expect(len(uninitializedPod.Initializers.Pending)).Should(Equal(1))
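	// A strategic merge patch with the "$patch":"delete" directive removes the
	// named entry from the pending list; removing the last entry causes the
	// server to reset metadata.initializers to nil.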
patch := fmt.Sprintf(`{"metadata":{"initializers":{"pending":[{"$patch":"delete","name":"%s"}]}}}`, uninitializedPod.Initializers.Pending[0].Name)
patchedPod, err := c.CoreV1().Pods(ns).Patch(uninitializedPod.Name, types.StrategicMergePatchType, []byte(patch))
Expect(err).NotTo(HaveOccurred())
Expect(patchedPod.Initializers).To(BeNil())
})
})
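// The tests above play the role of the initializer themselves by mutating
// pod.Initializers directly. For illustration only, a minimal controller-side
// step might look like the sketch below, which removes a single initializer
// from one pod; initializeOnePod is a hypothetical helper, not part of this
// test suite.
func initializeOnePod(c clientset.Interface, ns, name, initializerName string) error {
	pod, err := c.CoreV1().Pods(ns).Get(name, metav1.GetOptions{IncludeUninitialized: true})
	if err != nil {
		return err
	}
	if pod.Initializers == nil || len(pod.Initializers.Pending) == 0 {
		// nothing pending; the pod is already initialized
		return nil
	}
	// initializers run in order, so only act when we are first in the list
	if pod.Initializers.Pending[0].Name != initializerName {
		return nil
	}
	pod.Initializers.Pending = pod.Initializers.Pending[1:]
	if len(pod.Initializers.Pending) == 0 {
		// clearing the struct entirely marks initialization as complete
		pod.Initializers = nil
	}
	_, err = c.CoreV1().Pods(ns).Update(pod)
	return err
}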
func newUninitializedPod(podName string) *v1.Pod {
pod := newInitPod(podName)
pod.Initializers = &metav1.Initializers{
Pending: []metav1.Initializer{{Name: "test.k8s.io"}},
}
return pod
}
func newReplicaset() *v1beta1.ReplicaSet {
name := "initializer-test-replicaset"
replicas := int32(1)
labels := map[string]string{"initializer-test": "single-replicaset"}
return &v1beta1.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1beta1.ReplicaSetSpec{
Replicas: &replicas,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &zero,
Containers: []v1.Container{
{
Name: name + "-container",
Image: imageutils.GetE2EImage(imageutils.Porter),
},
},
},
},
},
}
}
func newInitPod(podName string) *v1.Pod {
containerName := fmt.Sprintf("%s-container", podName)
port := 8080
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: containerName,
Image: imageutils.GetE2EImage(imageutils.Porter),
Env: []v1.EnvVar{{Name: fmt.Sprintf("SERVE_PORT_%d", port), Value: "foo"}},
Ports: []v1.ContainerPort{{ContainerPort: int32(port)}},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
return pod
}
// removeInitializersFromAllPods walks all pods and ensures they don't have the provided initializer,
// to guarantee completing the test doesn't block the entire cluster.
func removeInitializersFromAllPods(c clientset.Interface, initializerName string) {
pods, err := c.CoreV1().Pods("").List(metav1.ListOptions{IncludeUninitialized: true})
if err != nil {
return
}
for _, p := range pods.Items {
if p.Initializers == nil {
continue
}
err := clientretry.RetryOnConflict(clientretry.DefaultRetry, func() error {
pod, err := c.CoreV1().Pods(p.Namespace).Get(p.Name, metav1.GetOptions{IncludeUninitialized: true})
if err != nil {
if errors.IsNotFound(err) {
return nil
}
return err
}
if pod.Initializers == nil {
return nil
}
var updated []metav1.Initializer
for _, pending := range pod.Initializers.Pending {
if pending.Name != initializerName {
updated = append(updated, pending)
}
}
if len(updated) == len(pod.Initializers.Pending) {
return nil
}
pod.Initializers.Pending = updated
if len(updated) == 0 {
pod.Initializers = nil
}
framework.Logf("Found initializer on pod %s in ns %s", pod.Name, pod.Namespace)
_, err = c.CoreV1().Pods(p.Namespace).Update(pod)
return err
})
if err != nil {
framework.Logf("Unable to remove initializer from pod %s in ns %s: %v", p.Name, p.Namespace, err)
}
}
}
// cleanupInitializer removes the InitializerConfiguration, and removes the initializer from all pods
func cleanupInitializer(c clientset.Interface, initializerConfigName, initializerName string) {
if err := c.AdmissionregistrationV1alpha1().InitializerConfigurations().Delete(initializerConfigName, nil); err != nil && !errors.IsNotFound(err) {
framework.Logf("got error on deleting %s", initializerConfigName)
}
// poller configuration is 1 second, wait at least that long
time.Sleep(3 * time.Second)
// clear our initializer from anyone who got it
removeInitializersFromAllPods(c, initializerName)
}
// waitForRSObservedGeneration waits until the RS status.observedGeneration matches metadata.generation.
func waitForRSObservedGeneration(c clientset.Interface, ns, name string, generation int64) error {
return wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
rs, err := c.ExtensionsV1beta1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
if generation > rs.Status.ObservedGeneration {
return false, nil
}
return true, nil
})
}


vendor/k8s.io/kubernetes/test/e2e/apimachinery/namespace.go generated vendored Normal file

@@ -0,0 +1,278 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apimachinery
import (
"fmt"
"strings"
"sync"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, maxSeconds int) {
	By("Creating testing namespaces")
	wg := &sync.WaitGroup{}
	wg.Add(totalNS)
	for n := 0; n < totalNS; n++ {
		go func(n int) {
			defer wg.Done()
			defer GinkgoRecover()
			// use a goroutine-local err to avoid a data race on a shared variable
			_, err := f.CreateNamespace(fmt.Sprintf("nslifetest-%v", n), nil)
			Expect(err).NotTo(HaveOccurred())
		}(n)
	}
wg.Wait()
	// Wait 10 seconds, then send delete requests for all the namespaces.
	By("Waiting 10 seconds")
	time.Sleep(10 * time.Second)
deleted, err := framework.DeleteNamespaces(f.ClientSet, []string{"nslifetest"}, nil /* skipFilter */)
Expect(err).NotTo(HaveOccurred())
Expect(len(deleted)).To(Equal(totalNS))
By("Waiting for namespaces to vanish")
	// Now poll until all namespaces have been eradicated.
framework.ExpectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second,
func() (bool, error) {
var cnt = 0
nsList, err := f.ClientSet.CoreV1().Namespaces().List(metav1.ListOptions{})
if err != nil {
return false, err
}
for _, item := range nsList.Items {
if strings.Contains(item.Name, "nslifetest") {
cnt++
}
}
if cnt > maxAllowedAfterDel {
framework.Logf("Remaining namespaces : %v", cnt)
return false, nil
}
return true, nil
}))
}
func waitForPodInNamespace(c clientset.Interface, ns, podName string) *v1.Pod {
var pod *v1.Pod
var err error
err = wait.PollImmediate(2*time.Second, 15*time.Second, func() (bool, error) {
pod, err = c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{IncludeUninitialized: true})
if errors.IsNotFound(err) {
return false, nil
}
if err != nil {
return false, err
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
return pod
}
func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
By("Creating a test namespace")
namespace, err := f.CreateNamespace("nsdeletetest", nil)
Expect(err).NotTo(HaveOccurred())
By("Waiting for a default service account to be provisioned in namespace")
err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
Expect(err).NotTo(HaveOccurred())
By("Creating a pod in the namespace")
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetPauseImageName(),
},
},
},
}
pod, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Waiting for the pod to have running status")
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
By("Creating an uninitialized pod in the namespace")
podB := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod-uninitialized",
Initializers: &metav1.Initializers{Pending: []metav1.Initializer{{Name: "test.initializer.k8s.io"}}},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetPauseImageName(),
},
},
},
}
go func() {
_, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(podB)
// This error is ok, because we will delete the pod before it completes initialization
framework.Logf("error from create uninitialized namespace: %v", err)
}()
podB = waitForPodInNamespace(f.ClientSet, namespace.Name, podB.Name)
By("Deleting the namespace")
err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil)
Expect(err).NotTo(HaveOccurred())
By("Waiting for the namespace to be removed.")
maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds
framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
func() (bool, error) {
_, err = f.ClientSet.CoreV1().Namespaces().Get(namespace.Name, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return true, nil
}
return false, nil
}))
By("Recreating the namespace")
namespace, err = f.CreateNamespace("nsdeletetest", nil)
Expect(err).NotTo(HaveOccurred())
By("Verifying there are no pods in the namespace")
_, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(pod.Name, metav1.GetOptions{})
Expect(err).To(HaveOccurred())
_, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(podB.Name, metav1.GetOptions{IncludeUninitialized: true})
Expect(err).To(HaveOccurred())
}
func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
var err error
By("Creating a test namespace")
namespace, err := f.CreateNamespace("nsdeletetest", nil)
Expect(err).NotTo(HaveOccurred())
By("Waiting for a default service account to be provisioned in namespace")
err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
Expect(err).NotTo(HaveOccurred())
By("Creating a service in the namespace")
serviceName := "test-service"
labels := map[string]string{
"foo": "bar",
"baz": "blah",
}
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
},
Spec: v1.ServiceSpec{
Selector: labels,
Ports: []v1.ServicePort{{
Port: 80,
TargetPort: intstr.FromInt(80),
}},
},
}
service, err = f.ClientSet.CoreV1().Services(namespace.Name).Create(service)
Expect(err).NotTo(HaveOccurred())
By("Deleting the namespace")
err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil)
Expect(err).NotTo(HaveOccurred())
By("Waiting for the namespace to be removed.")
maxWaitSeconds := int64(60)
framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
func() (bool, error) {
_, err = f.ClientSet.CoreV1().Namespaces().Get(namespace.Name, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return true, nil
}
return false, nil
}))
By("Recreating the namespace")
namespace, err = f.CreateNamespace("nsdeletetest", nil)
Expect(err).NotTo(HaveOccurred())
By("Verifying there is no service in the namespace")
_, err = f.ClientSet.CoreV1().Services(namespace.Name).Get(service.Name, metav1.GetOptions{})
Expect(err).To(HaveOccurred())
}
// This test must run [Serial] due to the impact that running other parallel
// tests can have on its performance. Each test that follows the common
// test framework follows this pattern:
// 1. Create a Namespace
// 2. Do work that generates content in that namespace
// 3. Delete the Namespace
// Creation of a Namespace is non-trivial since it requires waiting for a
// ServiceAccount to be generated.
// Deletion of a Namespace is non-trivial and performance intensive since
// it's an orchestrated process. The controller that handles deletion must
// query the namespace for all existing content, and then delete each piece
// of content in turn. As the API surface grows to add more KIND objects
// that could exist in a Namespace, the number of calls that the namespace
// controller must orchestrate grows, since it must LIST and then DELETE
// (one by one) each KIND. (An illustrative sketch of this KIND-by-KIND
// drain appears after the tests below.)
// There is work underway to improve this, but it's most likely not going
// to get significantly better until etcd v3.
// Going back to this test: it generates 100 Namespace objects, and then
// rapidly deletes all of them. This causes the NamespaceController to observe
// and attempt to process a large number of deletes concurrently. In effect,
// it's like running 100 traditional e2e tests in parallel. If the namespace
// controller orchestrating deletes is slowed down deleting another test's
// content, then this test may fail. Since the goal of this test is to soak
// Namespace creation and deletion, it's not appropriate to further soak the
// cluster with other parallel Namespace deletion activities, each with a
// variable amount of content in the associated Namespace.
// When run in [Serial], this test appears to delete Namespace objects at a
// rate of approximately 1 per second.
var _ = SIGDescribe("Namespaces [Serial]", func() {
f := framework.NewDefaultFramework("namespaces")
It("should ensure that all pods are removed when a namespace is deleted.",
func() { ensurePodsAreRemovedWhenNamespaceIsDeleted(f) })
It("should ensure that all services are removed when a namespace is deleted.",
func() { ensureServicesAreRemovedWhenNamespaceIsDeleted(f) })
It("should delete fast enough (90 percent of 100 namespaces in 150 seconds)",
func() { extinguish(f, 100, 10, 150) })
// On hold until etcd3; see #7372
It("should always delete fast (ALL of 100 namespaces in 150 seconds) [Feature:ComprehensiveNamespaceDraining]",
func() { extinguish(f, 100, 0, 150) })
})
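// The comment block above describes how the namespace controller drains a
// namespace KIND by KIND. The sketch below is illustrative only and covers
// just two kinds; the real controller discovers every group/version/resource
// via the discovery API. deleteNamespaceContent is a hypothetical helper,
// not part of this test suite.
func deleteNamespaceContent(c clientset.Interface, ns string) error {
	// LIST the content of one kind, then DELETE it one item at a time.
	pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, p := range pods.Items {
		if err := c.CoreV1().Pods(ns).Delete(p.Name, nil); err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	// ...and repeat for every other kind, e.g. services.
	services, err := c.CoreV1().Services(ns).List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, s := range services.Items {
		if err := c.CoreV1().Services(ns).Delete(s.Name, nil); err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}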


vendor/k8s.io/kubernetes/test/e2e/apimachinery/table_conversion.go generated vendored Normal file

@@ -0,0 +1,197 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apimachinery
import (
"bytes"
"fmt"
"text/tabwriter"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
authorizationv1 "k8s.io/api/authorization/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/printers"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
var serverPrintVersion = utilversion.MustParseSemantic("v1.10.0")
var _ = SIGDescribe("Servers with support for Table transformation", func() {
f := framework.NewDefaultFramework("tables")
BeforeEach(func() {
framework.SkipUnlessServerVersionGTE(serverPrintVersion, f.ClientSet.Discovery())
})
It("should return pod details", func() {
ns := f.Namespace.Name
c := f.ClientSet
podName := "pod-1"
framework.Logf("Creating pod %s", podName)
_, err := c.CoreV1().Pods(ns).Create(newTablePod(podName))
Expect(err).NotTo(HaveOccurred())
table := &metav1beta1.Table{}
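	// Content negotiation: the Accept header below asks the server to transform
	// the response into a meta.k8s.io/v1beta1 Table (server-side printing).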
err = c.CoreV1().RESTClient().Get().Resource("pods").Namespace(ns).Name(podName).SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Table: %#v", table)
Expect(len(table.ColumnDefinitions)).To(BeNumerically(">", 2))
Expect(len(table.Rows)).To(Equal(1))
Expect(len(table.Rows[0].Cells)).To(Equal(len(table.ColumnDefinitions)))
Expect(table.ColumnDefinitions[0].Name).To(Equal("Name"))
Expect(table.Rows[0].Cells[0]).To(Equal(podName))
out := printTable(table)
Expect(out).To(MatchRegexp("^NAME\\s"))
Expect(out).To(MatchRegexp("\npod-1\\s"))
framework.Logf("Table:\n%s", out)
})
It("should return chunks of table results for list calls", func() {
ns := f.Namespace.Name
c := f.ClientSet
client := c.CoreV1().PodTemplates(ns)
By("creating a large number of resources")
workqueue.Parallelize(5, 20, func(i int) {
for tries := 3; tries >= 0; tries-- {
_, err := client.Create(&v1.PodTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("template-%04d", i),
},
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "test", Image: "test2"},
},
},
},
})
if err == nil {
return
}
framework.Logf("Got an error creating template %d: %v", i, err)
}
Fail("Unable to create template %d, exiting", i)
})
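	// Request the first page: the server honors ListOptions.Limit and, when
	// more results remain, returns an opaque continue token that the follow-up
	// request passes back via ListOptions.Continue.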
pagedTable := &metav1beta1.Table{}
err := c.CoreV1().RESTClient().Get().Namespace(ns).Resource("podtemplates").
VersionedParams(&metav1.ListOptions{Limit: 2}, metav1.ParameterCodec).
SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").
Do().Into(pagedTable)
Expect(err).NotTo(HaveOccurred())
// TODO: kops PR job is still using etcd2, which prevents this feature from working. Remove this check when kops is upgraded to etcd3
if len(pagedTable.Rows) > 2 {
framework.Skipf("ERROR: This cluster does not support chunking, which means it is running etcd2 and not supported.")
}
Expect(len(pagedTable.Rows)).To(Equal(2))
Expect(pagedTable.ResourceVersion).ToNot(Equal(""))
Expect(pagedTable.SelfLink).ToNot(Equal(""))
Expect(pagedTable.Continue).ToNot(Equal(""))
Expect(pagedTable.Rows[0].Cells[0]).To(Equal("template-0000"))
Expect(pagedTable.Rows[1].Cells[0]).To(Equal("template-0001"))
err = c.CoreV1().RESTClient().Get().Namespace(ns).Resource("podtemplates").
VersionedParams(&metav1.ListOptions{Continue: pagedTable.Continue}, metav1.ParameterCodec).
SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").
Do().Into(pagedTable)
Expect(err).NotTo(HaveOccurred())
Expect(len(pagedTable.Rows)).To(BeNumerically(">", 0))
Expect(pagedTable.Rows[0].Cells[0]).To(Equal("template-0002"))
})
It("should return generic metadata details across all namespaces for nodes", func() {
c := f.ClientSet
table := &metav1beta1.Table{}
err := c.CoreV1().RESTClient().Get().Resource("nodes").SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Table: %#v", table)
Expect(len(table.ColumnDefinitions)).To(BeNumerically(">=", 2))
Expect(len(table.Rows)).To(BeNumerically(">=", 1))
Expect(len(table.Rows[0].Cells)).To(Equal(len(table.ColumnDefinitions)))
Expect(table.ColumnDefinitions[0].Name).To(Equal("Name"))
Expect(table.ResourceVersion).ToNot(Equal(""))
Expect(table.SelfLink).ToNot(Equal(""))
out := printTable(table)
Expect(out).To(MatchRegexp("^NAME\\s"))
framework.Logf("Table:\n%s", out)
})
It("should return a 406 for a backend which does not implement metadata", func() {
c := f.ClientSet
table := &metav1beta1.Table{}
sar := &authorizationv1.SelfSubjectAccessReview{
Spec: authorizationv1.SelfSubjectAccessReviewSpec{
NonResourceAttributes: &authorizationv1.NonResourceAttributes{
Path: "/",
Verb: "get",
},
},
}
err := c.AuthorizationV1().RESTClient().Post().Resource("selfsubjectaccessreviews").SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Body(sar).Do().Into(table)
Expect(err).To(HaveOccurred())
Expect(err.(errors.APIStatus).Status().Code).To(Equal(int32(406)))
})
})
func printTable(table *metav1beta1.Table) string {
buf := &bytes.Buffer{}
tw := tabwriter.NewWriter(buf, 5, 8, 1, ' ', 0)
err := printers.PrintTable(table, tw, printers.PrintOptions{})
Expect(err).NotTo(HaveOccurred())
tw.Flush()
return buf.String()
}
func newTablePod(podName string) *v1.Pod {
containerName := fmt.Sprintf("%s-container", podName)
port := 8080
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: containerName,
Image: imageutils.GetE2EImage(imageutils.Porter),
Env: []v1.EnvVar{{Name: fmt.Sprintf("SERVE_PORT_%d", port), Value: "foo"}},
Ports: []v1.ContainerPort{{ContainerPort: int32(port)}},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
return pod
}

vendor/k8s.io/kubernetes/test/e2e/apimachinery/watch.go generated vendored Normal file

@@ -0,0 +1,383 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apimachinery
import (
"time"
"k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
watchConfigMapLabelKey = "watch-this-configmap"
multipleWatchersLabelValueA = "multiple-watchers-A"
multipleWatchersLabelValueB = "multiple-watchers-B"
fromResourceVersionLabelValue = "from-resource-version"
watchRestartedLabelValue = "watch-closed-and-restarted"
toBeChangedLabelValue = "label-changed-and-restored"
)
var _ = SIGDescribe("Watchers", func() {
f := framework.NewDefaultFramework("watch")
/*
Testname: watch-configmaps-with-multiple-watchers
Description: Ensure that multiple watchers are able to receive all add,
update, and delete notifications on configmaps that match a label selector and do
not receive notifications for configmaps which do not match that label selector.
*/
framework.ConformanceIt("should observe add, update, and delete watch notifications on configmaps", func() {
c := f.ClientSet
ns := f.Namespace.Name
By("creating a watch on configmaps with label A")
watchA, err := watchConfigMaps(f, "", multipleWatchersLabelValueA)
Expect(err).NotTo(HaveOccurred())
By("creating a watch on configmaps with label B")
watchB, err := watchConfigMaps(f, "", multipleWatchersLabelValueB)
Expect(err).NotTo(HaveOccurred())
By("creating a watch on configmaps with label A or B")
watchAB, err := watchConfigMaps(f, "", multipleWatchersLabelValueA, multipleWatchersLabelValueB)
Expect(err).NotTo(HaveOccurred())
testConfigMapA := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "e2e-watch-test-configmap-a",
Labels: map[string]string{
watchConfigMapLabelKey: multipleWatchersLabelValueA,
},
},
}
testConfigMapB := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "e2e-watch-test-configmap-b",
Labels: map[string]string{
watchConfigMapLabelKey: multipleWatchersLabelValueB,
},
},
}
By("creating a configmap with label A and ensuring the correct watchers observe the notification")
testConfigMapA, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMapA)
Expect(err).NotTo(HaveOccurred())
expectEvent(watchA, watch.Added, testConfigMapA)
expectEvent(watchAB, watch.Added, testConfigMapA)
expectNoEvent(watchB, watch.Added, testConfigMapA)
By("modifying configmap A and ensuring the correct watchers observe the notification")
testConfigMapA, err = updateConfigMap(c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1")
})
Expect(err).NotTo(HaveOccurred())
expectEvent(watchA, watch.Modified, testConfigMapA)
expectEvent(watchAB, watch.Modified, testConfigMapA)
expectNoEvent(watchB, watch.Modified, testConfigMapA)
By("modifying configmap A again and ensuring the correct watchers observe the notification")
testConfigMapA, err = updateConfigMap(c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2")
})
Expect(err).NotTo(HaveOccurred())
expectEvent(watchA, watch.Modified, testConfigMapA)
expectEvent(watchAB, watch.Modified, testConfigMapA)
expectNoEvent(watchB, watch.Modified, testConfigMapA)
By("deleting configmap A and ensuring the correct watchers observe the notification")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMapA.GetName(), nil)
Expect(err).NotTo(HaveOccurred())
expectEvent(watchA, watch.Deleted, nil)
expectEvent(watchAB, watch.Deleted, nil)
expectNoEvent(watchB, watch.Deleted, nil)
By("creating a configmap with label B and ensuring the correct watchers observe the notification")
testConfigMapB, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMapB)
Expect(err).NotTo(HaveOccurred())
expectEvent(watchB, watch.Added, testConfigMapB)
expectEvent(watchAB, watch.Added, testConfigMapB)
expectNoEvent(watchA, watch.Added, testConfigMapB)
By("deleting configmap B and ensuring the correct watchers observe the notification")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMapB.GetName(), nil)
Expect(err).NotTo(HaveOccurred())
expectEvent(watchB, watch.Deleted, nil)
expectEvent(watchAB, watch.Deleted, nil)
expectNoEvent(watchA, watch.Deleted, nil)
})
/*
Testname: watch-configmaps-from-resource-version
Description: Ensure that a watch can be opened from a particular resource version
in the past and only notifications happening after that resource version are observed.
*/
framework.ConformanceIt("should be able to start watching from a specific resource version", func() {
c := f.ClientSet
ns := f.Namespace.Name
testConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "e2e-watch-test-resource-version",
Labels: map[string]string{
watchConfigMapLabelKey: fromResourceVersionLabelValue,
},
},
}
By("creating a new configmap")
testConfigMap, err := c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
Expect(err).NotTo(HaveOccurred())
By("modifying the configmap once")
testConfigMapFirstUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1")
})
Expect(err).NotTo(HaveOccurred())
By("modifying the configmap a second time")
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2")
})
Expect(err).NotTo(HaveOccurred())
By("deleting the configmap")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
Expect(err).NotTo(HaveOccurred())
By("creating a watch on configmaps from the resource version returned by the first update")
testWatch, err := watchConfigMaps(f, testConfigMapFirstUpdate.ObjectMeta.ResourceVersion, fromResourceVersionLabelValue)
Expect(err).NotTo(HaveOccurred())
By("Expecting to observe notifications for all changes to the configmap after the first update")
expectEvent(testWatch, watch.Modified, testConfigMapSecondUpdate)
expectEvent(testWatch, watch.Deleted, nil)
})
/*
Testname: watch-configmaps-closed-and-restarted
Description: Ensure that a watch can be reopened from the last resource version
observed by the previous watch, and it will continue delivering notifications from
that point in time.
*/
framework.ConformanceIt("should be able to restart watching from the last resource version observed by the previous watch", func() {
c := f.ClientSet
ns := f.Namespace.Name
testConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "e2e-watch-test-watch-closed",
Labels: map[string]string{
watchConfigMapLabelKey: watchRestartedLabelValue,
},
},
}
By("creating a watch on configmaps")
testWatchBroken, err := watchConfigMaps(f, "", watchRestartedLabelValue)
Expect(err).NotTo(HaveOccurred())
By("creating a new configmap")
testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
Expect(err).NotTo(HaveOccurred())
By("modifying the configmap once")
_, err = updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1")
})
Expect(err).NotTo(HaveOccurred())
By("closing the watch once it receives two notifications")
expectEvent(testWatchBroken, watch.Added, testConfigMap)
lastEvent, ok := waitForEvent(testWatchBroken, watch.Modified, nil, 1*time.Minute)
if !ok {
framework.Failf("Timed out waiting for second watch notification")
}
testWatchBroken.Stop()
By("modifying the configmap a second time, while the watch is closed")
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2")
})
Expect(err).NotTo(HaveOccurred())
By("creating a new watch on configmaps from the last resource version observed by the first watch")
lastEventConfigMap, ok := lastEvent.Object.(*v1.ConfigMap)
if !ok {
framework.Failf("Expected last notfication to refer to a configmap but got: %v", lastEvent)
}
testWatchRestarted, err := watchConfigMaps(f, lastEventConfigMap.ObjectMeta.ResourceVersion, watchRestartedLabelValue)
Expect(err).NotTo(HaveOccurred())
By("deleting the configmap")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
Expect(err).NotTo(HaveOccurred())
By("Expecting to observe notifications for all changes to the configmap since the first watch closed")
expectEvent(testWatchRestarted, watch.Modified, testConfigMapSecondUpdate)
expectEvent(testWatchRestarted, watch.Deleted, nil)
})
/*
Testname: watch-configmaps-label-changed
	   Description: Ensure that when a watched object stops meeting the requirements of
	   a watch's selector, the watch observes a delete notification, and does not observe
	   notifications for that object until it meets the selector's requirements again.
*/
framework.ConformanceIt("should observe an object deletion if it stops meeting the requirements of the selector", func() {
c := f.ClientSet
ns := f.Namespace.Name
testConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "e2e-watch-test-label-changed",
Labels: map[string]string{
watchConfigMapLabelKey: toBeChangedLabelValue,
},
},
}
By("creating a watch on configmaps with a certain label")
testWatch, err := watchConfigMaps(f, "", toBeChangedLabelValue)
Expect(err).NotTo(HaveOccurred())
By("creating a new configmap")
testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
Expect(err).NotTo(HaveOccurred())
By("modifying the configmap once")
testConfigMapFirstUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1")
})
Expect(err).NotTo(HaveOccurred())
By("changing the label value of the configmap")
_, err = updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
cm.ObjectMeta.Labels[watchConfigMapLabelKey] = "wrong-value"
})
Expect(err).NotTo(HaveOccurred())
By("Expecting to observe a delete notification for the watched object")
expectEvent(testWatch, watch.Added, testConfigMap)
expectEvent(testWatch, watch.Modified, testConfigMapFirstUpdate)
expectEvent(testWatch, watch.Deleted, nil)
By("modifying the configmap a second time")
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2")
})
Expect(err).NotTo(HaveOccurred())
By("Expecting not to observe a notification because the object no longer meets the selector's requirements")
expectNoEvent(testWatch, watch.Modified, testConfigMapSecondUpdate)
By("changing the label value of the configmap back")
testConfigMapLabelRestored, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
cm.ObjectMeta.Labels[watchConfigMapLabelKey] = toBeChangedLabelValue
})
Expect(err).NotTo(HaveOccurred())
By("modifying the configmap a third time")
testConfigMapThirdUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "3")
})
Expect(err).NotTo(HaveOccurred())
By("deleting the configmap")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
Expect(err).NotTo(HaveOccurred())
By("Expecting to observe an add notification for the watched object when the label value was restored")
expectEvent(testWatch, watch.Added, testConfigMapLabelRestored)
expectEvent(testWatch, watch.Modified, testConfigMapThirdUpdate)
expectEvent(testWatch, watch.Deleted, nil)
})
})
func watchConfigMaps(f *framework.Framework, resourceVersion string, labels ...string) (watch.Interface, error) {
c := f.ClientSet
ns := f.Namespace.Name
opts := metav1.ListOptions{
ResourceVersion: resourceVersion,
LabelSelector: metav1.FormatLabelSelector(&metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: watchConfigMapLabelKey,
Operator: metav1.LabelSelectorOpIn,
Values: labels,
},
},
}),
}
return c.CoreV1().ConfigMaps(ns).Watch(opts)
}
func int64ptr(i int) *int64 {
i64 := int64(i)
return &i64
}
func setConfigMapData(cm *v1.ConfigMap, key, value string) {
if cm.Data == nil {
cm.Data = make(map[string]string)
}
cm.Data[key] = value
}
func expectEvent(w watch.Interface, eventType watch.EventType, object runtime.Object) {
if event, ok := waitForEvent(w, eventType, object, 1*time.Minute); !ok {
framework.Failf("Timed out waiting for expected watch notification: %v", event)
}
}
func expectNoEvent(w watch.Interface, eventType watch.EventType, object runtime.Object) {
if event, ok := waitForEvent(w, eventType, object, 10*time.Second); ok {
framework.Failf("Unexpected watch notification observed: %v", event)
}
}
func waitForEvent(w watch.Interface, expectType watch.EventType, expectObject runtime.Object, duration time.Duration) (watch.Event, bool) {
stopTimer := time.NewTimer(duration)
defer stopTimer.Stop()
for {
select {
case actual, ok := <-w.ResultChan():
if ok {
framework.Logf("Got : %v %v", actual.Type, actual.Object)
} else {
framework.Failf("Watch closed unexpectedly")
}
if expectType == actual.Type && (expectObject == nil || apiequality.Semantic.DeepEqual(expectObject, actual.Object)) {
return actual, true
}
case <-stopTimer.C:
expected := watch.Event{
Type: expectType,
Object: expectObject,
}
return expected, false
}
}
}

vendor/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go generated vendored Normal file

File diff suppressed because it is too large