Bumping k8s dependencies to 1.13
46  vendor/k8s.io/kubernetes/test/e2e/network/BUILD (generated, vendored)
@@ -27,7 +27,6 @@ go_library(
         "proxy.go",
         "service.go",
         "service_latency.go",
-        "serviceloadbalancers.go",
         "util_iperf.go",
     ],
     importpath = "k8s.io/kubernetes/test/e2e/network",
@@ -40,8 +39,29 @@ go_library(
         "//pkg/controller/endpoint:go_default_library",
         "//pkg/kubelet/apis:go_default_library",
         "//pkg/master/ports:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
+        "//staging/src/k8s.io/api/networking/v1:go_default_library",
+        "//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/rest:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
+        "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
+        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/manifest:go_default_library",
         "//test/e2e/network/scale:go_default_library",
         "//test/images/net/nat:go_default_library",
         "//test/utils:go_default_library",
@@ -50,28 +70,6 @@ go_library(
         "//vendor/github.com/onsi/gomega:go_default_library",
         "//vendor/google.golang.org/api/compute/v0.alpha:go_default_library",
         "//vendor/google.golang.org/api/compute/v1:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
-        "//vendor/k8s.io/api/networking/v1:go_default_library",
-        "//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/rest:go_default_library",
-        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
-        "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
-        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
     ],
 )
33  vendor/k8s.io/kubernetes/test/e2e/network/dns.go (generated, vendored)
@@ -37,8 +37,9 @@ var _ = SIGDescribe("DNS", func() {
    f := framework.NewDefaultFramework("dns")

    /*
-       Testname: dns-for-clusters
-       Description: Make sure that DNS can resolve the names of clusters.
+       Release : v1.9
+       Testname: DNS, cluster
+       Description: When a Pod is created, the pod MUST be able to resolve cluster dns entries such as kubernetes.default via DNS and /etc/hosts.
    */
    framework.ConformanceIt("should provide DNS for the cluster ", func() {
        // All the names we need to be able to resolve.
@@ -67,8 +68,9 @@ var _ = SIGDescribe("DNS", func() {
    })

    /*
-       Testname: dns-for-services
-       Description: Make sure that DNS can resolve the names of services.
+       Release : v1.9
+       Testname: DNS, services
+       Description: When a headless service is created, the service MUST be able to resolve all the required service endpoints. When the service is created, any pod in the same namespace must be able to resolve the service by all of the expected DNS names.
    */
    framework.ConformanceIt("should provide DNS for services ", func() {
        // Create a test headless service.
@@ -78,16 +80,17 @@ var _ = SIGDescribe("DNS", func() {
        }
        headlessService := framework.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector)
        _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
-       Expect(err).NotTo(HaveOccurred())
+       Expect(err).NotTo(HaveOccurred(), "failed to create headless service: %s", dnsTestServiceName)
        defer func() {
            By("deleting the test headless service")
            defer GinkgoRecover()
            f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil)
        }()

-       regularService := framework.CreateServiceSpec("test-service-2", "", false, testServiceSelector)
+       regularServiceName := "test-service-2"
+       regularService := framework.CreateServiceSpec(regularServiceName, "", false, testServiceSelector)
        regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService)
-       Expect(err).NotTo(HaveOccurred())
+       Expect(err).NotTo(HaveOccurred(), "failed to create regular service: %s", regularServiceName)
        defer func() {
            By("deleting the test service")
            defer GinkgoRecover()
@@ -128,7 +131,7 @@ var _ = SIGDescribe("DNS", func() {
        podHostname := "dns-querier-2"
        headlessService := framework.CreateServiceSpec(serviceName, "", true, testServiceSelector)
        _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
-       Expect(err).NotTo(HaveOccurred())
+       Expect(err).NotTo(HaveOccurred(), "failed to create headless service: %s", serviceName)
        defer func() {
            By("deleting the test headless service")
            defer GinkgoRecover()
@@ -159,7 +162,7 @@ var _ = SIGDescribe("DNS", func() {
        serviceName := "dns-test-service-3"
        externalNameService := framework.CreateServiceSpec(serviceName, "foo.example.com", false, nil)
        _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService)
-       Expect(err).NotTo(HaveOccurred())
+       Expect(err).NotTo(HaveOccurred(), "failed to create ExternalName service: %s", serviceName)
        defer func() {
            By("deleting the test externalName service")
            defer GinkgoRecover()
@@ -183,7 +186,7 @@ var _ = SIGDescribe("DNS", func() {
        _, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
            s.Spec.ExternalName = "bar.example.com"
        })
-       Expect(err).NotTo(HaveOccurred())
+       Expect(err).NotTo(HaveOccurred(), "failed to change externalName of service: %s", serviceName)
        wheezyProbeCmd, wheezyFileName = createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy")
        jessieProbeCmd, jessieFileName = createTargetedProbeCommand(hostFQDN, "CNAME", "jessie")
        By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
@@ -200,10 +203,10 @@ var _ = SIGDescribe("DNS", func() {
        _, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
            s.Spec.Type = v1.ServiceTypeClusterIP
            s.Spec.Ports = []v1.ServicePort{
-               {Port: 80, Name: "http", Protocol: "TCP"},
+               {Port: 80, Name: "http", Protocol: v1.ProtocolTCP},
            }
        })
-       Expect(err).NotTo(HaveOccurred())
+       Expect(err).NotTo(HaveOccurred(), "failed to change service type to ClusterIP for service: %s", serviceName)
        wheezyProbeCmd, wheezyFileName = createTargetedProbeCommand(hostFQDN, "A", "wheezy")
        jessieProbeCmd, jessieFileName = createTargetedProbeCommand(hostFQDN, "A", "jessie")
        By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
@@ -214,7 +217,7 @@ var _ = SIGDescribe("DNS", func() {
        pod3 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)

        svc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Get(externalNameService.Name, metav1.GetOptions{})
-       Expect(err).NotTo(HaveOccurred())
+       Expect(err).NotTo(HaveOccurred(), "failed to get service: %s", externalNameService.Name)

        validateTargetedProbeOutput(f, pod3, []string{wheezyFileName, jessieFileName}, svc.Spec.ClusterIP)
    })
@@ -230,7 +233,7 @@ var _ = SIGDescribe("DNS", func() {
            testDNSNameFull: testInjectedIP,
        })
        testServerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testServerPod)
-       Expect(err).NotTo(HaveOccurred(), "failed to create pod %s", testServerPod.Name)
+       Expect(err).NotTo(HaveOccurred(), "failed to create pod: %s", testServerPod.Name)
        framework.Logf("Created pod %v", testServerPod)
        defer func() {
            framework.Logf("Deleting pod %s...", testServerPod.Name)
@@ -261,7 +264,7 @@ var _ = SIGDescribe("DNS", func() {
            },
        }
        testUtilsPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testUtilsPod)
-       Expect(err).NotTo(HaveOccurred(), "failed to create pod %s", testUtilsPod.Name)
+       Expect(err).NotTo(HaveOccurred(), "failed to create pod: %s", testUtilsPod.Name)
        framework.Logf("Created pod %v", testUtilsPod)
        defer func() {
            framework.Logf("Deleting pod %s...", testUtilsPod.Name)
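A pattern worth noting across this file: every bare Expect(err).NotTo(HaveOccurred()) gains a printf-style annotation. Gomega accepts optional trailing arguments to an assertion and formats them into the failure message, so a failed e2e run reports which service or pod was involved. A minimal, self-contained sketch of the mechanism (the service name is hypothetical, and the fail handler is normally wired up by Ginkgo):

package main

import (
    "errors"
    "fmt"

    . "github.com/onsi/gomega"
)

func main() {
    // Ginkgo normally registers the fail handler; for a standalone sketch we just print.
    RegisterFailHandler(func(message string, callerSkip ...int) { fmt.Println(message) })

    serviceName := "dns-test-service" // hypothetical name, for illustration only
    err := errors.New("connection refused")

    // Any trailing arguments are treated as a printf-style description and
    // prepended to the failure output, so the log names the resource involved.
    Expect(err).NotTo(HaveOccurred(), "failed to create headless service: %s", serviceName)
}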
59  vendor/k8s.io/kubernetes/test/e2e/network/dns_common.go (generated, vendored)
@@ -65,8 +65,9 @@ func (t *dnsTestCommon) init() {
    label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"}))
    options := metav1.ListOptions{LabelSelector: label.String()}

-   pods, err := t.f.ClientSet.CoreV1().Pods("kube-system").List(options)
-   Expect(err).NotTo(HaveOccurred())
+   namespace := "kube-system"
+   pods, err := t.f.ClientSet.CoreV1().Pods(namespace).List(options)
+   Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s", namespace)
    Expect(len(pods.Items)).Should(BeNumerically(">=", 1))

    t.dnsPod = &pods.Items[0]
@@ -155,23 +156,23 @@ func (t *dnsTestCommon) setConfigMap(cm *v1.ConfigMap) {
        }.AsSelector().String(),
    }
    cmList, err := t.c.CoreV1().ConfigMaps(t.ns).List(options)
-   Expect(err).NotTo(HaveOccurred())
+   Expect(err).NotTo(HaveOccurred(), "failed to list ConfigMaps in namespace: %s", t.ns)

    if len(cmList.Items) == 0 {
        By(fmt.Sprintf("Creating the ConfigMap (%s:%s) %+v", t.ns, t.name, *cm))
        _, err := t.c.CoreV1().ConfigMaps(t.ns).Create(cm)
-       Expect(err).NotTo(HaveOccurred())
+       Expect(err).NotTo(HaveOccurred(), "failed to create ConfigMap (%s:%s) %+v", t.ns, t.name, *cm)
    } else {
        By(fmt.Sprintf("Updating the ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm))
        _, err := t.c.CoreV1().ConfigMaps(t.ns).Update(cm)
-       Expect(err).NotTo(HaveOccurred())
+       Expect(err).NotTo(HaveOccurred(), "failed to update ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm)
    }
}

func (t *dnsTestCommon) fetchDNSConfigMapData() map[string]string {
    if t.name == "coredns" {
        pcm, err := t.c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(t.name, metav1.GetOptions{})
-       Expect(err).NotTo(HaveOccurred())
+       Expect(err).NotTo(HaveOccurred(), "failed to get DNS ConfigMap: %s", t.name)
        return pcm.Data
    }
    return nil
@@ -190,7 +191,7 @@ func (t *dnsTestCommon) deleteConfigMap() {
    By(fmt.Sprintf("Deleting the ConfigMap (%s:%s)", t.ns, t.name))
    t.cm = nil
    err := t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil)
-   Expect(err).NotTo(HaveOccurred())
+   Expect(err).NotTo(HaveOccurred(), "failed to delete config map: %s", t.name)
}

func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
@@ -213,7 +214,7 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
                Image:   imageutils.GetE2EImage(imageutils.Dnsutils),
                Command: []string{"sleep", "10000"},
                Ports: []v1.ContainerPort{
-                   {ContainerPort: servicePort, Protocol: "TCP"},
+                   {ContainerPort: servicePort, Protocol: v1.ProtocolTCP},
                },
            },
        },
@@ -222,9 +223,9 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) {

    var err error
    t.utilPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.utilPod)
-   Expect(err).NotTo(HaveOccurred())
+   Expect(err).NotTo(HaveOccurred(), "failed to create pod: %v", t.utilPod)
    framework.Logf("Created pod %v", t.utilPod)
-   Expect(t.f.WaitForPodRunning(t.utilPod.Name)).NotTo(HaveOccurred())
+   Expect(t.f.WaitForPodRunning(t.utilPod.Name)).NotTo(HaveOccurred(), "pod failed to start running: %v", t.utilPod)

    t.utilService = &v1.Service{
        TypeMeta: metav1.TypeMeta{
@@ -238,7 +239,7 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
            Selector: map[string]string{"app": baseName},
            Ports: []v1.ServicePort{
                {
-                   Protocol:   "TCP",
+                   Protocol:   v1.ProtocolTCP,
                    Port:       servicePort,
                    TargetPort: intstr.FromInt(servicePort),
                },
@@ -247,14 +248,14 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
    }

    t.utilService, err = t.c.CoreV1().Services(t.f.Namespace.Name).Create(t.utilService)
-   Expect(err).NotTo(HaveOccurred())
+   Expect(err).NotTo(HaveOccurred(), "failed to create service: %s/%s", t.f.Namespace.Name, t.utilService.ObjectMeta.Name)
    framework.Logf("Created service %v", t.utilService)
}

func (t *dnsTestCommon) deleteUtilPod() {
    podClient := t.c.CoreV1().Pods(t.f.Namespace.Name)
    if err := podClient.Delete(t.utilPod.Name, metav1.NewDeleteOptions(0)); err != nil {
-       framework.Logf("Delete of pod %v:%v failed: %v",
+       framework.Logf("Delete of pod %v/%v failed: %v",
            t.utilPod.Namespace, t.utilPod.Name, err)
    }
}
@@ -270,7 +271,7 @@ func (t *dnsTestCommon) deleteCoreDNSPods() {

    for _, pod := range pods.Items {
        err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
-       Expect(err).NotTo(HaveOccurred())
+       Expect(err).NotTo(HaveOccurred(), "failed to delete pod: %s", pod.Name)
    }
}

@@ -286,7 +287,7 @@ func generateDNSServerPod(aRecords map[string]string) *v1.Pod {
        Containers: []v1.Container{
            {
                Name:  "dns",
-               Image: imageutils.GetE2EImage(imageutils.DNSMasq),
+               Image: imageutils.GetE2EImage(imageutils.Dnsutils),
                Command: []string{
                    "/usr/sbin/dnsmasq",
                    "-u", "root",
@@ -313,13 +314,13 @@ func (t *dnsTestCommon) createDNSPodFromObj(pod *v1.Pod) {

    var err error
    t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.dnsServerPod)
-   Expect(err).NotTo(HaveOccurred())
+   Expect(err).NotTo(HaveOccurred(), "failed to create pod: %v", t.dnsServerPod)
    framework.Logf("Created pod %v", t.dnsServerPod)
-   Expect(t.f.WaitForPodRunning(t.dnsServerPod.Name)).NotTo(HaveOccurred())
+   Expect(t.f.WaitForPodRunning(t.dnsServerPod.Name)).NotTo(HaveOccurred(), "pod failed to start running: %v", t.dnsServerPod)

    t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Get(
        t.dnsServerPod.Name, metav1.GetOptions{})
-   Expect(err).NotTo(HaveOccurred())
+   Expect(err).NotTo(HaveOccurred(), "failed to get pod: %s", t.dnsServerPod.Name)
}

func (t *dnsTestCommon) createDNSServer(aRecords map[string]string) {
@@ -338,7 +339,7 @@ func (t *dnsTestCommon) createDNSServerWithPtrRecord() {
        Containers: []v1.Container{
            {
                Name:  "dns",
-               Image: imageutils.GetE2EImage(imageutils.DNSMasq),
+               Image: imageutils.GetE2EImage(imageutils.Dnsutils),
                Command: []string{
                    "/usr/sbin/dnsmasq",
                    "-u", "root",
@@ -359,7 +360,7 @@ func (t *dnsTestCommon) createDNSServerWithPtrRecord() {
func (t *dnsTestCommon) deleteDNSServerPod() {
    podClient := t.c.CoreV1().Pods(t.f.Namespace.Name)
    if err := podClient.Delete(t.dnsServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
-       framework.Logf("Delete of pod %v:%v failed: %v",
+       framework.Logf("Delete of pod %v/%v failed: %v",
            t.utilPod.Namespace, t.dnsServerPod.Name, err)
    }
}
@@ -512,20 +513,20 @@ func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client

        if err != nil {
            if ctx.Err() != nil {
-               framework.Failf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
+               framework.Failf("Unable to read %s from pod %s/%s: %v", fileName, pod.Namespace, pod.Name, err)
            } else {
-               framework.Logf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
+               framework.Logf("Unable to read %s from pod %s/%s: %v", fileName, pod.Namespace, pod.Name, err)
            }
            failed = append(failed, fileName)
        } else if check && strings.TrimSpace(string(contents)) != expected {
-           framework.Logf("File %s from pod %s contains '%s' instead of '%s'", fileName, pod.Name, string(contents), expected)
+           framework.Logf("File %s from pod %s/%s contains '%s' instead of '%s'", fileName, pod.Namespace, pod.Name, string(contents), expected)
            failed = append(failed, fileName)
        }
    }
    if len(failed) == 0 {
        return true, nil
    }
-   framework.Logf("Lookups using %s failed for: %v\n", pod.Name, failed)
+   framework.Logf("Lookups using %s/%s failed for: %v\n", pod.Namespace, pod.Name, failed)
    return false, nil
}))
Expect(len(failed)).To(Equal(0))
@@ -540,7 +541,7 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string)
        podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
    }()
    if _, err := podClient.Create(pod); err != nil {
-       framework.Failf("Failed to create %s pod: %v", pod.Name, err)
+       framework.Failf("Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
    }

    framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
@@ -548,7 +549,7 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string)
    By("retrieving the pod")
    pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
    if err != nil {
-       framework.Failf("Failed to get pod %s: %v", pod.Name, err)
+       framework.Failf("Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err)
    }
    // Try to find results for each expected name.
    By("looking for the results for each expected name from probers")
@@ -556,7 +557,7 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string)

    // TODO: probe from the host, too.

-   framework.Logf("DNS probes using %s succeeded\n", pod.Name)
+   framework.Logf("DNS probes using %s/%s succeeded\n", pod.Namespace, pod.Name)
}

func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) {
@@ -568,7 +569,7 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames
        podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
    }()
    if _, err := podClient.Create(pod); err != nil {
-       framework.Failf("Failed to create %s pod: %v", pod.Name, err)
+       framework.Failf("Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
    }

    framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
@@ -576,7 +577,7 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames
    By("retrieving the pod")
    pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
    if err != nil {
-       framework.Failf("Failed to get pod %s: %v", pod.Name, err)
+       framework.Failf("Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err)
    }
    // Try to find the expected value for each expected name.
    By("looking for the results for each expected name from probers")
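Another recurring substitution above is the string literal "TCP" giving way to the typed constant v1.ProtocolTCP. Since v1.Protocol is a defined string type, the two serialize identically; the constant simply moves typo detection to compile time. A small sketch assuming the standard k8s.io/api/core/v1 and k8s.io/apimachinery packages:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
    // v1.Protocol is a defined string type, so the constant and the raw
    // literal are wire-identical; the constant just fails to compile when
    // misspelled, where a stray "TPC" string would slip through.
    port := v1.ServicePort{
        Port:       80,
        TargetPort: intstr.FromInt(9376),
        Protocol:   v1.ProtocolTCP,
    }
    fmt.Println(port.Protocol == "TCP") // true
}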
119  vendor/k8s.io/kubernetes/test/e2e/network/dns_configmap.go (generated, vendored)
@@ -41,9 +41,9 @@ var (
    moreForeverTestTimeout = 2 * 60 * time.Second
)

-var _ = SIGDescribe("DNS configMap federations", func() {
+var _ = SIGDescribe("DNS configMap federations [Feature:Federation]", func() {

-   t := &dnsNameserverTest{dnsTestCommon: newDnsTestCommon()}
+   t := &dnsFederationsConfigMapTest{dnsTestCommon: newDnsTestCommon()}

    It("should be able to change federation configuration [Slow][Serial]", func() {
        t.c = t.f.ClientSet
@@ -57,39 +57,97 @@ func (t *dnsFederationsConfigMapTest) run() {
    defer t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil)
    t.createUtilPodLabel("e2e-dns-configmap")
    defer t.deleteUtilPod()
+   originalConfigMapData := t.fetchDNSConfigMapData()
+   defer t.restoreDNSConfigMap(originalConfigMapData)

    t.validate()

-   t.labels = []string{"abc", "ghi"}
-   valid1 := map[string]string{"federations": t.labels[0] + "=def"}
-   valid1m := map[string]string{t.labels[0]: "def"}
-   valid2 := map[string]string{"federations": t.labels[1] + "=xyz"}
-   valid2m := map[string]string{t.labels[1]: "xyz"}
-   invalid := map[string]string{"federations": "invalid.map=xyz"}
-
-   By("empty -> valid1")
-   t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
-   t.validate()
-
-   By("valid1 -> valid2")
-   t.setConfigMap(&v1.ConfigMap{Data: valid2}, valid2m, true)
-   t.validate()
-
-   By("valid2 -> invalid")
-   t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false)
-   t.validate()
-
-   By("invalid -> valid1")
-   t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
-   t.validate()
-
-   By("valid1 -> deleted")
-   t.deleteConfigMap()
-   t.validate()
-
-   By("deleted -> invalid")
-   t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false)
-   t.validate()
+   if t.name == "coredns" {
+       t.labels = []string{"abc", "ghi"}
+       defaultConfig := map[string]string{
+           "Corefile": `.:53 {
+               kubernetes cluster.local in-addr.arpa ip6.arpa {
+                   pods insecure
+                   upstream
+                   fallthrough in-addr.arpa ip6.arpa
+               }
+           }`}
+
+       valid1 := map[string]string{
+           "Corefile": `.:53 {
+               kubernetes cluster.local in-addr.arpa ip6.arpa {
+                   pods insecure
+                   upstream
+                   fallthrough in-addr.arpa ip6.arpa
+               }
+               federation cluster.local {
+                   abc def.com
+               }
+               proxy . /etc/resolv.conf
+           }`}
+       valid1m := map[string]string{t.labels[0]: "def.com"}
+
+       valid2 := map[string]string{
+           "Corefile": `:53 {
+               kubernetes cluster.local in-addr.arpa ip6.arpa {
+                   pods insecure
+                   upstream
+                   fallthrough in-addr.arpa ip6.arpa
+               }
+               federation cluster.local {
+                   ghi xyz.com
+               }
+               proxy . /etc/resolv.conf
+           }`}
+       valid2m := map[string]string{t.labels[1]: "xyz.com"}
+
+       By("default -> valid1")
+       t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
+       t.deleteCoreDNSPods()
+       t.validate()
+
+       By("valid1 -> valid2")
+       t.setConfigMap(&v1.ConfigMap{Data: valid2}, valid2m, true)
+       t.deleteCoreDNSPods()
+       t.validate()
+
+       By("valid2 -> default")
+       t.setConfigMap(&v1.ConfigMap{Data: defaultConfig}, nil, false)
+       t.deleteCoreDNSPods()
+       t.validate()
+   } else {
+       t.labels = []string{"abc", "ghi"}
+       valid1 := map[string]string{"federations": t.labels[0] + "=def"}
+       valid1m := map[string]string{t.labels[0]: "def"}
+       valid2 := map[string]string{"federations": t.labels[1] + "=xyz"}
+       valid2m := map[string]string{t.labels[1]: "xyz"}
+       invalid := map[string]string{"federations": "invalid.map=xyz"}
+
+       By("empty -> valid1")
+       t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
+       t.validate()
+
+       By("valid1 -> valid2")
+       t.setConfigMap(&v1.ConfigMap{Data: valid2}, valid2m, true)
+       t.validate()
+
+       By("valid2 -> invalid")
+       t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false)
+       t.validate()
+
+       By("invalid -> valid1")
+       t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
+       t.validate()
+
+       By("valid1 -> deleted")
+       t.deleteConfigMap()
+       t.validate()
+
+       By("deleted -> invalid")
+       t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false)
+       t.validate()
+   }
}

func (t *dnsFederationsConfigMapTest) validate() {
@@ -104,7 +162,7 @@ func (t *dnsFederationsConfigMapTest) validate() {
        predicate := func(actual []string) bool {
            return len(actual) == 0
        }
-       t.checkDNSRecord(federationDNS, predicate, wait.ForeverTestTimeout)
+       t.checkDNSRecordFrom(federationDNS, predicate, "cluster-dns", wait.ForeverTestTimeout)
    }
} else {
    for label := range federations {
@@ -112,6 +170,9 @@ func (t *dnsFederationsConfigMapTest) validate() {
            t.utilService.ObjectMeta.Name, t.f.Namespace.Name, label)
        var localDNS = fmt.Sprintf("%s.%s.svc.cluster.local.",
            t.utilService.ObjectMeta.Name, t.f.Namespace.Name)
+       if t.name == "coredns" {
+           localDNS = t.utilService.Spec.ClusterIP
+       }
        // Check local mapping. Checking a remote mapping requires
        // creating an arbitrary DNS record which is not possible at the
        // moment.
@@ -124,12 +185,14 @@ func (t *dnsFederationsConfigMapTest) validate() {
            }
            return false
        }
-       t.checkDNSRecord(federationDNS, predicate, wait.ForeverTestTimeout)
+       t.checkDNSRecordFrom(federationDNS, predicate, "cluster-dns", wait.ForeverTestTimeout)
    }
}
}

func (t *dnsFederationsConfigMapTest) setConfigMap(cm *v1.ConfigMap, fedMap map[string]string, isValid bool) {
    t.fedMap = nil

    if isValid {
        t.fedMap = fedMap
    }
20  vendor/k8s.io/kubernetes/test/e2e/network/dns_scale_records.go (generated, vendored)
@@ -34,10 +34,11 @@ import (
const (
    parallelCreateServiceWorkers = 1
    maxServicesPerCluster        = 10000
+   maxServicesPerNamespace      = 5000
    checkServicePercent          = 0.05
)

-var _ = SIGDescribe("[Feature:PerformanceDNS]", func() {
+var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
    f := framework.NewDefaultFramework("performancedns")

    BeforeEach(func() {
@@ -50,10 +51,19 @@ var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {

    // answers dns for service - creates the maximum number of services, and then check dns record for one
    It("Should answer DNS query for maximum number of services per cluster", func() {
-       services := generateServicesInNamespace(f.Namespace.Name, maxServicesPerCluster)
+       // get integer ceiling of maxServicesPerCluster / maxServicesPerNamespace
+       numNs := (maxServicesPerCluster + maxServicesPerNamespace - 1) / maxServicesPerNamespace
+
+       var namespaces []string
+       for i := 0; i < numNs; i++ {
+           ns, _ := f.CreateNamespace(f.BaseName, nil)
+           namespaces = append(namespaces, ns.Name)
+       }
+
+       services := generateServicesInNamespaces(namespaces, maxServicesPerCluster)
        createService := func(i int) {
            defer GinkgoRecover()
-           framework.ExpectNoError(testutils.CreateServiceWithRetries(f.ClientSet, f.Namespace.Name, services[i]))
+           framework.ExpectNoError(testutils.CreateServiceWithRetries(f.ClientSet, services[i].Namespace, services[i]))
        }
        framework.Logf("Creating %v test services", maxServicesPerCluster)
        workqueue.Parallelize(parallelCreateServiceWorkers, len(services), createService)
@@ -86,13 +96,13 @@ var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
    })
})

-func generateServicesInNamespace(namespace string, num int) []*v1.Service {
+func generateServicesInNamespaces(namespaces []string, num int) []*v1.Service {
    services := make([]*v1.Service, num)
    for i := 0; i < num; i++ {
        services[i] = &v1.Service{
            ObjectMeta: metav1.ObjectMeta{
                Name:      "svc-" + strconv.Itoa(i),
-               Namespace: namespace,
+               Namespace: namespaces[i%len(namespaces)],
            },
            Spec: v1.ServiceSpec{
                Ports: []v1.ServicePort{{
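The rewritten scale test above spreads the services over just enough namespaces that none holds more than maxServicesPerNamespace; (a + b - 1) / b is the usual integer-ceiling idiom, giving (10000 + 5000 - 1) / 5000 = 2 namespaces here. A standalone sketch of the distribution logic:

package main

import "fmt"

const (
    maxServicesPerCluster   = 10000
    maxServicesPerNamespace = 5000
)

func main() {
    // Integer ceiling of maxServicesPerCluster / maxServicesPerNamespace.
    numNs := (maxServicesPerCluster + maxServicesPerNamespace - 1) / maxServicesPerNamespace
    fmt.Println(numNs) // 2

    // Round-robin placement, mirroring Namespace: namespaces[i%len(namespaces)]
    // in the hunk above: service i lands in namespace i mod numNs.
    counts := make([]int, numNs)
    for i := 0; i < maxServicesPerCluster; i++ {
        counts[i%numNs]++
    }
    fmt.Println(counts) // [5000 5000]: no namespace exceeds the cap
}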
13  vendor/k8s.io/kubernetes/test/e2e/network/example_cluster_dns.go (generated, vendored)
@@ -81,8 +81,9 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
    namespaces := []*v1.Namespace{nil, nil}
    for i := range namespaces {
        var err error
-       namespaces[i], err = f.CreateNamespace(fmt.Sprintf("dnsexample%d", i), nil)
-       Expect(err).NotTo(HaveOccurred())
+       namespaceName := fmt.Sprintf("dnsexample%d", i)
+       namespaces[i], err = f.CreateNamespace(namespaceName, nil)
+       Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", namespaceName)
    }

    for _, ns := range namespaces {
@@ -104,7 +105,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
    label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName}))
    options := metav1.ListOptions{LabelSelector: label.String()}
    pods, err := c.CoreV1().Pods(ns.Name).List(options)
-   Expect(err).NotTo(HaveOccurred())
+   Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s", ns.Name)
    err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods)
    Expect(err).NotTo(HaveOccurred(), "waiting for all pods to respond")
    framework.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)
@@ -151,7 +152,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
    // wait for pods to print their result
    for _, ns := range namespaces {
        _, err := framework.LookForStringInLog(ns.Name, frontendPodName, frontendPodContainerName, podOutput, framework.PodStartTimeout)
-       Expect(err).NotTo(HaveOccurred())
+       Expect(err).NotTo(HaveOccurred(), "pod %s failed to print result in logs", frontendPodName)
    }
})
})
@@ -163,10 +164,10 @@ func getNsCmdFlag(ns *v1.Namespace) string {
// pass enough context with the 'old' parameter so that it replaces what your really intended.
func prepareResourceWithReplacedString(inputFile, old, new string) string {
    f, err := os.Open(inputFile)
-   Expect(err).NotTo(HaveOccurred())
+   Expect(err).NotTo(HaveOccurred(), "failed to open file: %s", inputFile)
    defer f.Close()
    data, err := ioutil.ReadAll(f)
-   Expect(err).NotTo(HaveOccurred())
+   Expect(err).NotTo(HaveOccurred(), "failed to read from file: %s", inputFile)
    podYaml := strings.Replace(string(data), old, new, 1)
    return podYaml
}
4  vendor/k8s.io/kubernetes/test/e2e/network/firewall.go (generated, vendored)
@@ -70,7 +70,7 @@ var _ = SIGDescribe("Firewall rule", func() {

    By("Creating a LoadBalancer type service with ExternalTrafficPolicy=Global")
    svc := jig.CreateLoadBalancerService(ns, serviceName, framework.LoadBalancerCreateTimeoutDefault, func(svc *v1.Service) {
-       svc.Spec.Ports = []v1.ServicePort{{Protocol: "TCP", Port: framework.FirewallTestHttpPort}}
+       svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: framework.FirewallTestHttpPort}}
        svc.Spec.LoadBalancerSourceRanges = firewallTestSourceRanges
    })
    defer func() {
@@ -80,7 +80,7 @@ var _ = SIGDescribe("Firewall rule", func() {
        })
        Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
        By("Waiting for the local traffic health check firewall rule to be deleted")
-       localHCFwName := framework.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.GetLoadBalancerName(svc), false)
+       localHCFwName := framework.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), false)
        _, err := framework.WaitForFirewallRule(gceCloud, localHCFwName, false, framework.LoadBalancerCleanupTimeout)
        Expect(err).NotTo(HaveOccurred())
    }()
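The cloudprovider.GetLoadBalancerName to cloudprovider.DefaultLoadBalancerName substitution here (and in network_tiers.go and service.go below) tracks a 1.13 API change: the default name is now derived purely from the Service, roughly "a" plus the Service UID with dashes removed, truncated to fit cloud name limits. The sketch below is an approximation of that scheme under those assumptions, not the canonical implementation:

package main

import (
    "fmt"
    "strings"
)

// defaultLoadBalancerName approximates the naming scheme behind
// cloudprovider.DefaultLoadBalancerName: "a" plus the Service UID with
// dashes stripped, truncated to 32 characters. The truncation length is
// an assumption based on GCE resource-name limits, not a quoted constant.
func defaultLoadBalancerName(serviceUID string) string {
    name := "a" + strings.Replace(serviceUID, "-", "", -1)
    if len(name) > 32 {
        name = name[:32]
    }
    return name
}

func main() {
    fmt.Println(defaultLoadBalancerName("3f8a2c1d-9b4e-4a7f-8c2d-1e5b6a7c8d9e"))
    // a3f8a2c1d9b4e4a7f8c2d1e5b6a7c8d9 (33 chars truncated to 32)
}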
91  vendor/k8s.io/kubernetes/test/e2e/network/ingress.go (generated, vendored)
@@ -17,6 +17,7 @@ limitations under the License.
package network

import (
+   "encoding/json"
    "fmt"
    "net/http"
    "path/filepath"
@@ -490,7 +491,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
    It("should conform to Ingress spec", func() {
        jig.PollInterval = 5 * time.Second
        conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{
-           framework.NEGAnnotation: "true",
+           framework.NEGAnnotation: `{"ingress": true}`,
        })
        for _, t := range conformanceTests {
            By(t.EntryLog)
@@ -516,7 +517,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
        svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
        Expect(err).NotTo(HaveOccurred())
        for _, svc := range svcList.Items {
-           svc.Annotations[framework.NEGAnnotation] = "false"
+           svc.Annotations[framework.NEGAnnotation] = `{"ingress": false}`
            _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
            Expect(err).NotTo(HaveOccurred())
        }
@@ -529,7 +530,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
        svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
        Expect(err).NotTo(HaveOccurred())
        for _, svc := range svcList.Items {
-           svc.Annotations[framework.NEGAnnotation] = "true"
+           svc.Annotations[framework.NEGAnnotation] = `{"ingress": true}`
            _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
            Expect(err).NotTo(HaveOccurred())
        }
@@ -539,7 +540,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
        jig.WaitForIngress(true)
    })

-   It("should be able to create a ClusterIP service [Unreleased]", func() {
+   It("should be able to create a ClusterIP service", func() {
        var err error
        By("Create a basic HTTP ingress using NEG")
        jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{})
@@ -647,6 +648,88 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
        }
    })
})

+   It("should sync endpoints for both Ingress-referenced NEG and standalone NEG", func() {
+       name := "hostname"
+       expectedKeys := []int32{80, 443}
+
+       scaleAndValidateExposedNEG := func(num int) {
+           scale, err := f.ClientSet.ExtensionsV1beta1().Deployments(ns).GetScale(name, metav1.GetOptions{})
+           Expect(err).NotTo(HaveOccurred())
+           if scale.Spec.Replicas != int32(num) {
+               scale.Spec.Replicas = int32(num)
+               _, err = f.ClientSet.ExtensionsV1beta1().Deployments(ns).UpdateScale(name, scale)
+               Expect(err).NotTo(HaveOccurred())
+           }
+           wait.Poll(10*time.Second, framework.NEGUpdateTimeout, func() (bool, error) {
+               svc, err := f.ClientSet.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
+               Expect(err).NotTo(HaveOccurred())
+
+               var status framework.NegStatus
+               v, ok := svc.Annotations[framework.NEGStatusAnnotation]
+               if !ok {
+                   // Wait for NEG sync loop to find NEGs
+                   framework.Logf("Waiting for %v, got: %+v", framework.NEGStatusAnnotation, svc.Annotations)
+                   return false, nil
+               }
+               err = json.Unmarshal([]byte(v), &status)
+               if err != nil {
+                   framework.Logf("Error in parsing Expose NEG annotation: %v", err)
+                   return false, nil
+               }
+               framework.Logf("Got %v: %v", framework.NEGStatusAnnotation, v)
+
+               // Expect 2 NEGs to be created based on the test setup (neg-exposed)
+               if len(status.NetworkEndpointGroups) != 2 {
+                   framework.Logf("Expected 2 NEGs, got %d", len(status.NetworkEndpointGroups))
+                   return false, nil
+               }
+
+               for _, port := range expectedKeys {
+                   if _, ok := status.NetworkEndpointGroups[port]; !ok {
+                       framework.Logf("Expected ServicePort key %v, but does not exist", port)
+                   }
+               }
+
+               if len(status.NetworkEndpointGroups) != len(expectedKeys) {
+                   framework.Logf("Expected length of %+v to equal length of %+v, but does not", status.NetworkEndpointGroups, expectedKeys)
+               }
+
+               gceCloud := gceController.Cloud.Provider.(*gcecloud.GCECloud)
+               for _, neg := range status.NetworkEndpointGroups {
+                   networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false)
+                   Expect(err).NotTo(HaveOccurred())
+                   if len(networkEndpoints) != num {
+                       framework.Logf("Expect number of endpoints to be %d, but got %d", num, len(networkEndpoints))
+                       return false, nil
+                   }
+               }
+
+               return true, nil
+           })
+       }
+
+       By("Create a basic HTTP ingress using NEG")
+       jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{})
+       jig.WaitForIngress(true)
+       usingNEG, err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false))
+       Expect(err).NotTo(HaveOccurred())
+       Expect(usingNEG).To(BeTrue())
+       // initial replicas number is 1
+       scaleAndValidateExposedNEG(1)
+
+       By("Scale up number of backends to 5")
+       scaleAndValidateExposedNEG(5)
+
+       By("Scale down number of backends to 3")
+       scaleAndValidateExposedNEG(3)
+
+       By("Scale up number of backends to 6")
+       scaleAndValidateExposedNEG(6)
+
+       By("Scale down number of backends to 2")
+       scaleAndValidateExposedNEG(3)
+   })
})

Describe("GCE [Slow] [Feature:kubemci]", func() {
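The NEG annotation changes in this file move from the bare strings "true"/"false" to a JSON object, which is also why "encoding/json" joins the imports: the new test block unmarshals the NEG status annotation into a struct. A sketch of the round-trip, with a hypothetical struct mirroring the {"ingress": true} shape:

package main

import (
    "encoding/json"
    "fmt"
)

// negAnnotation is a hypothetical mirror of the value now stored under the
// NEG annotation key: {"ingress": true} instead of the old bare "true".
type negAnnotation struct {
    Ingress bool `json:"ingress"`
}

func main() {
    // Writing the annotation: marshal a struct rather than hard-coding "true".
    raw, _ := json.Marshal(negAnnotation{Ingress: true})
    fmt.Println(string(raw)) // {"ingress":true}

    // Reading it back, the same way the new test block parses the NEG status
    // annotation with json.Unmarshal before inspecting it.
    var parsed negAnnotation
    if err := json.Unmarshal([]byte(`{"ingress": false}`), &parsed); err != nil {
        fmt.Println("error parsing NEG annotation:", err)
        return
    }
    fmt.Println(parsed.Ingress) // false
}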
2  vendor/k8s.io/kubernetes/test/e2e/network/network_policy.go (generated, vendored)
@@ -556,7 +556,7 @@ func createNetworkClientPod(f *framework.Framework, namespace *v1.Namespace, pod
    Containers: []v1.Container{
        {
            Name:  fmt.Sprintf("%s-container", podName),
-           Image: "busybox",
+           Image: imageutils.GetE2EImage(imageutils.BusyBox),
            Args: []string{
                "/bin/sh",
                "-c",
2  vendor/k8s.io/kubernetes/test/e2e/network/network_tiers.go (generated, vendored)
@@ -80,7 +80,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
    Expect(err).NotTo(HaveOccurred())
    Expect(svcTier).To(Equal(cloud.NetworkTierStandard))
    // Record the LB name for test cleanup.
-   serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(svc))
+   serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))

    // Wait and verify the LB.
    ingressIP := waitAndVerifyLBWithTier(jig, ns, svcName, "", createTimeout, lagTimeout)
19  vendor/k8s.io/kubernetes/test/e2e/network/proxy.go (generated, vendored)
@@ -61,26 +61,25 @@ var _ = SIGDescribe("Proxy", func() {
    prefix := "/api/" + version

    /*
-       Testname: proxy-subresource-node-logs-port
-       Description: Ensure that proxy on node logs works with node proxy
-       subresource and explicit kubelet port.
+       Release : v1.9
+       Testname: Proxy, logs port endpoint
+       Description: Select any node in the cluster to invoke /proxy/nodes/<nodeip>:10250/logs endpoint. This endpoint MUST be reachable.
    */
    framework.ConformanceIt("should proxy logs on node with explicit kubelet port using proxy subresource ", func() { nodeProxyTest(f, prefix+"/nodes/", ":10250/proxy/logs/") })

    /*
-       Testname: proxy-subresource-node-logs
-       Description: Ensure that proxy on node logs works with node proxy
-       subresource.
+       Release : v1.9
+       Testname: Proxy, logs endpoint
+       Description: Select any node in the cluster to invoke /proxy/nodes/<nodeip>//logs endpoint. This endpoint MUST be reachable.
    */
    framework.ConformanceIt("should proxy logs on node using proxy subresource ", func() { nodeProxyTest(f, prefix+"/nodes/", "/proxy/logs/") })

    // using the porter image to serve content, access the content
    // (of multiple pods?) from multiple (endpoints/services?)

    /*
-       Testname: proxy-service-pod
-       Description: Ensure that proxy through a service and a pod works with
-       both generic top level prefix proxy and proxy subresource.
+       Release : v1.9
+       Testname: Proxy, logs service endpoint
+       Description: Select any node in the cluster to invoke /logs endpoint using the /nodes/proxy subresource from the kubelet port. This endpoint MUST be reachable.
    */
    framework.ConformanceIt("should proxy through a service and a pod ", func() {
        start := time.Now()
10  vendor/k8s.io/kubernetes/test/e2e/network/scale/BUILD (generated, vendored)
@@ -6,12 +6,12 @@ go_library(
    importpath = "k8s.io/kubernetes/test/e2e/network/scale",
    visibility = ["//visibility:public"],
    deps = [
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//test/e2e/framework:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
    ],
)
8  vendor/k8s.io/kubernetes/test/e2e/network/scale/localrun/BUILD (generated, vendored)
@@ -7,13 +7,13 @@ go_library(
    visibility = ["//visibility:private"],
    deps = [
        "//pkg/cloudprovider/providers/gce:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/e2e/network/scale:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
    ],
)
49  vendor/k8s.io/kubernetes/test/e2e/network/service.go (generated, vendored)
@@ -57,7 +57,7 @@ var (
    Ports: []v1.ServicePort{{
        Port:       int32(defaultServeHostnameServicePort),
        TargetPort: intstr.FromInt(9376),
-       Protocol:   "TCP",
+       Protocol:   v1.ProtocolTCP,
    }},
    Selector: map[string]string{
        "name": defaultServeHostnameServiceName,
@@ -90,8 +90,8 @@ var _ = SIGDescribe("Services", func() {
        framework.DescribeSvc(f.Namespace.Name)
    }
    for _, lb := range serviceLBNames {
-       framework.Logf("cleaning gce resource for %s", lb)
-       framework.CleanupServiceGCEResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
+       framework.Logf("cleaning load balancer resource for %s", lb)
+       framework.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
    }
    //reset serviceLBNames
    serviceLBNames = []string{}
@@ -100,8 +100,9 @@ var _ = SIGDescribe("Services", func() {
    // TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here.

    /*
-       Testname: service-kubernetes-exists
-       Description: Make sure kubernetes service does exist.
+       Release : v1.9
+       Testname: Kubernetes Service
+       Description: By default when a kubernetes cluster is running there MUST be a ‘kubernetes’ service running in the cluster.
    */
    framework.ConformanceIt("should provide secure master service ", func() {
        _, err := cs.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
@@ -109,9 +110,9 @@ var _ = SIGDescribe("Services", func() {
    })

    /*
-       Testname: service-valid-endpoints
-       Description: Ensure a service with no pod, one pod or two pods has
-       valid/accessible endpoints (same port number for service and pods).
+       Release : v1.9
+       Testname: Service, endpoints
+       Description: Create a service with a endpoint without any Pods, the service MUST run and show empty endpoints. Add a pod to the service and the service MUST validate to show all the endpoints for the ports exposed by the Pod. Add another Pod then the list of all Ports exposed by both the Pods MUST be valid and have corresponding service endpoint. Once the second Pod is deleted then set of endpoint MUST be validated to show only ports from the first container that are exposed. Once both pods are deleted the endpoints from the service MUST be empty.
    */
    framework.ConformanceIt("should serve a basic endpoint from pods ", func() {
        serviceName := "endpoint-test2"
@@ -166,9 +167,9 @@ var _ = SIGDescribe("Services", func() {
    })

    /*
-       Testname: service-valid-endpoints-multiple-ports
-       Description: Ensure a service with no pod, one pod or two pods has
-       valid/accessible endpoints (different port number for pods).
+       Release : v1.9
+       Testname: Service, endpoints with multiple ports
+       Description: Create a service with two ports but no Pods are added to the service yet. The service MUST run and show empty set of endpoints. Add a Pod to the first port, service MUST list one endpoint for the Pod on that port. Add another Pod to the second port, service MUST list both the endpoints. Delete the first Pod and the service MUST list only the endpoint to the second Pod. Delete the second Pod and the service must now have empty set of endpoints.
    */
    framework.ConformanceIt("should serve multiport endpoints from pods ", func() {
        // repacking functionality is intentionally not tested here - it's better to test it in an integration test.
@@ -620,9 +621,9 @@ var _ = SIGDescribe("Services", func() {
            s.Spec.Type = v1.ServiceTypeLoadBalancer
        })
    }
-   serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(tcpService))
+   serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(tcpService))
    if loadBalancerSupportsUDP {
-       serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(udpService))
+       serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(udpService))
    }

    By("waiting for the TCP service to have a load balancer")
@@ -896,7 +897,7 @@ var _ = SIGDescribe("Services", func() {
    s.Spec.Type = v1.ServiceTypeClusterIP
    s.Spec.ExternalName = ""
    s.Spec.Ports = []v1.ServicePort{
-       {Port: 80, Name: "http", Protocol: "TCP"},
+       {Port: 80, Name: "http", Protocol: v1.ProtocolTCP},
    }
})
jig.SanityCheckService(clusterIPService, v1.ServiceTypeClusterIP)
@@ -920,7 +921,7 @@ var _ = SIGDescribe("Services", func() {
    s.Spec.Type = v1.ServiceTypeNodePort
    s.Spec.ExternalName = ""
    s.Spec.Ports = []v1.ServicePort{
-       {Port: 80, Name: "http", Protocol: "TCP"},
+       {Port: 80, Name: "http", Protocol: v1.ProtocolTCP},
    }
})
jig.SanityCheckService(nodePortService, v1.ServiceTypeNodePort)
@@ -1624,8 +1625,8 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
    framework.DescribeSvc(f.Namespace.Name)
}
for _, lb := range serviceLBNames {
-   framework.Logf("cleaning gce resource for %s", lb)
-   framework.CleanupServiceGCEResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
+   framework.Logf("cleaning load balancer resource for %s", lb)
+   framework.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
}
//reset serviceLBNames
serviceLBNames = []string{}
@@ -1637,7 +1638,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
    jig := framework.NewServiceTestJig(cs, serviceName)

    svc := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil)
-   serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(svc))
+   serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
    healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
    if healthCheckNodePort == 0 {
        framework.Failf("Service HealthCheck NodePort was not allocated")
@@ -1709,7 +1710,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
    }

    })
-   serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(svc))
+   serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
    defer func() {
        jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
        Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
@@ -1764,7 +1765,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
    nodes := jig.GetNodes(framework.MaxNodesForEndpointsTests)

    svc := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil)
-   serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(svc))
+   serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
    defer func() {
        jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
        Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
@@ -1817,7 +1818,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
    }

    svc := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil)
-   serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(svc))
+   serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
    defer func() {
        jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
        Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
@@ -2024,9 +2025,9 @@ func execAffinityTestForLBService(f *framework.Framework, cs clientset.Interface
    jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
    defer func() {
        framework.StopServeHostnameService(cs, ns, serviceName)
-       lb := cloudprovider.GetLoadBalancerName(svc)
-       framework.Logf("cleaning gce resource for %s", lb)
-       framework.CleanupServiceGCEResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
+       lb := cloudprovider.DefaultLoadBalancerName(svc)
+       framework.Logf("cleaning load balancer resource for %s", lb)
+       framework.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
    }()
    ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
    port := int(svc.Spec.Ports[0].Port)
7  vendor/k8s.io/kubernetes/test/e2e/network/service_latency.go (generated, vendored)
@@ -47,10 +47,9 @@ var _ = SIGDescribe("Service endpoints latency", func() {
    f := framework.NewDefaultFramework("svc-latency")

    /*
-       Testname: service-endpoint-latency
-       Description: Ensure service endpoint's latency is not high
-       (e.g. p50 < 20 seconds and p99 < 50 seconds). If any call to the
-       service endpoint fails, the test will also fail.
+       Release : v1.9
+       Testname: Service endpoint latency, thresholds
+       Description: Run 100 iterations of create service with the Pod running the pause image, measure the time it takes for creating the service and the endpoint with the service name is available. These durations are captured for 100 iterations, then the durations are sorted to compue 50th, 90th and 99th percentile. The single server latency MUST not exceed liberally set thresholds of 20s for 50th percentile and 50s for the 90th percentile.
    */
    framework.ConformanceIt("should not be very high ", func() {
        const (
247  vendor/k8s.io/kubernetes/test/e2e/network/serviceloadbalancers.go (generated, vendored; file deleted)
@@ -1,247 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package network
-
-import (
-	"fmt"
-	"net/http"
-	"time"
-
-	"k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/util/wait"
-	clientset "k8s.io/client-go/kubernetes"
-	api "k8s.io/kubernetes/pkg/apis/core"
-	"k8s.io/kubernetes/test/e2e/framework"
-	"k8s.io/kubernetes/test/e2e/manifest"
-
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
-)
-
-// getLoadBalancerControllers returns a list of LBCtesters.
-func getLoadBalancerControllers(client clientset.Interface) []LBCTester {
-	return []LBCTester{
-		&haproxyControllerTester{
-			name:   "haproxy",
-			cfg:    "test/e2e/testing-manifests/serviceloadbalancer/haproxyrc.yaml",
-			client: client,
-		},
-	}
-}
-
-// getIngManagers returns a list of ingManagers.
-func getIngManagers(client clientset.Interface) []*ingManager {
-	return []*ingManager{
-		{
-			name:        "netexec",
-			rcCfgPaths:  []string{"test/e2e/testing-manifests/serviceloadbalancer/netexecrc.yaml"},
-			svcCfgPaths: []string{"test/e2e/testing-manifests/serviceloadbalancer/netexecsvc.yaml"},
-			svcNames:    []string{},
-			client:      client,
-		},
-	}
-}
-
-// LBCTester is an interface used to test loadbalancer controllers.
-type LBCTester interface {
-	// start starts the loadbalancer controller in the given namespace
-	start(namespace string) error
-	// lookup returns the address (ip/hostname) associated with ingressKey
-	lookup(ingressKey string) string
-	// stop stops the loadbalancer controller
-	stop() error
-	// name returns the name of the loadbalancer
-	getName() string
-}
-
-// haproxyControllerTester implements LBCTester for bare metal haproxy LBs.
-type haproxyControllerTester struct {
-	client      clientset.Interface
-	cfg         string
-	rcName      string
-	rcNamespace string
-	name        string
-	address     []string
-}
-
-func (h *haproxyControllerTester) getName() string {
-	return h.name
-}
-
-func (h *haproxyControllerTester) start(namespace string) (err error) {
-
-	// Create a replication controller with the given configuration.
-	framework.Logf("Parsing rc from %v", h.cfg)
-	rc, err := manifest.RcFromManifest(h.cfg)
-	Expect(err).NotTo(HaveOccurred())
-	rc.Namespace = namespace
-	rc.Spec.Template.Labels["name"] = rc.Name
-
-	// Add the --namespace arg.
-	// TODO: Remove this when we have proper namespace support.
-	for i, c := range rc.Spec.Template.Spec.Containers {
-		rc.Spec.Template.Spec.Containers[i].Args = append(
-			c.Args, fmt.Sprintf("--namespace=%v", namespace))
-		framework.Logf("Container args %+v", rc.Spec.Template.Spec.Containers[i].Args)
-	}
-
-	rc, err = h.client.CoreV1().ReplicationControllers(rc.Namespace).Create(rc)
-	if err != nil {
-		return
-	}
-	if err = framework.WaitForControlledPodsRunning(h.client, namespace, rc.Name, api.Kind("ReplicationController")); err != nil {
-		return
-	}
-	h.rcName = rc.Name
-	h.rcNamespace = rc.Namespace
-
-	// Find the pods of the rc we just created.
-	labelSelector := labels.SelectorFromSet(
-		labels.Set(map[string]string{"name": h.rcName}))
-	options := metav1.ListOptions{LabelSelector: labelSelector.String()}
-	pods, err := h.client.CoreV1().Pods(h.rcNamespace).List(options)
-	if err != nil {
-		return err
-	}
-
-	// Find the external addresses of the nodes the pods are running on.
-	for _, p := range pods.Items {
-		wait.Poll(1*time.Second, framework.ServiceRespondingTimeout, func() (bool, error) {
-			address, err := framework.GetHostExternalAddress(h.client, &p)
-			if err != nil {
-				framework.Logf("%v", err)
-				return false, nil
-			}
-			h.address = append(h.address, address)
-			return true, nil
-		})
-	}
-	if len(h.address) == 0 {
-		return fmt.Errorf("No external ips found for loadbalancer %v", h.getName())
-	}
-	return nil
-}
-
-func (h *haproxyControllerTester) stop() error {
-	return h.client.CoreV1().ReplicationControllers(h.rcNamespace).Delete(h.rcName, nil)
-}
-
-func (h *haproxyControllerTester) lookup(ingressKey string) string {
-	// The address of a service is the address of the lb/servicename, currently.
-	return fmt.Sprintf("http://%v/%v", h.address[0], ingressKey)
-}
-
-// ingManager starts an rc and the associated service.
-type ingManager struct {
-	rcCfgPaths  []string
-	svcCfgPaths []string
-	ingCfgPath  string
-	name        string
-	namespace   string
-	client      clientset.Interface
-	svcNames    []string
-}
-
-func (s *ingManager) getName() string {
-	return s.name
-}
-
-func (s *ingManager) start(namespace string) (err error) {
-	// Create rcs
-	for _, rcPath := range s.rcCfgPaths {
-		framework.Logf("Parsing rc from %v", rcPath)
-		var rc *v1.ReplicationController
-		rc, err = manifest.RcFromManifest(rcPath)
-		Expect(err).NotTo(HaveOccurred())
-		rc.Namespace = namespace
-		rc.Spec.Template.Labels["name"] = rc.Name
-		rc, err = s.client.CoreV1().ReplicationControllers(rc.Namespace).Create(rc)
-		if err != nil {
-			return
-		}
-		if err = framework.WaitForControlledPodsRunning(s.client, rc.Namespace, rc.Name, api.Kind("ReplicationController")); err != nil {
-			return
-		}
-	}
-	// Create services.
-	// Note that it's up to the caller to make sure the service actually matches
-	// the pods of the rc.
-	for _, svcPath := range s.svcCfgPaths {
-		framework.Logf("Parsing service from %v", svcPath)
-		var svc *v1.Service
-		svc, err = manifest.SvcFromManifest(svcPath)
-		Expect(err).NotTo(HaveOccurred())
-		svc.Namespace = namespace
-		svc, err = s.client.CoreV1().Services(svc.Namespace).Create(svc)
-		if err != nil {
-			return
-		}
-		// TODO: This is short term till we have an Ingress.
-		s.svcNames = append(s.svcNames, svc.Name)
-	}
-	s.name = s.svcNames[0]
-	s.namespace = namespace
-	return nil
-}
-
-func (s *ingManager) test(path string) error {
-	url := fmt.Sprintf("%v/hostName", path)
-	httpClient := &http.Client{}
-	return wait.Poll(1*time.Second, framework.ServiceRespondingTimeout, func() (bool, error) {
-		body, err := framework.SimpleGET(httpClient, url, "")
-		if err != nil {
-			framework.Logf("%v\n%v\n%v", url, body, err)
-			return false, nil
-		}
-		return true, nil
-	})
-}
-
-var _ = SIGDescribe("ServiceLoadBalancer [Feature:ServiceLoadBalancer]", func() {
-	// These variables are initialized after framework's beforeEach.
-	var ns string
-	var client clientset.Interface
-
-	f := framework.NewDefaultFramework("servicelb")
-
-	BeforeEach(func() {
-		client = f.ClientSet
-		ns = f.Namespace.Name
-	})
-
-	It("should support simple GET on Ingress ips", func() {
-		for _, t := range getLoadBalancerControllers(client) {
-			By(fmt.Sprintf("Starting loadbalancer controller %v in namespace %v", t.getName(), ns))
-			Expect(t.start(ns)).NotTo(HaveOccurred())
-
-			for _, s := range getIngManagers(client) {
-				By(fmt.Sprintf("Starting ingress manager %v in namespace %v", s.getName(), ns))
-				Expect(s.start(ns)).NotTo(HaveOccurred())
-
-				for _, sName := range s.svcNames {
-					path := t.lookup(sName)
-					framework.Logf("Testing path %v", path)
-					Expect(s.test(path)).NotTo(HaveOccurred())
-				}
-			}
-
-			Expect(t.stop()).NotTo(HaveOccurred())
-		}
-	})
-})