Bumping k8s dependencies to 1.13

commit b4c0b68ec7
parent 305407125c
Author: Cheng Xing
Date: 2018-11-16 14:08:25 -08:00

8002 changed files with 884099 additions and 276228 deletions

test/e2e/kubectl/BUILD

@@ -16,6 +16,19 @@ go_library(
     deps = [
         "//pkg/controller:go_default_library",
         "//pkg/kubectl/polymorphichelpers:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/generated:go_default_library",
         "//test/e2e/scheduling:go_default_library",
@@ -26,19 +39,6 @@ go_library(
         "//vendor/github.com/onsi/ginkgo:go_default_library",
         "//vendor/github.com/onsi/gomega:go_default_library",
         "//vendor/golang.org/x/net/websocket:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
         "//vendor/k8s.io/utils/exec:go_default_library",
     ],
 )

test/e2e/kubectl/kubectl.go

@@ -26,7 +26,6 @@ import (
     "io"
     "io/ioutil"
     "log"
-    "mime/multipart"
     "net"
     "net/http"
     "net/http/httptest"
@@ -93,15 +92,15 @@ var (
     nautilusImage = imageutils.GetE2EImage(imageutils.Nautilus)
     kittenImage   = imageutils.GetE2EImage(imageutils.Kitten)
     redisImage    = imageutils.GetE2EImage(imageutils.Redis)
-    nginxImage    = imageutils.GetE2EImage(imageutils.NginxSlim)
-    busyboxImage  = "busybox"
+    nginxImage    = imageutils.GetE2EImage(imageutils.Nginx)
+    busyboxImage  = imageutils.GetE2EImage(imageutils.BusyBox)
 )

 var testImages = struct {
     GBFrontendImage   string
     PauseImage        string
-    NginxSlimImage    string
-    NginxSlimNewImage string
+    NginxImage        string
+    NginxNewImage     string
     RedisImage        string
     GBRedisSlaveImage string
     NautilusImage     string
@@ -109,8 +108,8 @@ var testImages = struct {
 }{
     imageutils.GetE2EImage(imageutils.GBFrontend),
     imageutils.GetE2EImage(imageutils.Pause),
-    imageutils.GetE2EImage(imageutils.NginxSlim),
-    imageutils.GetE2EImage(imageutils.NginxSlimNew),
+    imageutils.GetE2EImage(imageutils.Nginx),
+    imageutils.GetE2EImage(imageutils.NginxNew),
     imageutils.GetE2EImage(imageutils.Redis),
     imageutils.GetE2EImage(imageutils.GBRedisSlave),
     imageutils.GetE2EImage(imageutils.Nautilus),
@@ -141,11 +140,11 @@ func substituteImageName(content string) string {
     contentWithImageName := new(bytes.Buffer)
     tmpl, err := template.New("imagemanifest").Parse(content)
     if err != nil {
-        framework.Failf("Failed Parse the template:", err)
+        framework.Failf("Failed Parse the template: %v", err)
     }
     err = tmpl.Execute(contentWithImageName, testImages)
     if err != nil {
-        framework.Failf("Failed executing template:", err)
+        framework.Failf("Failed executing template: %v", err)
     }
     return contentWithImageName.String()
 }
@@ -183,8 +182,6 @@ var _ = SIGDescribe("Kubectl alpha client", func() {
ns = f.Namespace.Name
})
// Customized Wait / ForEach wrapper for this test. These demonstrate the
framework.KubeDescribe("Kubectl run CronJob", func() {
var nsFlag string
var cjName string
@@ -213,7 +210,7 @@ var _ = SIGDescribe("Kubectl alpha client", func() {
framework.Failf("Failed creating a CronJob with correct schedule %s", schedule)
}
containers := sj.Spec.JobTemplate.Spec.Template.Spec.Containers
if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
if checkContainersImage(containers, busyboxImage) {
framework.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers)
}
if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
@@ -255,10 +252,42 @@ var _ = SIGDescribe("Kubectl client", func() {
if err != nil || len(pods) < atLeast {
// TODO: Generalize integrating debug info into these tests so we always get debug info when we need it
framework.DumpAllNamespaceInfo(f.ClientSet, ns)
framework.Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
framework.Failf("Verified %d of %d pods , error: %v", len(pods), atLeast, err)
}
}
debugDiscovery := func() {
home := os.Getenv("HOME")
if len(home) == 0 {
framework.Logf("no $HOME envvar set")
return
}
cacheDir := filepath.Join(home, ".kube", "cache", "discovery")
err := filepath.Walk(cacheDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// only pay attention to $host_$port/v1/serverresources.json files
subpath := strings.TrimPrefix(path, cacheDir+string(filepath.Separator))
parts := filepath.SplitList(subpath)
if len(parts) != 3 || parts[1] != "v1" || parts[2] != "serverresources.json" {
return nil
}
framework.Logf("%s modified at %s (current time: %s)", path, info.ModTime(), time.Now())
data, readError := ioutil.ReadFile(path)
if readError != nil {
framework.Logf("%s error: %v", path, readError)
} else {
framework.Logf("%s content: %s", path, string(data))
}
return nil
})
framework.Logf("scanned %s for discovery docs: %v", home, err)
}
framework.KubeDescribe("Update Demo", func() {
var nautilus, kitten string
BeforeEach(func() {
@@ -291,9 +320,11 @@ var _ = SIGDescribe("Kubectl client", func() {
framework.RunKubectlOrDieInput(nautilus, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
By("scaling down the replication controller")
debugDiscovery()
framework.RunKubectlOrDie("scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns))
framework.ValidateController(c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
By("scaling up the replication controller")
debugDiscovery()
framework.RunKubectlOrDie("scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns))
framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
})
@@ -308,6 +339,7 @@ var _ = SIGDescribe("Kubectl client", func() {
framework.RunKubectlOrDieInput(string(nautilus[:]), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
By("rolling-update to new replication controller")
debugDiscovery()
framework.RunKubectlOrDieInput(string(kitten[:]), "rolling-update", "update-demo-nautilus", "--update-period=1s", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
framework.ValidateController(c, kittenImage, 2, "update-demo", updateDemoSelector, getUDData("kitten.jpg", ns), ns)
// Everything will hopefully be cleaned up when the namespace is deleted.
@@ -393,7 +425,7 @@ var _ = SIGDescribe("Kubectl client", func() {
defer closer.Close()
By("executing a command in the container with pseudo-interactive stdin")
execOutput = framework.NewKubectlCommand("exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "bash").
execOutput = framework.NewKubectlCommand("exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "sh").
WithStdinReader(r).
ExecOrDie()
if e, a := "hi", strings.TrimSpace(execOutput); e != a {
@@ -529,7 +561,7 @@ var _ = SIGDescribe("Kubectl client", func() {
ExecOrDie()
Expect(runOutput).ToNot(ContainSubstring("stdin closed"))
g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
runTestPod, _, err := polymorphichelpers.GetFirstPod(f.InternalClientset.Core(), ns, "run=run-test-3", 1*time.Minute, g)
runTestPod, _, err := polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g)
if err != nil {
os.Exit(1)
}
@@ -776,6 +808,7 @@ metadata:
             By("scale set replicas to 3")
             nginxDeploy := "nginx-deployment"
+            debugDiscovery()
             framework.RunKubectlOrDie("scale", "deployment", nginxDeploy, "--replicas=3", nsFlag)

             By("apply file doesn't have replicas but image changed")
@@ -783,7 +816,7 @@ metadata:
             By("verify replicas still is 3 and image has been updated")
             output = framework.RunKubectlOrDieInput(deployment3Yaml, "get", "-f", "-", nsFlag, "-o", "json")
-            requiredItems := []string{"\"replicas\": 3", imageutils.GetE2EImage(imageutils.NginxSlim)}
+            requiredItems := []string{"\"replicas\": 3", imageutils.GetE2EImage(imageutils.Nginx)}
             for _, item := range requiredItems {
                 if !strings.Contains(output, item) {
                     framework.Failf("Missing %s in kubectl apply", item)
@@ -803,9 +836,6 @@ metadata:
             output := framework.RunKubectlOrDie("cluster-info")
             // Can't check exact strings due to terminal control commands (colors)
             requiredItems := []string{"Kubernetes master", "is running at"}
-            if framework.ProviderIs("gce", "gke") {
-                requiredItems = append(requiredItems, "Heapster")
-            }
             for _, item := range requiredItems {
                 if !strings.Contains(output, item) {
                     framework.Failf("Missing %s in kubectl cluster-info", item)
@@ -1236,7 +1266,7 @@ metadata:
                 framework.Failf("Failed getting rc %s: %v", rcName, err)
             }
             containers := rc.Spec.Template.Spec.Containers
-            if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
+            if checkContainersImage(containers, nginxImage) {
                 framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage)
             }
@@ -1297,13 +1327,14 @@ metadata:
                 framework.Failf("Failed getting rc %s: %v", rcName, err)
             }
             containers := rc.Spec.Template.Spec.Containers
-            if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
+            if checkContainersImage(containers, nginxImage) {
                 framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage)
             }
             framework.WaitForRCToStabilize(c, ns, rcName, framework.PodStartTimeout)

             By("rolling-update to same image controller")
+            debugDiscovery()
             runKubectlRetryOrDie("rolling-update", rcName, "--update-period=1s", "--image="+nginxImage, "--image-pull-policy="+string(v1.PullIfNotPresent), nsFlag)
             framework.ValidateController(c, nginxImage, 1, rcName, "run="+rcName, noOpValidatorFn, ns)
         })
@@ -1346,7 +1377,7 @@ metadata:
                 framework.Failf("Failed getting deployment %s: %v", dName, err)
             }
             containers := d.Spec.Template.Spec.Containers
-            if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
+            if checkContainersImage(containers, nginxImage) {
                 framework.Failf("Failed creating deployment %s for 1 pod with expected image %s", dName, nginxImage)
             }
@@ -1391,7 +1422,7 @@ metadata:
                 framework.Failf("Failed getting job %s: %v", jobName, err)
             }
             containers := job.Spec.Template.Spec.Containers
-            if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
+            if checkContainersImage(containers, nginxImage) {
                 framework.Failf("Failed creating job %s for 1 pod with expected image %s: %#v", jobName, nginxImage, containers)
             }
             if job.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
@@ -1428,7 +1459,7 @@ metadata:
                 framework.Failf("Failed creating a CronJob with correct schedule %s", schedule)
             }
             containers := cj.Spec.JobTemplate.Spec.Template.Spec.Containers
-            if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
+            if checkContainersImage(containers, busyboxImage) {
                 framework.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers)
             }
             if cj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
@@ -1464,7 +1495,7 @@ metadata:
                 framework.Failf("Failed getting pod %s: %v", podName, err)
             }
             containers := pod.Spec.Containers
-            if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
+            if checkContainersImage(containers, nginxImage) {
                 framework.Failf("Failed creating pod %s with expected image %s", podName, nginxImage)
             }
             if pod.Spec.RestartPolicy != v1.RestartPolicyNever {
@@ -1518,7 +1549,7 @@ metadata:
                 framework.Failf("Failed getting deployment %s: %v", podName, err)
             }
             containers := pod.Spec.Containers
-            if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
+            if checkContainersImage(containers, busyboxImage) {
                 framework.Failf("Failed creating pod with expected image %s", busyboxImage)
             }
         })
@@ -1834,6 +1865,10 @@ func checkKubectlOutputWithRetry(required [][]string, args ...string) {
     return
 }

+func checkContainersImage(containers []v1.Container, expectImage string) bool {
+    return containers == nil || len(containers) != 1 || containers[0].Image != expectImage
+}
+
 func getAPIVersions(apiEndpoint string) (*metav1.APIVersions, error) {
     body, err := curl(apiEndpoint)
     if err != nil {
@@ -1959,21 +1994,6 @@ type updateDemoData struct {

 const applyTestLabel = "kubectl.kubernetes.io/apply-test"

-func readBytesFromFile(filename string) []byte {
-    file, err := os.Open(filename)
-    if err != nil {
-        framework.Failf(err.Error())
-    }
-    defer file.Close()
-
-    data, err := ioutil.ReadAll(file)
-    if err != nil {
-        framework.Failf(err.Error())
-    }
-
-    return data
-}
-
 func readReplicationControllerFromString(contents string) *v1.ReplicationController {
     rc := v1.ReplicationController{}
     if err := yaml.Unmarshal([]byte(contents), &rc); err != nil {
@@ -2090,47 +2110,6 @@ func newBlockingReader(s string) (io.Reader, io.Closer, error) {
     return r, w, nil
 }

-// newStreamingUpload creates a new http.Request that will stream POST
-// a file to a URI.
-func newStreamingUpload(filePath string) (*io.PipeReader, *multipart.Writer, error) {
-    file, err := os.Open(filePath)
-    if err != nil {
-        return nil, nil, err
-    }
-    defer file.Close()
-
-    r, w := io.Pipe()
-    postBodyWriter := multipart.NewWriter(w)
-
-    go streamingUpload(file, filepath.Base(filePath), postBodyWriter, w)
-    return r, postBodyWriter, err
-}
-
-// streamingUpload streams a file via a pipe through a multipart.Writer.
-// Generally one should use newStreamingUpload instead of calling this directly.
-func streamingUpload(file *os.File, fileName string, postBodyWriter *multipart.Writer, w *io.PipeWriter) {
-    defer GinkgoRecover()
-    defer file.Close()
-    defer w.Close()
-
-    // Set up the form file
-    fileWriter, err := postBodyWriter.CreateFormFile("file", fileName)
-    if err != nil {
-        framework.Failf("Unable to to write file at %s to buffer. Error: %s", fileName, err)
-    }
-
-    // Copy kubectl binary into the file writer
-    if _, err := io.Copy(fileWriter, file); err != nil {
-        framework.Failf("Unable to to copy file at %s into the file writer. Error: %s", fileName, err)
-    }
-
-    // Nothing more should be written to this instance of the postBodyWriter
-    if err := postBodyWriter.Close(); err != nil {
-        framework.Failf("Unable to close the writer for file upload. Error: %s", err)
-    }
-}
-
 func startLocalProxy() (srv *httptest.Server, logs *bytes.Buffer) {
     logs = &bytes.Buffer{}
     p := goproxy.NewProxyHttpServer()