Bumping k8s dependencies to 1.13

Cheng Xing
2018-11-16 14:08:25 -08:00
parent 305407125c
commit b4c0b68ec7
8002 changed files with 884099 additions and 276228 deletions


@@ -12,6 +12,7 @@ go_library(
"cluster_upgrade.go",
"framework.go",
"ha_master.go",
"kubelet_security.go",
"reboot.go",
"resize_nodes.go",
"restart.go",
@@ -20,7 +21,16 @@ go_library(
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/kubelet/pod:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/util/version:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/chaosmonkey:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
@@ -34,14 +44,6 @@ go_library(
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/golang.org/x/crypto/ssh:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)


@@ -14,16 +14,16 @@ go_library(
],
importpath = "k8s.io/kubernetes/test/e2e/lifecycle/bootstrap",
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/tools/bootstrap/token/api:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/lifecycle:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/bootstrap/token/api:go_default_library",
],
)


@@ -304,6 +304,26 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
runUpgradeSuite(f, gpuUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
})
})
Describe("cluster downgrade", func() {
It("should be able to run gpu pod after downgrade [Feature:GPUClusterDowngrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "GPU cluster downgrade"}
gpuDowngradeTest := &junit.TestCase{Name: "[sig-node] gpu-cluster-downgrade", Classname: "upgrade_tests"}
testSuite.TestCases = append(testSuite.TestCases, gpuDowngradeTest)
upgradeFunc := func() {
start := time.Now()
defer finalizeUpgradeTest(start, gpuDowngradeTest)
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
framework.ExpectNoError(framework.MasterUpgrade(target))
framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
}
runUpgradeSuite(f, gpuUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
})
})
})
var _ = Describe("[sig-apps] stateful Upgrade [Feature:StatefulUpgrade]", func() {
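A note on the block added above: for a downgrade, the nodes are moved to the target version first (NodeUpgrade, CheckNodesVersions) and the control plane second (MasterUpgrade, CheckMasterVersion). The upgrade tests earlier in this file presumably perform the same steps in the reverse order; the following is a sketch of that shape for contrast only, with the TestCase variable name assumed rather than taken from this hunk:

upgradeFunc := func() {
	start := time.Now()
	defer finalizeUpgradeTest(start, gpuUpgradeTest) // gpuUpgradeTest: assumed name of the upgrade TestCase
	target := upgCtx.Versions[1].Version.String()
	// Control plane first, then nodes: the mirror image of the downgrade block above.
	framework.ExpectNoError(framework.MasterUpgrade(target))
	framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
	framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
	framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
}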


@@ -0,0 +1,85 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package lifecycle

import (
	"fmt"
	"net"
	"net/http"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/master/ports"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
	f := framework.NewDefaultFramework("kubelet-security")

	var node *v1.Node
	var nodeName string

	BeforeEach(func() {
		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		Expect(len(nodes.Items)).NotTo(BeZero())
		node = &nodes.Items[0]
		nodeName = node.Name
	})

	// Make sure the kubelet read-only (10255) and cadvisor (4194) ports are disabled via the API server proxy.
	It(fmt.Sprintf("should not be able to proxy to the readonly kubelet port %v using proxy subresource", ports.KubeletReadOnlyPort), func() {
		result, err := framework.NodeProxyRequest(f.ClientSet, nodeName, "pods/", ports.KubeletReadOnlyPort)
		Expect(err).NotTo(HaveOccurred())

		var statusCode int
		result.StatusCode(&statusCode)
		Expect(statusCode).NotTo(Equal(http.StatusOK))
	})

	It("should not be able to proxy to cadvisor port 4194 using proxy subresource", func() {
		result, err := framework.NodeProxyRequest(f.ClientSet, nodeName, "containers/", 4194)
		Expect(err).NotTo(HaveOccurred())

		var statusCode int
		result.StatusCode(&statusCode)
		Expect(statusCode).NotTo(Equal(http.StatusOK))
	})

	// Make sure the kubelet read-only (10255) and cadvisor (4194) ports are closed on the public IP addresses.
	disabledPorts := []int{ports.KubeletReadOnlyPort, 4194}
	for _, port := range disabledPorts {
		port := port // capture the loop variable for the closure below
		It(fmt.Sprintf("should not have port %d open on all its public IP addresses", port), func() {
			portClosedTest(f, node, port)
		})
	}
})

// portClosedTest checks that the given port is closed on each of the node's external addresses.
func portClosedTest(f *framework.Framework, pickNode *v1.Node, port int) {
	nodeAddrs := framework.GetNodeAddresses(pickNode, v1.NodeExternalIP)
	Expect(len(nodeAddrs)).NotTo(BeZero())

	for _, addr := range nodeAddrs {
		conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", addr, port), 1*time.Minute)
		if err == nil {
			conn.Close()
			framework.Failf("port %d is not disabled", port)
		}
	}
}
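The portClosedTest helper above only needs TCP reachability, so the same check can be run by hand against a suspect node outside the e2e framework. A minimal standalone sketch (NODE_IP is a placeholder for a node's external address; the 5-second timeout is arbitrary):

package main

import (
	"fmt"
	"net"
	"os"
	"time"
)

// portOpen dials addr:port the same way portClosedTest does; a successful
// dial means the port is reachable (for 10255/4194, that the insecure
// kubelet/cadvisor endpoint is still exposed).
func portOpen(addr string, port int) bool {
	conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", addr, port), 5*time.Second)
	if err != nil {
		return false
	}
	conn.Close()
	return true
}

func main() {
	addr := os.Getenv("NODE_IP") // placeholder: external IP of the node under test
	for _, port := range []int{10255, 4194} {
		fmt.Printf("%s:%d open=%v\n", addr, port, portOpen(addr, port))
	}
}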


@@ -98,13 +98,13 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
It("each node by ordering unclean reboot and ensure they function upon restart", func() {
// unclean shutdown and restart
// We sleep 10 seconds to give the ssh command some time to finish cleanly before the node is shut down.
testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && echo b | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil)
testReboot(f.ClientSet, "nohup sh -c 'echo 1 | sudo tee /proc/sys/kernel/sysrq && sleep 10 && echo b | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil)
})
It("each node by triggering kernel panic and ensure they function upon restart", func() {
// kernel panic
// We sleep 10 seconds to give the ssh command some time to finish cleanly before the kernel panic is triggered.
testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && echo c | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil)
testReboot(f.ClientSet, "nohup sh -c 'echo 1 | sudo tee /proc/sys/kernel/sysrq && sleep 10 && echo c | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil)
})
It("each node by switching off the network interface and ensure they function upon switch on", func() {


@@ -45,13 +45,12 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
var systemPodsNo int32
var c clientset.Interface
var ns string
ignoreLabels := framework.ImagePullerLabels
var group string
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
systemPods, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
systemPods, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
Expect(err).NotTo(HaveOccurred())
systemPodsNo = int32(len(systemPods))
if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
@@ -104,10 +103,8 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
// the cluster is restored to health.
By("waiting for system pods to successfully restart")
err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, ignoreLabels)
err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
Expect(err).NotTo(HaveOccurred())
By("waiting for image prepulling pods to complete")
framework.WaitForPodsSuccess(c, metav1.NamespaceSystem, framework.ImagePullerLabels, framework.ImagePrePullingTimeout)
})
It("should be able to delete nodes", func() {