Bumping k8s dependencies to 1.13

Cheng Xing
2018-11-16 14:08:25 -08:00
parent 305407125c
commit b4c0b68ec7
8002 changed files with 884099 additions and 276228 deletions

@@ -66,15 +66,15 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
utilyaml "k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/apimachinery/pkg/watch"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
scaleclient "k8s.io/client-go/scale"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
+ watchtools "k8s.io/client-go/tools/watch"
"k8s.io/kubernetes/pkg/api/legacyscheme"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
@@ -87,6 +87,7 @@ import (
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/controller"
nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
"k8s.io/kubernetes/pkg/controller/service"
"k8s.io/kubernetes/pkg/features"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/kubelet/util/format"
@@ -200,19 +201,10 @@ const (
// ssh port
sshPort = "22"
- // ImagePrePullingTimeout is the time we wait for the e2e-image-puller
- // static pods to pull the list of seeded images. If they don't pull
- // images within this time we simply log their output and carry on
- // with the tests.
- ImagePrePullingTimeout = 5 * time.Minute
)
var (
BusyBoxImage = "busybox"
// Label allocated to the image puller static pod that runs on each node
// before e2es.
ImagePullerLabels = map[string]string{"name": "e2e-image-puller"}
+ BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
// For parsing Kubectl version for version-skewed testing.
gitVersionRegexp = regexp.MustCompile("GitVersion:\"(v.+?)\"")
@@ -250,11 +242,6 @@ func GetServerArchitecture(c clientset.Interface) string {
return arch
}
- // GetPauseImageName fetches the pause image name for the same architecture as the apiserver.
- func GetPauseImageName(c clientset.Interface) string {
- return imageutils.GetE2EImageWithArch(imageutils.Pause, GetServerArchitecture(c))
- }
func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) {
return request.Resource("services").SubResource("proxy"), nil
}
@@ -272,7 +259,7 @@ type ContainerFailures struct {
func GetMasterHost() string {
masterUrl, err := url.Parse(TestContext.Host)
ExpectNoError(err)
- return masterUrl.Host
+ return masterUrl.Hostname()
}
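
For context on the switch from masterUrl.Host to masterUrl.Hostname() just above: a url.URL's Host field keeps any port (and IPv6 brackets), while Hostname() returns only the host part. A minimal standalone sketch, with a made-up API server URL:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Hypothetical API server URL; TestContext.Host typically carries a port.
	u, err := url.Parse("https://k8s-master.example.com:6443")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Host)       // "k8s-master.example.com:6443"
	fmt.Println(u.Hostname()) // "k8s-master.example.com"
}
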
func nowStamp() string {
@@ -638,7 +625,7 @@ func WaitForPodsSuccess(c clientset.Interface, ns string, successPodLabels map[s
//
// If ignoreLabels is not empty, pods matching this selector are ignored.
func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
- ignoreSelector := labels.SelectorFromSet(ignoreLabels)
+ ignoreSelector := labels.SelectorFromSet(map[string]string{})
start := time.Now()
Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
timeout, minPods, ns)
@@ -738,6 +725,40 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
return nil
}
+ // WaitForDaemonSets waits for all daemonsets in the given namespace to be ready
+ // (a daemonset counts as ready when at most 'allowedNotReadyNodes' of its
+ // desired pods are not yet ready).
+ func WaitForDaemonSets(c clientset.Interface, ns string, allowedNotReadyNodes int32, timeout time.Duration) error {
+ start := time.Now()
+ Logf("Waiting up to %v for all daemonsets in namespace '%s' to start",
+ timeout, ns)
+ return wait.PollImmediate(Poll, timeout, func() (bool, error) {
+ dsList, err := c.AppsV1().DaemonSets(ns).List(metav1.ListOptions{})
+ if err != nil {
+ Logf("Error getting daemonsets in namespace: '%s': %v", ns, err)
+ if testutils.IsRetryableAPIError(err) {
+ return false, nil
+ }
+ return false, err
+ }
+ var notReadyDaemonSets []string
+ for _, ds := range dsList.Items {
+ Logf("%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)", ds.Status.NumberReady, ds.Status.DesiredNumberScheduled, ns, ds.ObjectMeta.Name, int(time.Since(start).Seconds()))
+ if ds.Status.DesiredNumberScheduled-ds.Status.NumberReady > allowedNotReadyNodes {
+ notReadyDaemonSets = append(notReadyDaemonSets, ds.ObjectMeta.Name)
+ }
+ }
+ if len(notReadyDaemonSets) > 0 {
+ Logf("there are not ready daemonsets: %v", notReadyDaemonSets)
+ return false, nil
+ }
+ return true, nil
+ })
+ }
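
A possible caller of the new WaitForDaemonSets helper above; the namespace, tolerance, and timeout are illustrative values, not taken from this commit, and the sketch assumes it sits in the same framework package:

// waitForSystemDaemonSets is a hypothetical caller: require every scheduled
// daemon pod in kube-system to be ready within five minutes.
func waitForSystemDaemonSets(c clientset.Interface) error {
	return WaitForDaemonSets(c, "kube-system", 0 /* allowedNotReadyNodes */, 5*time.Minute)
}
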
func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) {
for _, container := range pod.Spec.Containers {
if strings.Contains(container.Name, containerNameSubstr) {
@@ -862,7 +883,9 @@ func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountN
if err != nil {
return err
}
- _, err = watch.Until(timeout, w, conditions.ServiceAccountHasSecrets)
+ ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
+ defer cancel()
+ _, err = watchtools.UntilWithoutRetry(ctx, w, conditions.ServiceAccountHasSecrets)
return err
}
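
The rewrite from watch.Until to watchtools.UntilWithoutRetry above recurs in the next hunk. A minimal sketch of the shared pattern, assuming an already-open watch.Interface and the context/watch/watchtools imports shown in this file; the helper name and condition are invented:

// waitForWatchEvent bounds a watch with an optional timeout and blocks until
// the condition reports true, the watch fails, or the context is cancelled.
// ContextWithOptionalTimeout, as its name suggests, applies the deadline only
// when timeout is non-zero.
func waitForWatchEvent(w watch.Interface, timeout time.Duration, cond func(watch.Event) (bool, error)) error {
	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
	defer cancel()
	_, err := watchtools.UntilWithoutRetry(ctx, w, cond)
	return err
}
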
@@ -1578,7 +1601,9 @@ func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.D
if err != nil {
return err
}
- _, err = watch.Until(timeout, w, func(event watch.Event) (bool, error) {
+ ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
+ defer cancel()
+ _, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
switch event.Type {
case watch.Deleted:
return false, apierrs.NewNotFound(schema.GroupResource{Resource: "replicationcontrollers"}, "")
@@ -2621,6 +2646,8 @@ func GetReadyNodesIncludingTaintedOrDie(c clientset.Interface) (nodes *v1.NodeLi
return nodes
}
+ // WaitForAllNodesSchedulable waits up to timeout for all nodes (except up to
+ // TestContext.AllowedNotReadyNodes of them) to become schedulable.
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)
@@ -2643,7 +2670,13 @@ func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) er
}
for i := range nodes.Items {
node := &nodes.Items[i]
- if !isNodeSchedulable(node) {
+ if _, hasMasterRoleLabel := node.ObjectMeta.Labels[service.LabelNodeRoleMaster]; hasMasterRoleLabel {
+ // Kops clusters have masters with spec.unschedulable = false and
+ // node-role.kubernetes.io/master NoSchedule taint.
+ // Don't wait for them.
+ continue
+ }
+ if !isNodeSchedulable(node) || !isNodeUntainted(node) {
notSchedulable = append(notSchedulable, node)
}
}
@@ -2659,10 +2692,11 @@ func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) er
if len(nodes.Items) >= largeClusterThreshold && attempt%10 == 0 {
Logf("Unschedulable nodes:")
for i := range notSchedulable {
Logf("-> %s Ready=%t Network=%t",
Logf("-> %s Ready=%t Network=%t Taints=%v",
notSchedulable[i].Name,
IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeReady, true),
- IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeNetworkUnavailable, false))
+ IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeNetworkUnavailable, false),
+ notSchedulable[i].Spec.Taints)
}
Logf("================================")
}
@@ -4119,7 +4153,9 @@ func CheckNodesReady(c clientset.Interface, size int, timeout time.Duration) ([]
// Filter out not-ready nodes.
FilterNodes(nodes, func(node v1.Node) bool {
- return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
+ nodeReady := IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
+ networkReady := IsNodeConditionUnset(&node, v1.NodeNetworkUnavailable) || IsNodeConditionSetAsExpected(&node, v1.NodeNetworkUnavailable, false)
+ return nodeReady && networkReady
})
numReady := len(nodes.Items)
@@ -4492,7 +4528,7 @@ func isElementOf(podUID types.UID, pods *v1.PodList) bool {
const proxyTimeout = 2 * time.Minute
// NodeProxyRequest performs a get on a node proxy endpoint given the nodename and rest client.
- func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.Result, error) {
+ func NodeProxyRequest(c clientset.Interface, node, endpoint string, port int) (restclient.Result, error) {
// proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
// This will leak a goroutine if proxy hangs. #22165
var result restclient.Result
@@ -4501,7 +4537,7 @@ func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.
result = c.CoreV1().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
Name(fmt.Sprintf("%v:%v", node, port)).
Suffix(endpoint).
Do()
@@ -4529,7 +4565,7 @@ func GetKubeletRunningPods(c clientset.Interface, node string) (*v1.PodList, err
func getKubeletPods(c clientset.Interface, node, resource string) (*v1.PodList, error) {
result := &v1.PodList{}
- client, err := NodeProxyRequest(c, node, resource)
+ client, err := NodeProxyRequest(c, node, resource, ports.KubeletPort)
if err != nil {
return &v1.PodList{}, err
}
@@ -4798,7 +4834,8 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeList) {
nodes := &v1.NodeList{}
masters := sets.NewString()
- all, _ := c.CoreV1().Nodes().List(metav1.ListOptions{})
+ all, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
+ ExpectNoError(err)
for _, n := range all.Items {
if system.IsMasterNode(n.Name) {
masters.Insert(n.Name)
@@ -4842,8 +4879,8 @@ func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testut
func (p *E2ETestNodePreparer) PrepareNodes() error {
nodes := GetReadySchedulableNodesOrDie(p.client)
numTemplates := 0
- for k := range p.countToStrategy {
- numTemplates += k
+ for _, v := range p.countToStrategy {
+ numTemplates += v.Count
}
if numTemplates > len(nodes.Items) {
return fmt.Errorf("Can't prepare Nodes. Got more templates than existing Nodes.")
@@ -4999,7 +5036,7 @@ func GetMasterAddress(c clientset.Interface) string {
// GetNodeExternalIP returns node external IP concatenated with port 22 for ssh
// e.g. 1.2.3.4:22
- func GetNodeExternalIP(node *v1.Node) string {
+ func GetNodeExternalIP(node *v1.Node) (string, error) {
Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
@@ -5009,9 +5046,26 @@ func GetNodeExternalIP(node *v1.Node) string {
}
}
if host == "" {
Failf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
return "", fmt.Errorf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
}
- return host
+ return host, nil
}
+ // GetNodeInternalIP returns node internal IP
+ func GetNodeInternalIP(node *v1.Node) (string, error) {
+ host := ""
+ for _, address := range node.Status.Addresses {
+ if address.Type == v1.NodeInternalIP {
+ if address.Address != "" {
+ host = net.JoinHostPort(address.Address, sshPort)
+ break
+ }
+ }
+ }
+ if host == "" {
+ return "", fmt.Errorf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
+ }
+ return host, nil
+ }
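
An aside on the net.JoinHostPort call in the new helper above: unlike plain string concatenation, it brackets IPv6 literals, so the returned host:port is always dialable. A minimal sketch:

package main

import (
	"fmt"
	"net"
)

func main() {
	fmt.Println(net.JoinHostPort("10.0.0.4", "22")) // "10.0.0.4:22"
	fmt.Println(net.JoinHostPort("fd00::1", "22"))  // "[fd00::1]:22"
}
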
// SimpleGET executes a get on the given url, returns error if non-200 returned.
@@ -5076,7 +5130,7 @@ func (f *Framework) NewTestPod(name string, requests v1.ResourceList, limits v1.
Containers: []v1.Container{
{
Name: "pause",
- Image: GetPauseImageName(f.ClientSet),
+ Image: imageutils.GetPauseImageName(),
Resources: v1.ResourceRequirements{
Requests: requests,
Limits: limits,