Bumping k8s dependencies to 1.13

vendor/k8s.io/kubernetes/test/e2e/framework/BUILD (generated, vendored): 107 changes

@@ -15,6 +15,7 @@ go_library(
         "deployment_util.go",
         "exec_util.go",
         "firewall_util.go",
+        "flake_reporting_util.go",
         "framework.go",
         "get-kubemark-resource-usage.go",
         "google_compute.go",
@@ -58,10 +59,12 @@ go_library(
         "//pkg/cloudprovider/providers/gce:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/controller/deployment/util:go_default_library",
+        "//pkg/controller/job:go_default_library",
         "//pkg/controller/nodelifecycle:go_default_library",
+        "//pkg/controller/service:go_default_library",
         "//pkg/features:go_default_library",
         "//pkg/kubelet/apis:go_default_library",
-        "//pkg/kubelet/apis/kubeletconfig:go_default_library",
+        "//pkg/kubelet/apis/config:go_default_library",
         "//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
         "//pkg/kubelet/dockershim/metrics:go_default_library",
         "//pkg/kubelet/events:go_default_library",
@@ -80,6 +83,58 @@ go_library(
         "//pkg/util/taints:go_default_library",
         "//pkg/util/version:go_default_library",
         "//pkg/volume/util:go_default_library",
+        "//staging/src/k8s.io/api/apps/v1:go_default_library",
+        "//staging/src/k8s.io/api/apps/v1beta2:go_default_library",
+        "//staging/src/k8s.io/api/authorization/v1beta1:go_default_library",
+        "//staging/src/k8s.io/api/batch/v1:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
+        "//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
+        "//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
+        "//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
+        "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
+        "//staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/version:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library",
+        "//staging/src/k8s.io/client-go/discovery:go_default_library",
+        "//staging/src/k8s.io/client-go/discovery/cached:go_default_library",
+        "//staging/src/k8s.io/client-go/dynamic:go_default_library",
+        "//staging/src/k8s.io/client-go/informers:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library",
+        "//staging/src/k8s.io/client-go/rest:go_default_library",
+        "//staging/src/k8s.io/client-go/restmapper:go_default_library",
+        "//staging/src/k8s.io/client-go/scale:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
+        "//staging/src/k8s.io/client-go/util/retry:go_default_library",
+        "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
+        "//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
         "//test/e2e/framework/ginkgowrapper:go_default_library",
         "//test/e2e/framework/metrics:go_default_library",
         "//test/e2e/manifest:go_default_library",
@@ -103,56 +158,6 @@ go_library(
         "//vendor/golang.org/x/net/websocket:go_default_library",
         "//vendor/google.golang.org/api/compute/v1:go_default_library",
         "//vendor/google.golang.org/api/googleapi:go_default_library",
-        "//vendor/k8s.io/api/apps/v1:go_default_library",
-        "//vendor/k8s.io/api/apps/v1beta2:go_default_library",
-        "//vendor/k8s.io/api/authorization/v1beta1:go_default_library",
-        "//vendor/k8s.io/api/batch/v1:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
-        "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
-        "//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
-        "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
-        "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
-        "//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/version:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library",
-        "//vendor/k8s.io/client-go/discovery:go_default_library",
-        "//vendor/k8s.io/client-go/discovery/cached:go_default_library",
-        "//vendor/k8s.io/client-go/dynamic:go_default_library",
-        "//vendor/k8s.io/client-go/informers:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library",
-        "//vendor/k8s.io/client-go/rest:go_default_library",
-        "//vendor/k8s.io/client-go/restmapper:go_default_library",
-        "//vendor/k8s.io/client-go/scale:go_default_library",
-        "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
-        "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
-        "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library",
-        "//vendor/k8s.io/client-go/util/retry:go_default_library",
-        "//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
         "//vendor/k8s.io/utils/exec:go_default_library",
     ],
 )

vendor/k8s.io/kubernetes/test/e2e/framework/authorizer_util.go (generated, vendored): 10 changes

@@ -17,7 +17,7 @@ limitations under the License.
 package framework
 
 import (
-    "fmt"
+    "github.com/golang/glog"
     "sync"
     "time"
 
@@ -62,7 +62,7 @@ func WaitForNamedAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviews
     // GKE doesn't enable the SAR endpoint. Without this endpoint, we cannot determine if the policy engine
     // has adjusted as expected. In this case, simply wait one second and hope it's up to date
     if apierrors.IsNotFound(err) {
-        fmt.Printf("SubjectAccessReview endpoint is missing\n")
+        glog.Info("SubjectAccessReview endpoint is missing")
         time.Sleep(1 * time.Second)
         return true, nil
     }
@@ -94,7 +94,7 @@ func BindClusterRole(c v1beta1rbac.ClusterRoleBindingsGetter, clusterRole, ns st
 
     // if we failed, don't fail the entire test because it may still work. RBAC may simply be disabled.
     if err != nil {
-        fmt.Printf("Error binding clusterrole/%s for %q for %v\n", clusterRole, ns, subjects)
+        glog.Errorf("Error binding clusterrole/%s for %q for %v\n", clusterRole, ns, subjects)
     }
 }
 
@@ -124,7 +124,7 @@ func bindInNamespace(c v1beta1rbac.RoleBindingsGetter, roleType, role, ns string
 
     // if we failed, don't fail the entire test because it may still work. RBAC may simply be disabled.
     if err != nil {
-        fmt.Printf("Error binding %s/%s into %q for %v\n", roleType, role, ns, subjects)
+        glog.Errorf("Error binding %s/%s into %q for %v\n", roleType, role, ns, subjects)
     }
 }
 
@@ -140,7 +140,7 @@ func IsRBACEnabled(f *Framework) bool {
         Logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err)
         isRBACEnabled = false
     } else if crs == nil || len(crs.Items) == 0 {
-        Logf("No ClusteRoles found; assuming RBAC is disabled.")
+        Logf("No ClusterRoles found; assuming RBAC is disabled.")
         isRBACEnabled = false
     } else {
         Logf("Found ClusterRoles; assuming RBAC is enabled.")

vendor/k8s.io/kubernetes/test/e2e/framework/crd_util.go (generated, vendored): 6 changes

@@ -21,7 +21,7 @@ import (
 
     apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
     crdclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
-    "k8s.io/apiextensions-apiserver/test/integration/testserver"
+    "k8s.io/apiextensions-apiserver/test/integration/fixtures"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/client-go/dynamic"
@@ -76,7 +76,7 @@ func CreateTestCRD(f *Framework) (*TestCrd, error) {
     crd := newCRDForTest(testcrd)
 
     //create CRD and waits for the resource to be recognized and available.
-    crd, err = testserver.CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient)
+    crd, err = fixtures.CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient)
     if err != nil {
         Failf("failed to create CustomResourceDefinition: %v", err)
         return nil, err
@@ -89,7 +89,7 @@ func CreateTestCRD(f *Framework) (*TestCrd, error) {
     testcrd.Crd = crd
     testcrd.DynamicClient = resourceClient
     testcrd.CleanUp = func() error {
-        err := testserver.DeleteCustomResourceDefinition(crd, apiExtensionClient)
+        err := fixtures.DeleteCustomResourceDefinition(crd, apiExtensionClient)
         if err != nil {
             Failf("failed to delete CustomResourceDefinition(%s): %v", name, err)
         }

vendor/k8s.io/kubernetes/test/e2e/framework/deployment_util.go (generated, vendored): 9 changes

@@ -17,6 +17,7 @@ limitations under the License.
 package framework
 
 import (
+    "context"
     "fmt"
     "time"
 
@@ -30,9 +31,11 @@ import (
     "k8s.io/apimachinery/pkg/watch"
     clientset "k8s.io/client-go/kubernetes"
     scaleclient "k8s.io/client-go/scale"
+    watchtools "k8s.io/client-go/tools/watch"
     appsinternal "k8s.io/kubernetes/pkg/apis/apps"
     deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
     testutils "k8s.io/kubernetes/test/utils"
+    imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
 func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*apps.Deployment, error) {
@@ -172,7 +175,9 @@ func WatchRecreateDeployment(c clientset.Interface, d *apps.Deployment) error {
             d.Generation <= d.Status.ObservedGeneration, nil
     }
 
-    _, err = watch.Until(2*time.Minute, w, condition)
+    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+    defer cancel()
+    _, err = watchtools.UntilWithoutRetry(ctx, w, condition)
     if err == wait.ErrWaitTimeout {
         err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
     }
@@ -255,7 +260,7 @@ func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector ma
             Containers: []v1.Container{
                 {
                     Name:    "write-pod",
-                    Image:   "busybox",
+                    Image:   imageutils.GetE2EImage(imageutils.BusyBox),
                     Command: []string{"/bin/sh"},
                     Args:    []string{"-c", command},
                     SecurityContext: &v1.SecurityContext{

vendor/k8s.io/kubernetes/test/e2e/framework/firewall_util.go (generated, vendored): 4 changes

@@ -54,7 +54,7 @@ func ConstructFirewallForLBService(svc *v1.Service, nodeTag string) *compute.Fir
         Failf("can not construct firewall rule for non-loadbalancer type service")
     }
     fw := compute.Firewall{}
-    fw.Name = MakeFirewallNameForLBService(cloudprovider.GetLoadBalancerName(svc))
+    fw.Name = MakeFirewallNameForLBService(cloudprovider.DefaultLoadBalancerName(svc))
     fw.TargetTags = []string{nodeTag}
     if svc.Spec.LoadBalancerSourceRanges == nil {
         fw.SourceRanges = []string{"0.0.0.0/0"}
@@ -80,7 +80,7 @@ func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service,
         Failf("can not construct firewall rule for non-loadbalancer type service")
     }
     fw := compute.Firewall{}
-    fw.Name = MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.GetLoadBalancerName(svc), isNodesHealthCheck)
+    fw.Name = MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), isNodesHealthCheck)
     fw.TargetTags = []string{nodeTag}
     fw.SourceRanges = gcecloud.LoadBalancerSrcRanges()
     healthCheckPort := gcecloud.GetNodesHealthCheckPort()

vendor/k8s.io/kubernetes/test/e2e/framework/flake_reporting_util.go (generated, vendored, new file): 91 changes

@@ -0,0 +1,91 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package framework
+
+import (
+    "bytes"
+    "fmt"
+    "sync"
+)
+
+type FlakeReport struct {
+    lock       sync.RWMutex
+    Flakes     []string `json:"flakes"`
+    FlakeCount int      `json:"flakeCount"`
+}
+
+func NewFlakeReport() *FlakeReport {
+    return &FlakeReport{
+        Flakes: []string{},
+    }
+}
+
+func buildDescription(optionalDescription ...interface{}) string {
+    switch len(optionalDescription) {
+    case 0:
+        return ""
+    default:
+        return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...)
+    }
+}
+
+// RecordFlakeIfError records the error (if non-nil) as a flake along with an optional description.
+// This can be used as a replacement of framework.ExpectNoError() for non-critical errors that can
+// be considered as 'flakes' to avoid causing failures in tests.
+func (f *FlakeReport) RecordFlakeIfError(err error, optionalDescription ...interface{}) {
+    if err == nil {
+        return
+    }
+    msg := fmt.Sprintf("Unexpected error occurred: %v", err)
+    desc := buildDescription(optionalDescription)
+    if desc != "" {
+        msg = fmt.Sprintf("%v (Description: %v)", msg, desc)
+    }
+    Logf(msg)
+    f.lock.Lock()
+    defer f.lock.Unlock()
+    f.Flakes = append(f.Flakes, msg)
+    f.FlakeCount++
+}
+
+func (f *FlakeReport) GetFlakeCount() int {
+    f.lock.RLock()
+    defer f.lock.RUnlock()
+    return f.FlakeCount
+}
+
+func (f *FlakeReport) PrintHumanReadable() string {
+    f.lock.RLock()
+    defer f.lock.RUnlock()
+    buf := bytes.Buffer{}
+    buf.WriteString(fmt.Sprintf("FlakeCount: %v\n", f.FlakeCount))
+    buf.WriteString("Flakes:\n")
+    for _, flake := range f.Flakes {
+        buf.WriteString(fmt.Sprintf("%v\n", flake))
+    }
+    return buf.String()
+}
+
+func (f *FlakeReport) PrintJSON() string {
+    f.lock.RLock()
+    defer f.lock.RUnlock()
+    return PrettyPrintJSON(f)
+}
+
+func (f *FlakeReport) SummaryKind() string {
+    return "FlakeReport"
+}
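
For callers of the vendored framework, a minimal usage sketch of the new FlakeReport API (the flaky operation is a hypothetical placeholder):

    report := framework.NewFlakeReport()
    err := doSomethingFlaky() // hypothetical operation that may fail intermittently
    report.RecordFlakeIfError(err, "tolerated failure in %v", "doSomethingFlaky")
    framework.Logf("flakes recorded so far: %d", report.GetFlakeCount())

Inside a test, the per-test report wired up in framework.go below (f.flakeReport, appended to f.TestSummaries in AfterEach) is used instead of a hand-built one.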

vendor/k8s.io/kubernetes/test/e2e/framework/framework.go (generated, vendored): 59 changes

@@ -21,12 +21,12 @@ import (
     "bytes"
     "fmt"
     "os"
     "path"
     "strings"
     "sync"
     "time"
 
     "k8s.io/api/core/v1"
+    apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
@@ -42,6 +42,7 @@ import (
     "k8s.io/client-go/restmapper"
     scaleclient "k8s.io/client-go/scale"
     "k8s.io/client-go/tools/clientcmd"
+    csi "k8s.io/csi-api/pkg/client/clientset/versioned"
     aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
     "k8s.io/kubernetes/pkg/api/legacyscheme"
     "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
@@ -67,6 +68,8 @@ type Framework struct {
 
     ClientSet                        clientset.Interface
     KubemarkExternalClusterClientSet clientset.Interface
+    APIExtensionsClientSet           apiextensionsclient.Interface
+    CSIClientSet                     csi.Interface
 
     InternalClientset *internalclientset.Clientset
     AggregatorClient  *aggregatorclient.Clientset
@@ -90,6 +93,9 @@ type Framework struct {
     logsSizeCloseChannel chan bool
     logsSizeVerifier     *LogsSizeVerifier
 
+    // Flaky operation failures in an e2e test can be captured through this.
+    flakeReport *FlakeReport
+
     // To make sure that this framework cleans up after itself, no matter what,
     // we install a Cleanup action before each test and clear it after. If we
     // should abort, the AfterSuite hook should run all Cleanup actions.
@@ -152,6 +158,15 @@ func (f *Framework) BeforeEach() {
     if f.ClientSet == nil {
         By("Creating a kubernetes client")
         config, err := LoadConfig()
+        testDesc := CurrentGinkgoTestDescription()
+        if len(testDesc.ComponentTexts) > 0 {
+            componentTexts := strings.Join(testDesc.ComponentTexts, " ")
+            config.UserAgent = fmt.Sprintf(
+                "%v -- %v",
+                rest.DefaultKubernetesUserAgent(),
+                componentTexts)
+        }
+
         Expect(err).NotTo(HaveOccurred())
         config.QPS = f.Options.ClientQPS
         config.Burst = f.Options.ClientBurst
@@ -163,12 +178,19 @@ func (f *Framework) BeforeEach() {
         }
         f.ClientSet, err = clientset.NewForConfig(config)
         Expect(err).NotTo(HaveOccurred())
+        f.APIExtensionsClientSet, err = apiextensionsclient.NewForConfig(config)
+        Expect(err).NotTo(HaveOccurred())
         f.InternalClientset, err = internalclientset.NewForConfig(config)
         Expect(err).NotTo(HaveOccurred())
         f.AggregatorClient, err = aggregatorclient.NewForConfig(config)
         Expect(err).NotTo(HaveOccurred())
         f.DynamicClient, err = dynamic.NewForConfig(config)
         Expect(err).NotTo(HaveOccurred())
+        // csi.storage.k8s.io is based on CRD, which is served only as JSON
+        jsonConfig := config
+        jsonConfig.ContentType = "application/json"
+        f.CSIClientSet, err = csi.NewForConfig(jsonConfig)
+        Expect(err).NotTo(HaveOccurred())
 
         // create scales getter, set GroupVersion and NegotiatedSerializer to default values
         // as they are required when creating a REST client.
@@ -210,7 +232,7 @@ func (f *Framework) BeforeEach() {
     }
 
     if !f.SkipNamespaceCreation {
-        By("Building a namespace api object")
+        By(fmt.Sprintf("Building a namespace api object, basename %s", f.BaseName))
         namespace, err := f.CreateNamespace(f.BaseName, map[string]string{
             "e2e-framework": f.BaseName,
         })
@@ -269,6 +291,8 @@ func (f *Framework) BeforeEach() {
         }
 
     }
+
+    f.flakeReport = NewFlakeReport()
 }
 
 // AfterEach deletes the namespace, after reading its events.
@@ -326,25 +350,6 @@ func (f *Framework) AfterEach() {
         if !f.SkipNamespaceCreation {
             DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name)
         }
 
-        logFunc := Logf
-        if TestContext.ReportDir != "" {
-            filePath := path.Join(TestContext.ReportDir, "image-puller.txt")
-            file, err := os.Create(filePath)
-            if err != nil {
-                By(fmt.Sprintf("Failed to create a file with image-puller data %v: %v\nPrinting to stdout", filePath, err))
-            } else {
-                By(fmt.Sprintf("Dumping a list of prepulled images on each node to file %v", filePath))
-                defer file.Close()
-                if err = file.Chmod(0644); err != nil {
-                    Logf("Failed to chmod to 644 of %v: %v", filePath, err)
-                }
-                logFunc = GetLogToFileFunc(file)
-            }
-        } else {
-            By("Dumping a list of prepulled images on each node...")
-        }
-        LogContainersInPodsWithLabels(f.ClientSet, metav1.NamespaceSystem, ImagePullerLabels, "image-puller", logFunc)
-    }
 
     if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" && f.gatherer != nil {
@@ -382,6 +387,12 @@ func (f *Framework) AfterEach() {
         close(f.kubemarkControllerCloseChannel)
     }
 
+    // Report any flakes that were observed in the e2e test and reset.
+    if f.flakeReport != nil && f.flakeReport.GetFlakeCount() > 0 {
+        f.TestSummaries = append(f.TestSummaries, f.flakeReport)
+        f.flakeReport = nil
+    }
+
     PrintSummaries(f.TestSummaries, f.BaseName)
 
     // Check whether all nodes are ready after the test.
@@ -409,6 +420,10 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (
     return ns, err
 }
 
+func (f *Framework) RecordFlakeIfError(err error, optionalDescription ...interface{}) {
+    f.flakeReport.RecordFlakeIfError(err, optionalDescription)
+}
+
 // AddNamespacesToDelete adds one or more namespaces to be deleted when the test
 // completes.
 func (f *Framework) AddNamespacesToDelete(namespaces ...*v1.Namespace) {
@@ -533,7 +548,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
         return nil
     } else {
         return []v1.ServicePort{{
-            Protocol:   "TCP",
+            Protocol:   v1.ProtocolTCP,
             Port:       int32(svcPort),
             TargetPort: intstr.FromInt(contPort),
         }}
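
For downstream tests, the bump exposes two additional ready-made clients on the Framework object; a minimal sketch (the CRD name is a placeholder, and the v1beta1 accessor is assumed from the apiextensions clientset of this era):

    // Inside a test with f := framework.NewDefaultFramework("example"):
    crd, err := f.APIExtensionsClientSet.ApiextensionsV1beta1().
        CustomResourceDefinitions().Get("foos.example.com", metav1.GetOptions{})
    framework.ExpectNoError(err)
    _ = crd
    // f.CSIClientSet is built from a JSON-content-type copy of the rest
    // config, since the csi.storage.k8s.io types are CRD-backed (see above).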

vendor/k8s.io/kubernetes/test/e2e/framework/gpu_util.go (generated, vendored): 16 changes

@@ -21,6 +21,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/uuid"
 
+    "github.com/golang/glog"
     . "github.com/onsi/gomega"
 )
 
@@ -50,13 +51,13 @@ func NumberOfNVIDIAGPUs(node *v1.Node) int64 {
 }
 
 // NVIDIADevicePlugin returns the official Google Device Plugin pod for NVIDIA GPU in GKE
-func NVIDIADevicePlugin(ns string) *v1.Pod {
+func NVIDIADevicePlugin() *v1.Pod {
     ds, err := DsFromManifest(GPUDevicePluginDSYAML)
     Expect(err).NotTo(HaveOccurred())
     p := &v1.Pod{
         ObjectMeta: metav1.ObjectMeta{
             Name:      "device-plugin-nvidia-gpu-" + string(uuid.NewUUID()),
-            Namespace: ns,
+            Namespace: metav1.NamespaceSystem,
         },
 
         Spec: ds.Spec.Template.Spec,
@@ -69,7 +70,16 @@ func NVIDIADevicePlugin(ns string) *v1.Pod {
 
 func GetGPUDevicePluginImage() string {
     ds, err := DsFromManifest(GPUDevicePluginDSYAML)
-    if err != nil || ds == nil || len(ds.Spec.Template.Spec.Containers) < 1 {
+    if err != nil {
+        glog.Errorf("Failed to parse the device plugin image: %v", err)
         return ""
     }
+    if ds == nil {
+        glog.Errorf("Failed to parse the device plugin image: the extracted DaemonSet is nil")
+        return ""
+    }
+    if len(ds.Spec.Template.Spec.Containers) < 1 {
+        glog.Errorf("Failed to parse the device plugin image: cannot extract the container from YAML")
+        return ""
+    }
     return ds.Spec.Template.Spec.Containers[0].Image

vendor/k8s.io/kubernetes/test/e2e/framework/ingress_utils.go (generated, vendored): 17 changes

@@ -121,8 +121,9 @@ const (
     // a single character of padding.
     nameLenLimit = 62
 
-    NEGAnnotation    = "alpha.cloud.google.com/load-balancer-neg"
-    NEGUpdateTimeout = 2 * time.Minute
+    NEGAnnotation       = "cloud.google.com/neg"
+    NEGStatusAnnotation = "cloud.google.com/neg-status"
+    NEGUpdateTimeout    = 2 * time.Minute
 
     InstanceGroupAnnotation = "ingress.gcp.kubernetes.io/instance-groups"
 
@@ -164,6 +165,16 @@ type IngressConformanceTests struct {
     ExitLog string
 }
 
+// NegStatus contains name and zone of the Network Endpoint Group
+// resources associated with this service.
+// Needs to be consistent with the NEG internal structs in ingress-gce.
+type NegStatus struct {
+    // NetworkEndpointGroups returns the mapping between service port and NEG
+    // resource. key is service port, value is the name of the NEG resource.
+    NetworkEndpointGroups map[int32]string `json:"network_endpoint_groups,omitempty"`
+    Zones                 []string         `json:"zones,omitempty"`
+}
+
 // CreateIngressComformanceTests generates an slice of sequential test cases:
 // a simple http ingress, ingress with HTTPS, ingress HTTPS with a modified hostname,
 // ingress https with a modified URLMap
@@ -940,7 +951,7 @@ func (cont *GCEIngressController) backendMode(svcPorts map[string]v1.ServicePort
     bsMatch := &compute.BackendService{}
     // Non-NEG BackendServices are named with the Nodeport in the name.
     // NEG BackendServices' names contain the a sha256 hash of a string.
-    negString := strings.Join([]string{uid, cont.Ns, svcName, sp.TargetPort.String()}, ";")
+    negString := strings.Join([]string{uid, cont.Ns, svcName, fmt.Sprintf("%v", sp.Port)}, ";")
     negHash := fmt.Sprintf("%x", sha256.Sum256([]byte(negString)))[:8]
     for _, bs := range beList {
         if strings.Contains(bs.Name, strconv.Itoa(int(sp.NodePort))) ||

vendor/k8s.io/kubernetes/test/e2e/framework/jobs_util.go (generated, vendored): 52 changes

@@ -27,6 +27,7 @@ import (
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
+    jobutil "k8s.io/kubernetes/pkg/controller/job"
 )
 
 const (
@@ -181,8 +182,8 @@ func WaitForAllJobPodsRunning(c clientset.Interface, ns, jobName string, paralle
     })
 }
 
-// WaitForJobFinish uses c to wait for compeletions to complete for the Job jobName in namespace ns.
-func WaitForJobFinish(c clientset.Interface, ns, jobName string, completions int32) error {
+// WaitForJobComplete uses c to wait for compeletions to complete for the Job jobName in namespace ns.
+func WaitForJobComplete(c clientset.Interface, ns, jobName string, completions int32) error {
     return wait.Poll(Poll, JobTimeout, func() (bool, error) {
         curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
         if err != nil {
@@ -192,6 +193,17 @@ func WaitForJobFinish(c clientset.Interface, ns, jobName string, completions int
     })
 }
 
+// WaitForJobFinish uses c to wait for the Job jobName in namespace ns to finish (either Failed or Complete).
+func WaitForJobFinish(c clientset.Interface, ns, jobName string) error {
+    return wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
+        curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
+        if err != nil {
+            return false, err
+        }
+        return jobutil.IsJobFinished(curr), nil
+    })
+}
+
 // WaitForJobFailure uses c to wait for up to timeout for the Job named jobName in namespace ns to fail.
 func WaitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.Duration, reason string) error {
     return wait.Poll(Poll, timeout, func() (bool, error) {
@@ -239,6 +251,18 @@ func CheckForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parall
     return count == parallelism, nil
 }
 
+// WaitForAllJobPodsRunning wait for all pods for the Job named jobName in namespace ns
+// to be deleted.
+func WaitForAllJobPodsGone(c clientset.Interface, ns, jobName string) error {
+    return wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
+        pods, err := GetJobPods(c, ns, jobName)
+        if err != nil {
+            return false, err
+        }
+        return len(pods.Items) == 0, nil
+    })
+}
+
 func newBool(val bool) *bool {
     p := new(bool)
     *p = val
@@ -250,7 +274,7 @@ type updateJobFunc func(*batch.Job)
 func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateJobFunc) (job *batch.Job, err error) {
     jobs := c.BatchV1().Jobs(namespace)
     var updateErr error
-    pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
+    pollErr := wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
         if job, err = jobs.Get(name, metav1.GetOptions{}); err != nil {
             return false, err
         }
@@ -268,3 +292,25 @@ func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUp
     }
     return job, pollErr
 }
+
+// WaitForJobDeleting uses c to wait for the Job jobName in namespace ns to have
+// a non-nil deletionTimestamp (i.e. being deleted).
+func WaitForJobDeleting(c clientset.Interface, ns, jobName string) error {
+    return wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
+        curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
+        if err != nil {
+            return false, err
+        }
+        return curr.ObjectMeta.DeletionTimestamp != nil, nil
+    })
+}
+
+func JobFinishTime(finishedJob *batch.Job) metav1.Time {
+    var finishTime metav1.Time
+    for _, c := range finishedJob.Status.Conditions {
+        if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == v1.ConditionTrue {
+            return c.LastTransitionTime
+        }
+    }
+    return finishTime
+}
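
Note the rename for downstream callers: the old four-argument WaitForJobFinish is now WaitForJobComplete, while the new WaitForJobFinish waits for any terminal condition. A migration sketch (client, namespace and job name are placeholders):

    // Before the bump: err := framework.WaitForJobFinish(c, ns, "my-job", completions)
    err := framework.WaitForJobComplete(c, ns, "my-job", completions) // same semantics as before
    framework.ExpectNoError(err)
    // New helper: block until the Job is Complete or Failed.
    err = framework.WaitForJobFinish(c, ns, "my-job")
    framework.ExpectNoError(err)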

vendor/k8s.io/kubernetes/test/e2e/framework/kubelet_stats.go (generated, vendored): 2 changes

@@ -492,7 +492,7 @@ type usageDataPerContainer struct {
 }
 
 func GetKubeletHeapStats(c clientset.Interface, nodeName string) (string, error) {
-    client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap")
+    client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap", ports.KubeletPort)
     if err != nil {
         return "", err
     }

vendor/k8s.io/kubernetes/test/e2e/framework/metrics/BUILD (generated, vendored): 6 changes

@@ -21,12 +21,12 @@ go_library(
         "//pkg/apis/core:go_default_library",
         "//pkg/master/ports:go_default_library",
         "//pkg/util/system:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/prometheus/common/expfmt:go_default_library",
         "//vendor/github.com/prometheus/common/model:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
     ],
 )
 

vendor/k8s.io/kubernetes/test/e2e/framework/metrics_util.go (generated, vendored): 165 changes

@@ -23,9 +23,11 @@ import (
     "fmt"
     "io"
     "math"
+    "reflect"
     "sort"
     "strconv"
     "strings"
+    "sync"
     "time"
 
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -232,6 +234,143 @@ func (l *SchedulingMetrics) PrintJSON() string {
     return PrettyPrintJSON(l)
 }
 
+type Histogram struct {
+    Labels  map[string]string `json:"labels"`
+    Buckets map[string]int    `json:"buckets"`
+}
+
+type HistogramVec []Histogram
+
+func newHistogram(labels map[string]string) *Histogram {
+    return &Histogram{
+        Labels:  labels,
+        Buckets: make(map[string]int),
+    }
+}
+
+type EtcdMetrics struct {
+    BackendCommitDuration     HistogramVec `json:"backendCommitDuration"`
+    SnapshotSaveTotalDuration HistogramVec `json:"snapshotSaveTotalDuration"`
+    PeerRoundTripTime         HistogramVec `json:"peerRoundTripTime"`
+    WalFsyncDuration          HistogramVec `json:"walFsyncDuration"`
+    MaxDatabaseSize           float64      `json:"maxDatabaseSize"`
+}
+
+func newEtcdMetrics() *EtcdMetrics {
+    return &EtcdMetrics{
+        BackendCommitDuration:     make(HistogramVec, 0),
+        SnapshotSaveTotalDuration: make(HistogramVec, 0),
+        PeerRoundTripTime:         make(HistogramVec, 0),
+        WalFsyncDuration:          make(HistogramVec, 0),
+    }
+}
+
+func (l *EtcdMetrics) SummaryKind() string {
+    return "EtcdMetrics"
+}
+
+func (l *EtcdMetrics) PrintHumanReadable() string {
+    return PrettyPrintJSON(l)
+}
+
+func (l *EtcdMetrics) PrintJSON() string {
+    return PrettyPrintJSON(l)
+}
+
+type EtcdMetricsCollector struct {
+    stopCh  chan struct{}
+    wg      *sync.WaitGroup
+    metrics *EtcdMetrics
+}
+
+func NewEtcdMetricsCollector() *EtcdMetricsCollector {
+    return &EtcdMetricsCollector{
+        stopCh:  make(chan struct{}),
+        wg:      &sync.WaitGroup{},
+        metrics: newEtcdMetrics(),
+    }
+}
+
+func getEtcdMetrics() ([]*model.Sample, error) {
+    // Etcd is only exposed on localhost level. We are using ssh method
+    if TestContext.Provider == "gke" {
+        Logf("Not grabbing scheduler metrics through master SSH: unsupported for gke")
+        return nil, nil
+    }
+
+    cmd := "curl http://localhost:2379/metrics"
+    sshResult, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
+    if err != nil || sshResult.Code != 0 {
+        return nil, fmt.Errorf("unexpected error (code: %d) in ssh connection to master: %#v", sshResult.Code, err)
+    }
+    data := sshResult.Stdout
+
+    return extractMetricSamples(data)
+}
+
+func getEtcdDatabaseSize() (float64, error) {
+    samples, err := getEtcdMetrics()
+    if err != nil {
+        return 0, err
+    }
+    for _, sample := range samples {
+        if sample.Metric[model.MetricNameLabel] == "etcd_debugging_mvcc_db_total_size_in_bytes" {
+            return float64(sample.Value), nil
+        }
+    }
+    return 0, fmt.Errorf("Couldn't find etcd database size metric")
+}
+
+// StartCollecting starts to collect etcd db size metric periodically
+// and updates MaxDatabaseSize accordingly.
+func (mc *EtcdMetricsCollector) StartCollecting(interval time.Duration) {
+    mc.wg.Add(1)
+    go func() {
+        defer mc.wg.Done()
+        for {
+            select {
+            case <-time.After(interval):
+                dbSize, err := getEtcdDatabaseSize()
+                if err != nil {
+                    Logf("Failed to collect etcd database size")
+                    continue
+                }
+                mc.metrics.MaxDatabaseSize = math.Max(mc.metrics.MaxDatabaseSize, dbSize)
+            case <-mc.stopCh:
+                return
+            }
+        }
+    }()
+}
+
+func (mc *EtcdMetricsCollector) StopAndSummarize() error {
+    close(mc.stopCh)
+    mc.wg.Wait()
+
+    // Do some one-off collection of metrics.
+    samples, err := getEtcdMetrics()
+    if err != nil {
+        return err
+    }
+    for _, sample := range samples {
+        switch sample.Metric[model.MetricNameLabel] {
+        case "etcd_disk_backend_commit_duration_seconds_bucket":
+            convertSampleToBucket(sample, &mc.metrics.BackendCommitDuration)
+        case "etcd_debugging_snap_save_total_duration_seconds_bucket":
+            convertSampleToBucket(sample, &mc.metrics.SnapshotSaveTotalDuration)
+        case "etcd_disk_wal_fsync_duration_seconds_bucket":
+            convertSampleToBucket(sample, &mc.metrics.WalFsyncDuration)
+        case "etcd_network_peer_round_trip_time_seconds_bucket":
+            convertSampleToBucket(sample, &mc.metrics.PeerRoundTripTime)
+        }
+    }
+    return nil
+}
+
+func (mc *EtcdMetricsCollector) GetMetrics() *EtcdMetrics {
+    return mc.metrics
+}
+
 type SaturationTime struct {
     TimeToSaturate time.Duration `json:"timeToSaturate"`
     NumberOfNodes  int           `json:"numberOfNodes"`
@@ -500,6 +639,9 @@ func sendRestRequestToScheduler(c clientset.Interface, op string) (string, error
 func getSchedulingLatency(c clientset.Interface) (*SchedulingMetrics, error) {
     result := SchedulingMetrics{}
     data, err := sendRestRequestToScheduler(c, "GET")
+    if err != nil {
+        return nil, err
+    }
 
     samples, err := extractMetricSamples(data)
     if err != nil {
@@ -546,12 +688,33 @@ func VerifySchedulerLatency(c clientset.Interface) (*SchedulingMetrics, error) {
 
 func ResetSchedulerMetrics(c clientset.Interface) error {
     responseText, err := sendRestRequestToScheduler(c, "DELETE")
-    if err != nil || responseText != "metrics reset\n" {
+    if err != nil {
         return fmt.Errorf("Unexpected response: %q", responseText)
     }
     return nil
 }
 
+func convertSampleToBucket(sample *model.Sample, h *HistogramVec) {
+    labels := make(map[string]string)
+    for k, v := range sample.Metric {
+        if k != "le" {
+            labels[string(k)] = string(v)
+        }
+    }
+    var hist *Histogram
+    for i := range *h {
+        if reflect.DeepEqual(labels, (*h)[i].Labels) {
+            hist = &((*h)[i])
+            break
+        }
+    }
+    if hist == nil {
+        hist = newHistogram(labels)
+        *h = append(*h, *hist)
+    }
+    hist.Buckets[string(sample.Metric["le"])] = int(sample.Value)
+}
+
 func PrettyPrintJSON(metrics interface{}) string {
     output := &bytes.Buffer{}
     if err := json.NewEncoder(output).Encode(metrics); err != nil {
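
The new EtcdMetricsCollector is polled on an interval and summarized once after the measured workload; a minimal usage sketch (the 30-second interval is illustrative):

    mc := framework.NewEtcdMetricsCollector()
    mc.StartCollecting(30 * time.Second) // tracks max etcd db size via SSH to the master
    // ... run the measured workload ...
    if err := mc.StopAndSummarize(); err != nil {
        framework.Logf("etcd metrics collection failed: %v", err)
    } else {
        framework.Logf("%s", mc.GetMetrics().PrintJSON())
    }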

vendor/k8s.io/kubernetes/test/e2e/framework/networking_utils.go (generated, vendored): 5 changes

@@ -948,7 +948,10 @@ func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout
 // This function executes commands on a node so it will work only for some
 // environments.
 func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1.Node, testFunc func()) {
-    host := GetNodeExternalIP(node)
+    host, err := GetNodeExternalIP(node)
+    if err != nil {
+        Failf("Error getting node external ip : %v", err)
+    }
     master := GetMasterAddress(c)
     By(fmt.Sprintf("block network traffic from node %s to the master", node.Name))
     defer func() {

vendor/k8s.io/kubernetes/test/e2e/framework/nodes_util.go (generated, vendored): 4 changes

@@ -63,7 +63,7 @@ func etcdUpgradeGCE(target_storage, target_version string) error {
         os.Environ(),
         "TEST_ETCD_VERSION="+target_version,
         "STORAGE_BACKEND="+target_storage,
-        "TEST_ETCD_IMAGE=3.2.18-0")
+        "TEST_ETCD_IMAGE=3.2.24-1")
 
     _, _, err := RunCmdEnv(env, gceUpgradeScript(), "-l", "-M")
     return err
@@ -103,7 +103,7 @@ func masterUpgradeGCE(rawV string, enableKubeProxyDaemonSet bool) error {
         env = append(env,
             "TEST_ETCD_VERSION="+TestContext.EtcdUpgradeVersion,
             "STORAGE_BACKEND="+TestContext.EtcdUpgradeStorage,
-            "TEST_ETCD_IMAGE=3.2.18-0")
+            "TEST_ETCD_IMAGE=3.2.24-1")
     } else {
         // In e2e tests, we skip the confirmation prompt about
         // implicit etcd upgrades to simulate the user entering "y".

vendor/k8s.io/kubernetes/test/e2e/framework/pods.go (generated, vendored): 7 changes

@@ -30,6 +30,7 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     v1core "k8s.io/client-go/kubernetes/typed/core/v1"
     "k8s.io/kubernetes/pkg/api/legacyscheme"
+    podutil "k8s.io/kubernetes/pkg/api/v1/pod"
     "k8s.io/kubernetes/pkg/kubelet/events"
     "k8s.io/kubernetes/pkg/kubelet/sysctl"
 
@@ -259,3 +260,9 @@ func (c *PodClient) MatchContainerOutput(name string, containerName string, expe
     }
     return nil
 }
+
+func (c *PodClient) PodIsReady(name string) bool {
+    pod, err := c.Get(name, metav1.GetOptions{})
+    ExpectNoError(err)
+    return podutil.IsPodReady(pod)
+}

vendor/k8s.io/kubernetes/test/e2e/framework/profile_gatherer.go (generated, vendored): 105 changes

@@ -61,25 +61,54 @@ func checkProfileGatheringPrerequisites() error {
     return nil
 }
 
-func gatherProfileOfKind(profileBaseName, kind string) error {
+func getPortForComponent(componentName string) (int, error) {
+    switch componentName {
+    case "kube-apiserver":
+        return 8080, nil
+    case "kube-scheduler":
+        return 10251, nil
+    case "kube-controller-manager":
+        return 10252, nil
+    }
+    return -1, fmt.Errorf("Port for component %v unknown", componentName)
+}
+
+// Gathers profiles from a master component through SSH. E.g usages:
+// - gatherProfile("kube-apiserver", "someTest", "heap")
+// - gatherProfile("kube-scheduler", "someTest", "profile")
+// - gatherProfile("kube-controller-manager", "someTest", "profile?seconds=20")
+//
+// We don't export this method but wrappers around it (see below).
+func gatherProfile(componentName, profileBaseName, profileKind string) error {
+    if err := checkProfileGatheringPrerequisites(); err != nil {
+        return fmt.Errorf("Profile gathering pre-requisite failed: %v", err)
+    }
+    profilePort, err := getPortForComponent(componentName)
+    if err != nil {
+        return fmt.Errorf("Profile gathering failed finding component port: %v", err)
+    }
+    if profileBaseName == "" {
+        profileBaseName = time.Now().Format(time.RFC3339)
+    }
+
     // Get the profile data over SSH.
-    getCommand := fmt.Sprintf("curl -s localhost:8080/debug/pprof/%s", kind)
+    getCommand := fmt.Sprintf("curl -s localhost:%v/debug/pprof/%s", profilePort, profileKind)
     sshResult, err := SSH(getCommand, GetMasterHost()+":22", TestContext.Provider)
     if err != nil {
         return fmt.Errorf("Failed to execute curl command on master through SSH: %v", err)
     }
 
-    var profilePrefix string
+    profilePrefix := componentName
     switch {
-    case kind == "heap":
-        profilePrefix = "ApiserverMemoryProfile_"
-    case strings.HasPrefix(kind, "profile"):
-        profilePrefix = "ApiserverCPUProfile_"
+    case profileKind == "heap":
+        profilePrefix += "_MemoryProfile_"
+    case strings.HasPrefix(profileKind, "profile"):
+        profilePrefix += "_CPUProfile_"
     default:
-        return fmt.Errorf("Unknown profile kind provided: %s", kind)
+        return fmt.Errorf("Unknown profile kind provided: %s", profileKind)
     }
 
-    // Write the data to a file.
+    // Write the profile data to a file.
     rawprofilePath := path.Join(getProfilesDirectoryPath(), profilePrefix+profileBaseName+".pprof")
     rawprofile, err := os.Create(rawprofilePath)
     if err != nil {
@@ -97,12 +126,12 @@ func gatherProfileOfKind(profileBaseName, kind string) error {
     var cmd *exec.Cmd
     switch {
     // TODO: Support other profile kinds if needed (e.g inuse_space, alloc_objects, mutex, etc)
-    case kind == "heap":
+    case profileKind == "heap":
         cmd = exec.Command("go", "tool", "pprof", "-pdf", "-symbolize=none", "--alloc_space", rawprofile.Name())
-    case strings.HasPrefix(kind, "profile"):
+    case strings.HasPrefix(profileKind, "profile"):
         cmd = exec.Command("go", "tool", "pprof", "-pdf", "-symbolize=none", rawprofile.Name())
     default:
-        return fmt.Errorf("Unknown profile kind provided: %s", kind)
+        return fmt.Errorf("Unknown profile kind provided: %s", profileKind)
     }
     outfilePath := path.Join(getProfilesDirectoryPath(), profilePrefix+profileBaseName+".pdf")
     outfile, err := os.Create(outfilePath)
@@ -124,67 +153,53 @@ func gatherProfileOfKind(profileBaseName, kind string) error {
 // finish before the parent goroutine itself finishes, we accept a sync.WaitGroup
 // argument in these functions. Typically you would use the following pattern:
 //
-// func TestFooBar() {
+// func TestFoo() {
 //     var wg sync.WaitGroup
 //     wg.Add(3)
-//     go framework.GatherApiserverCPUProfile(&wg, "doing_foo")
-//     go framework.GatherApiserverMemoryProfile(&wg, "doing_foo")
+//     go framework.GatherCPUProfile("kube-apiserver", "before_foo", &wg)
+//     go framework.GatherMemoryProfile("kube-apiserver", "before_foo", &wg)
 //     <<<< some code doing foo >>>>>>
-//     go framework.GatherApiserverCPUProfile(&wg, "doing_bar")
-//     <<<< some code doing bar >>>>>>
+//     go framework.GatherCPUProfile("kube-scheduler", "after_foo", &wg)
 //     wg.Wait()
 // }
 //
 // If you do not wish to exercise the waiting logic, pass a nil value for the
 // waitgroup argument instead. However, then you would be responsible for ensuring
-// that the function finishes.
+// that the function finishes. There's also a polling-based gatherer utility for
+// CPU profiles available below.
 
-func GatherApiserverCPUProfile(wg *sync.WaitGroup, profileBaseName string) {
-    GatherApiserverCPUProfileForNSeconds(wg, profileBaseName, DefaultCPUProfileSeconds)
+func GatherCPUProfile(componentName string, profileBaseName string, wg *sync.WaitGroup) {
+    GatherCPUProfileForSeconds(componentName, profileBaseName, DefaultCPUProfileSeconds, wg)
 }
 
-func GatherApiserverCPUProfileForNSeconds(wg *sync.WaitGroup, profileBaseName string, n int) {
+func GatherCPUProfileForSeconds(componentName string, profileBaseName string, seconds int, wg *sync.WaitGroup) {
     if wg != nil {
         defer wg.Done()
     }
-    if err := checkProfileGatheringPrerequisites(); err != nil {
-        Logf("Profile gathering pre-requisite failed: %v", err)
-        return
-    }
-    if profileBaseName == "" {
-        profileBaseName = time.Now().Format(time.RFC3339)
-    }
-    if err := gatherProfileOfKind(profileBaseName, fmt.Sprintf("profile?seconds=%v", n)); err != nil {
-        Logf("Failed to gather apiserver CPU profile: %v", err)
+    if err := gatherProfile(componentName, profileBaseName, fmt.Sprintf("profile?seconds=%v", seconds)); err != nil {
+        Logf("Failed to gather %v CPU profile: %v", componentName, err)
     }
 }
 
-func GatherApiserverMemoryProfile(wg *sync.WaitGroup, profileBaseName string) {
+func GatherMemoryProfile(componentName string, profileBaseName string, wg *sync.WaitGroup) {
     if wg != nil {
         defer wg.Done()
     }
-    if err := checkProfileGatheringPrerequisites(); err != nil {
-        Logf("Profile gathering pre-requisite failed: %v", err)
-        return
-    }
-    if profileBaseName == "" {
-        profileBaseName = time.Now().Format(time.RFC3339)
-    }
-    if err := gatherProfileOfKind(profileBaseName, "heap"); err != nil {
-        Logf("Failed to gather apiserver memory profile: %v", err)
+    if err := gatherProfile(componentName, profileBaseName, "heap"); err != nil {
+        Logf("Failed to gather %v memory profile: %v", componentName, err)
     }
 }
 
-// StartApiserverCPUProfileGatherer is a polling-based gatherer of the apiserver's
-// CPU profile. It takes the delay b/w consecutive gatherings as an argument and
+// StartCPUProfileGatherer performs polling-based gathering of the component's CPU
+// profile. It takes the interval b/w consecutive gatherings as an argument and
 // starts the gathering goroutine. To stop the gatherer, close the returned channel.
-func StartApiserverCPUProfileGatherer(delay time.Duration) chan struct{} {
+func StartCPUProfileGatherer(componentName string, profileBaseName string, interval time.Duration) chan struct{} {
     stopCh := make(chan struct{})
     go func() {
         for {
             select {
-            case <-time.After(delay):
-                GatherApiserverCPUProfile(nil, "")
+            case <-time.After(interval):
+                GatherCPUProfile(componentName, profileBaseName+"_"+time.Now().Format(time.RFC3339), nil)
             case <-stopCh:
                 return
             }
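
Callers of the removed apiserver-specific gatherers now pass a component name; a migration sketch mirroring the updated doc comment above:

    var wg sync.WaitGroup
    wg.Add(2)
    // was: go framework.GatherApiserverCPUProfile(&wg, "my_test")
    go framework.GatherCPUProfile("kube-apiserver", "my_test", &wg)
    go framework.GatherMemoryProfile("kube-controller-manager", "my_test", &wg)
    wg.Wait()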

vendor/k8s.io/kubernetes/test/e2e/framework/pv_util.go (generated, vendored): 34 changes

@@ -48,6 +48,12 @@ const (
|
||||
VolumeSelectorKey = "e2e-pv-pool"
|
||||
)
|
||||
|
||||
var (
|
||||
// Common selinux labels
|
||||
SELinuxLabel = &v1.SELinuxOptions{
|
||||
Level: "s0:c0,c1"}
|
||||
)
|
||||
|
||||
// Map of all PVs used in the multi pv-pvc tests. The key is the PV's name, which is
|
||||
// guaranteed to be unique. The value is {} (empty struct) since we're only interested
|
||||
// in the PV's name and if it is present. We must always Get the pv object before
|
||||
@@ -501,22 +507,28 @@ func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error {
|
||||
// Deletes the passed-in pod and waits for the pod to be terminated. Resilient to the pod
|
||||
// not existing.
|
||||
func DeletePodWithWait(f *Framework, c clientset.Interface, pod *v1.Pod) error {
|
||||
const maxWait = 5 * time.Minute
|
||||
if pod == nil {
|
||||
return nil
|
||||
}
|
||||
Logf("Deleting pod %q in namespace %q", pod.Name, pod.Namespace)
|
||||
err := c.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil)
|
||||
return DeletePodWithWaitByName(f, c, pod.GetName(), pod.GetNamespace())
|
||||
}
|
||||
|
||||
// Deletes the named and namespaced pod and waits for the pod to be terminated. Resilient to the pod
|
||||
// not existing.
|
||||
func DeletePodWithWaitByName(f *Framework, c clientset.Interface, podName, podNamespace string) error {
|
||||
const maxWait = 5 * time.Minute
|
||||
Logf("Deleting pod %q in namespace %q", podName, podNamespace)
|
||||
err := c.CoreV1().Pods(podNamespace).Delete(podName, nil)
|
||||
if err != nil {
|
||||
if apierrs.IsNotFound(err) {
|
||||
return nil // assume pod was already deleted
|
||||
}
|
||||
return fmt.Errorf("pod Delete API error: %v", err)
|
||||
}
|
||||
Logf("Wait up to %v for pod %q to be fully deleted", maxWait, pod.Name)
|
||||
err = f.WaitForPodNotFound(pod.Name, maxWait)
|
||||
Logf("Wait up to %v for pod %q to be fully deleted", maxWait, podName)
|
||||
err = f.WaitForPodNotFound(podName, maxWait)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pod %q was not deleted: %v", pod.Name, err)
|
||||
return fmt.Errorf("pod %q was not deleted: %v", podName, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -716,7 +728,7 @@ func createPD(zone string) (string, error) {
|
||||
}
|
||||
|
||||
tags := map[string]string{}
|
||||
err = gceCloud.CreateDisk(pdName, gcecloud.DiskTypeSSD, zone, 10 /* sizeGb */, tags)
|
||||
err = gceCloud.CreateDisk(pdName, gcecloud.DiskTypeStandard, zone, 2 /* sizeGb */, tags)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -1000,11 +1012,19 @@ func CreateNginxPod(client clientset.Interface, namespace string, nodeSelector m
 
 // create security pod with given claims
 func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, timeout time.Duration) (*v1.Pod, error) {
+	return CreateSecPodWithNodeName(client, namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup, "", timeout)
+}
+
+// create security pod with given claims
+func CreateSecPodWithNodeName(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, nodeName string, timeout time.Duration) (*v1.Pod, error) {
 	pod := MakeSecPod(namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup)
 	pod, err := client.CoreV1().Pods(namespace).Create(pod)
 	if err != nil {
 		return nil, fmt.Errorf("pod Create API error: %v", err)
 	}
+	// Setting nodeName
+	pod.Spec.NodeName = nodeName
+
 	// Waiting for pod to be running
 	err = WaitTimeoutForPodRunningInNamespace(client, pod.Name, namespace, timeout)
 	if err != nil {
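CreateSecPod keeps its old behavior by delegating with an empty nodeName, while the new variant lets storage tests pin the pod to a node. A hypothetical call against the parameters in the hunk above (framework.PodStartTimeout is assumed to be the framework's usual pod-start timeout):

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// createPinnedSecPod is a hypothetical wrapper: pin a privileged test pod to
// one node; passing "" instead leaves scheduling to the scheduler, which is
// exactly what CreateSecPod does.
func createPinnedSecPod(c clientset.Interface, ns string, claims []*v1.PersistentVolumeClaim, node string) (*v1.Pod, error) {
	return framework.CreateSecPodWithNodeName(c, ns, claims,
		true,         // isPrivileged
		"sleep 3600", // command
		false, false, // hostIPC, hostPID
		framework.SELinuxLabel,
		nil,  // fsGroup: no supplemental group
		node, // nodeName
		framework.PodStartTimeout)
}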
10
vendor/k8s.io/kubernetes/test/e2e/framework/rc_util.go
generated
vendored
@@ -216,6 +216,14 @@ func WaitForReplicationControllerwithSelector(c clientset.Interface, namespace s
 	return nil
 }
 
+// trimDockerRegistry is the function for trimming the docker.io/library from the beginning of the imagename.
+// If community docker installed it will not prefix the registry names with the dockerimages vs registry names prefixed with other runtimes or docker installed via RHEL extra repo.
+// So this function will help to trim the docker.io/library if exists
+func trimDockerRegistry(imagename string) string {
+	imagename = strings.Replace(imagename, "docker.io/", "", 1)
+	return strings.Replace(imagename, "library/", "", 1)
+}
+
 // validatorFn is the function which is individual tests will implement.
 // we may want it to return more than just an error, at some point.
 type validatorFn func(c clientset.Interface, podID string) error
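The helper strips at most one docker.io/ and one library/ prefix so that image names reported by different container runtimes compare equal. A standalone check of the trimming behavior (function body copied from the hunk above):

package main

import (
	"fmt"
	"strings"
)

func trimDockerRegistry(imagename string) string {
	imagename = strings.Replace(imagename, "docker.io/", "", 1)
	return strings.Replace(imagename, "library/", "", 1)
}

func main() {
	for _, img := range []string{
		"docker.io/library/busybox:1.29", // community docker's fully qualified form
		"library/busybox:1.29",           // partially qualified
		"busybox:1.29",                   // already trimmed; unchanged
	} {
		fmt.Printf("%-35s -> %s\n", img, trimDockerRegistry(img))
	}
}

All three inputs print as busybox:1.29, which is what lets ValidateController compare kubectl's reported image against the expected one below.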
@@ -227,6 +235,7 @@ type validatorFn func(c clientset.Interface, podID string) error
 // "testname":  which gets bubbled up to the logging/failure messages if errors happen.
 // "validator" function: This function is given a podID and a client, and it can do some specific validations that way.
 func ValidateController(c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {
+	containerImage = trimDockerRegistry(containerImage)
 	getPodsTemplate := "--template={{range.items}}{{.metadata.name}} {{end}}"
 	// NB: kubectl adds the "exists" function to the standard template functions.
 	// This lets us check to see if the "running" entry exists for each of the containers
@@ -258,6 +267,7 @@ waitLoop:
 		}
 
 		currentImage := RunKubectlOrDie("get", "pods", podID, "-o", "template", getImageTemplate, fmt.Sprintf("--namespace=%v", ns))
+		currentImage = trimDockerRegistry(currentImage)
 		if currentImage != containerImage {
 			Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
 			continue waitLoop
4
vendor/k8s.io/kubernetes/test/e2e/framework/service_util.go
generated
vendored
@@ -254,7 +254,7 @@ func (j *ServiceTestJig) CreateOnlyLocalNodePortService(namespace, serviceName s
 	svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
 		svc.Spec.Type = v1.ServiceTypeNodePort
 		svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
-		svc.Spec.Ports = []v1.ServicePort{{Protocol: "TCP", Port: 80}}
+		svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: 80}}
 	})
 
 	if createPod {
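This hunk (and the matching ones in the files below) swaps the string literal "TCP" for the typed constant v1.ProtocolTCP. The wire value is identical; the typed form just lets the compiler catch typos and survives refactors. A quick standalone check:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// v1.Protocol is a string type, so the constant still compares equal
	// to "TCP"; the change is about type safety, not behavior.
	p := v1.ServicePort{Protocol: v1.ProtocolTCP, Port: 80}
	fmt.Println(p.Protocol == "TCP") // true
}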
@@ -1414,7 +1414,7 @@ func CreateServiceSpec(serviceName, externalName string, isHeadless bool, select
 		headlessService.Spec.ExternalName = externalName
 	} else {
 		headlessService.Spec.Ports = []v1.ServicePort{
-			{Port: 80, Name: "http", Protocol: "TCP"},
+			{Port: 80, Name: "http", Protocol: v1.ProtocolTCP},
 		}
 	}
 	if isHeadless {
4
vendor/k8s.io/kubernetes/test/e2e/framework/statefulset_utils.go
generated
vendored
@@ -64,7 +64,7 @@ func CreateStatefulSetService(name string, labels map[string]string) *v1.Service
 		},
 	}
 	headlessService.Spec.Ports = []v1.ServicePort{
-		{Port: 80, Name: "http", Protocol: "TCP"},
+		{Port: 80, Name: "http", Protocol: v1.ProtocolTCP},
 	}
 	headlessService.Spec.ClusterIP = "None"
 	return headlessService
@@ -810,7 +810,7 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
 			Containers: []v1.Container{
 				{
 					Name:         "nginx",
-					Image:        imageutils.GetE2EImage(imageutils.NginxSlim),
+					Image:        imageutils.GetE2EImage(imageutils.Nginx),
 					VolumeMounts: mounts,
 				},
 			},
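NginxSlim was renamed to Nginx in the 1.13 image manifest; GetE2EImage expands such constants to a fully qualified image reference. A small illustration (the printed registry and tag come from the vendored image manifest, not from this snippet):

package main

import (
	"fmt"

	imageutils "k8s.io/kubernetes/test/utils/image"
)

func main() {
	// Resolves to the manifest's nginx entry; the exact registry/tag
	// string is defined by the vendored image configuration.
	fmt.Println(imageutils.GetE2EImage(imageutils.Nginx))
}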
7
vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go
generated
vendored
@@ -31,7 +31,7 @@ import (
 	"k8s.io/client-go/tools/clientcmd"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
 	"k8s.io/kubernetes/pkg/cloudprovider"
-	"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
+	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	"k8s.io/kubernetes/pkg/kubemark"
 )
 
@@ -99,6 +99,8 @@ type TestContextType struct {
 	OutputPrintType string
 	// NodeSchedulableTimeout is the timeout for waiting for all nodes to be schedulable.
 	NodeSchedulableTimeout time.Duration
+	// SystemDaemonsetStartupTimeout is the timeout for waiting for all system daemonsets to be ready.
+	SystemDaemonsetStartupTimeout time.Duration
 	// CreateTestingNS is responsible for creating namespace used for executing e2e tests.
 	// It accepts namespace base name, which will be prepended with e2e prefix, kube client
 	// and labels to be applied to a namespace.
@@ -277,6 +279,7 @@ func RegisterClusterFlags() {
 	flag.IntVar(&TestContext.MinStartupPods, "minStartupPods", 0, "The number of pods which we need to see in 'Running' state with a 'Ready' condition of true, before we try running tests. This is useful in any cluster which needs some base pod-based services running before it can be used.")
 	flag.DurationVar(&TestContext.SystemPodsStartupTimeout, "system-pods-startup-timeout", 10*time.Minute, "Timeout for waiting for all system pods to be running before starting tests.")
 	flag.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 30*time.Minute, "Timeout for waiting for all nodes to be schedulable.")
+	flag.DurationVar(&TestContext.SystemDaemonsetStartupTimeout, "system-daemonsets-startup-timeout", 5*time.Minute, "Timeout for waiting for all system daemonsets to be ready.")
 	flag.StringVar(&TestContext.UpgradeTarget, "upgrade-target", "ci/latest", "Version to upgrade to (e.g. 'release/stable', 'release/latest', 'ci/latest', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.")
 	flag.StringVar(&TestContext.EtcdUpgradeStorage, "etcd-upgrade-storage", "", "The storage version to upgrade to (either 'etcdv2' or 'etcdv3') if doing an etcd upgrade test.")
 	flag.StringVar(&TestContext.EtcdUpgradeVersion, "etcd-upgrade-version", "", "The etcd binary version to upgrade to (e.g., '3.0.14', '2.3.7') if doing an etcd upgrade test.")
@@ -339,7 +342,7 @@ func createKubeConfig(clientCfg *restclient.Config) *clientcmdapi.Config {
 	config := clientcmdapi.NewConfig()
 
 	credentials := clientcmdapi.NewAuthInfo()
-	credentials.Token = clientCfg.BearerToken
+	credentials.TokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token"
 	credentials.ClientCertificate = clientCfg.TLSClientConfig.CertFile
 	if len(credentials.ClientCertificate) == 0 {
 		credentials.ClientCertificateData = clientCfg.TLSClientConfig.CertData
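Recording a token file instead of an inline bearer token means every kubectl invocation re-reads the service-account token, so rotated tokens keep working. The same idea with client-go's clientcmd API, as a minimal sketch (the "user" nickname is arbitrary):

package main

import (
	"fmt"

	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	cfg := clientcmdapi.NewConfig()

	auth := clientcmdapi.NewAuthInfo()
	// A token file is read at use time; an inline Token string goes stale
	// as soon as the service-account token is rotated.
	auth.TokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token"
	cfg.AuthInfos["user"] = auth

	fmt.Println(cfg.AuthInfos["user"].TokenFile)
}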
128
vendor/k8s.io/kubernetes/test/e2e/framework/util.go
generated
vendored
@@ -66,15 +66,15 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	utilyaml "k8s.io/apimachinery/pkg/util/yaml"
 	"k8s.io/apimachinery/pkg/watch"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/discovery"
 	"k8s.io/client-go/dynamic"
-	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
-	scaleclient "k8s.io/client-go/scale"
 	"k8s.io/client-go/tools/clientcmd"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	clientset "k8s.io/client-go/kubernetes"
+	scaleclient "k8s.io/client-go/scale"
+	watchtools "k8s.io/client-go/tools/watch"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	appsinternal "k8s.io/kubernetes/pkg/apis/apps"
@@ -87,6 +87,7 @@ import (
 	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
 	"k8s.io/kubernetes/pkg/controller"
+	nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
 	"k8s.io/kubernetes/pkg/controller/service"
 	"k8s.io/kubernetes/pkg/features"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
@@ -200,19 +201,10 @@ const (
 
 	// ssh port
 	sshPort = "22"
-
-	// ImagePrePullingTimeout is the time we wait for the e2e-image-puller
-	// static pods to pull the list of seeded images. If they don't pull
-	// images within this time we simply log their output and carry on
-	// with the tests.
-	ImagePrePullingTimeout = 5 * time.Minute
 )
 
 var (
-	BusyBoxImage = "busybox"
-	// Label allocated to the image puller static pod that runs on each node
-	// before e2es.
-	ImagePullerLabels = map[string]string{"name": "e2e-image-puller"}
+	BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
 
 	// For parsing Kubectl version for version-skewed testing.
 	gitVersionRegexp = regexp.MustCompile("GitVersion:\"(v.+?)\"")
@@ -250,11 +242,6 @@ func GetServerArchitecture(c clientset.Interface) string {
 	return arch
 }
 
-// GetPauseImageName fetches the pause image name for the same architecture as the apiserver.
-func GetPauseImageName(c clientset.Interface) string {
-	return imageutils.GetE2EImageWithArch(imageutils.Pause, GetServerArchitecture(c))
-}
-
 func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) {
 	return request.Resource("services").SubResource("proxy"), nil
 }
@@ -272,7 +259,7 @@ type ContainerFailures struct {
 func GetMasterHost() string {
 	masterUrl, err := url.Parse(TestContext.Host)
 	ExpectNoError(err)
-	return masterUrl.Host
+	return masterUrl.Hostname()
 }
 
 func nowStamp() string {
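url.URL.Host keeps the :port suffix (and the brackets around IPv6 literals), while Hostname() strips both, which is what callers that append their own port need. A standard-library demonstration:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	for _, raw := range []string{"https://10.0.0.1:6443", "https://[2001:db8::1]:6443"} {
		u, err := url.Parse(raw)
		if err != nil {
			panic(err)
		}
		// Host keeps port and brackets; Hostname() returns the bare host.
		fmt.Printf("Host=%s Hostname=%s\n", u.Host, u.Hostname())
	}
}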
@@ -638,7 +625,7 @@ func WaitForPodsSuccess(c clientset.Interface, ns string, successPodLabels map[s
 //
 // If ignoreLabels is not empty, pods matching this selector are ignored.
 func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
-	ignoreSelector := labels.SelectorFromSet(ignoreLabels)
+	ignoreSelector := labels.SelectorFromSet(map[string]string{})
 	start := time.Now()
 	Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
 		timeout, minPods, ns)
@@ -738,6 +725,40 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
 	return nil
 }
 
+// WaitForDaemonSets for all daemonsets in the given namespace to be ready
+// (defined as all but 'allowedNotReadyNodes' pods associated with that
+// daemonset are ready).
+func WaitForDaemonSets(c clientset.Interface, ns string, allowedNotReadyNodes int32, timeout time.Duration) error {
+	start := time.Now()
+	Logf("Waiting up to %v for all daemonsets in namespace '%s' to start",
+		timeout, ns)
+
+	return wait.PollImmediate(Poll, timeout, func() (bool, error) {
+		dsList, err := c.AppsV1().DaemonSets(ns).List(metav1.ListOptions{})
+		if err != nil {
+			Logf("Error getting daemonsets in namespace: '%s': %v", ns, err)
+			if testutils.IsRetryableAPIError(err) {
+				return false, nil
+			}
+			return false, err
+		}
+		var notReadyDaemonSets []string
+		for _, ds := range dsList.Items {
+			Logf("%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)", ds.Status.NumberReady, ds.Status.DesiredNumberScheduled, ns, ds.ObjectMeta.Name, int(time.Since(start).Seconds()))
+			if ds.Status.DesiredNumberScheduled-ds.Status.NumberReady > allowedNotReadyNodes {
+				notReadyDaemonSets = append(notReadyDaemonSets, ds.ObjectMeta.Name)
+			}
+		}
+
+		if len(notReadyDaemonSets) > 0 {
+			Logf("there are not ready daemonsets: %v", notReadyDaemonSets)
+			return false, nil
+		}
+
+		return true, nil
+	})
+}
+
 func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) {
 	for _, container := range pod.Spec.Containers {
 		if strings.Contains(container.Name, containerNameSubstr) {
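WaitForDaemonSets polls with wait.PollImmediate, so the first readiness check runs right away rather than after the first Poll interval, and a retryable API error just means "poll again" instead of aborting the wait. The generic shape of that loop, reduced to the standard wait helper (the readiness condition is a stand-in):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	checks := 0
	// PollImmediate runs the condition once immediately, then every
	// interval, until it reports done, returns an error, or times out.
	err := wait.PollImmediate(10*time.Millisecond, time.Second, func() (bool, error) {
		checks++
		return checks >= 3, nil // pretend readiness on the third check
	})
	fmt.Println("ready after", checks, "checks, err =", err)
}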
@@ -862,7 +883,9 @@ func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountN
 	if err != nil {
 		return err
 	}
-	_, err = watch.Until(timeout, w, conditions.ServiceAccountHasSecrets)
+	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
+	defer cancel()
+	_, err = watchtools.UntilWithoutRetry(ctx, w, conditions.ServiceAccountHasSecrets)
 	return err
 }
 
@@ -1578,7 +1601,9 @@ func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.D
 	if err != nil {
 		return err
 	}
-	_, err = watch.Until(timeout, w, func(event watch.Event) (bool, error) {
+	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
+	defer cancel()
+	_, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
 		switch event.Type {
 		case watch.Deleted:
 			return false, apierrs.NewNotFound(schema.GroupResource{Resource: "replicationcontrollers"}, "")
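Both call sites migrate from the deprecated watch.Until(timeout, ...) to watchtools.UntilWithoutRetry, where the deadline travels in a context (ContextWithOptionalTimeout treats a zero timeout as "no deadline"). A runnable sketch of the new call shape against an in-memory watcher (the fake watcher and nil payload are purely for illustration):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/watch"
	watchtools "k8s.io/client-go/tools/watch"
)

func main() {
	w := watch.NewFake() // in-memory watch.Interface for the sketch
	go func() {
		w.Add(nil) // deliver one event; the payload is irrelevant here
	}()

	// A zero timeout would mean "wait forever"; here the wait is capped.
	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), 2*time.Second)
	defer cancel()

	ev, err := watchtools.UntilWithoutRetry(ctx, w, func(e watch.Event) (bool, error) {
		return e.Type == watch.Added, nil // stop on the first Added event
	})
	fmt.Println(ev.Type, err)
}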
@@ -2621,6 +2646,8 @@ func GetReadyNodesIncludingTaintedOrDie(c clientset.Interface) (nodes *v1.NodeLi
 	return nodes
 }
 
+// WaitForAllNodesSchedulable waits up to timeout for all
+// (but TestContext.AllowedNotReadyNodes) to become scheduable.
 func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
 	Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)
 
@@ -2643,7 +2670,13 @@ func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) er
 		}
 		for i := range nodes.Items {
 			node := &nodes.Items[i]
-			if !isNodeSchedulable(node) {
+			if _, hasMasterRoleLabel := node.ObjectMeta.Labels[service.LabelNodeRoleMaster]; hasMasterRoleLabel {
+				// Kops clusters have masters with spec.unscheduable = false and
+				// node-role.kubernetes.io/master NoSchedule taint.
+				// Don't wait for them.
+				continue
+			}
+			if !isNodeSchedulable(node) || !isNodeUntainted(node) {
 				notSchedulable = append(notSchedulable, node)
 			}
 		}
@@ -2659,10 +2692,11 @@ func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) er
 		if len(nodes.Items) >= largeClusterThreshold && attempt%10 == 0 {
 			Logf("Unschedulable nodes:")
 			for i := range notSchedulable {
-				Logf("-> %s Ready=%t Network=%t",
+				Logf("-> %s Ready=%t Network=%t Taints=%v",
 					notSchedulable[i].Name,
 					IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeReady, true),
-					IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeNetworkUnavailable, false))
+					IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeNetworkUnavailable, false),
+					notSchedulable[i].Spec.Taints)
 			}
 			Logf("================================")
 		}
@@ -4119,7 +4153,9 @@ func CheckNodesReady(c clientset.Interface, size int, timeout time.Duration) ([]
 
 	// Filter out not-ready nodes.
 	FilterNodes(nodes, func(node v1.Node) bool {
-		return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
+		nodeReady := IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
+		networkReady := IsNodeConditionUnset(&node, v1.NodeNetworkUnavailable) || IsNodeConditionSetAsExpected(&node, v1.NodeNetworkUnavailable, false)
+		return nodeReady && networkReady
 	})
 	numReady := len(nodes.Items)
 
@@ -4492,7 +4528,7 @@ func isElementOf(podUID types.UID, pods *v1.PodList) bool {
 const proxyTimeout = 2 * time.Minute
 
 // NodeProxyRequest performs a get on a node proxy endpoint given the nodename and rest client.
-func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.Result, error) {
+func NodeProxyRequest(c clientset.Interface, node, endpoint string, port int) (restclient.Result, error) {
 	// proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
 	// This will leak a goroutine if proxy hangs. #22165
 	var result restclient.Result
@@ -4501,7 +4537,7 @@ func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.
 		result = c.CoreV1().RESTClient().Get().
 			Resource("nodes").
 			SubResource("proxy").
-			Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
+			Name(fmt.Sprintf("%v:%v", node, port)).
 			Suffix(endpoint).
 			Do()
 
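NodeProxyRequest now takes the target port instead of hard-coding the kubelet port; the getKubeletPods hunk below shows existing callers passing ports.KubeletPort explicitly. A hypothetical caller of the new signature (import paths are the ones this file already uses):

import (
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/master/ports"
	"k8s.io/kubernetes/test/e2e/framework"
)

// fetchKubeletPods is a hypothetical wrapper: the port is now explicit, so
// other node-local daemons could be reached through the same proxy call.
func fetchKubeletPods(c clientset.Interface, node string) (restclient.Result, error) {
	return framework.NodeProxyRequest(c, node, "pods", ports.KubeletPort)
}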
@@ -4529,7 +4565,7 @@ func GetKubeletRunningPods(c clientset.Interface, node string) (*v1.PodList, err
 
 func getKubeletPods(c clientset.Interface, node, resource string) (*v1.PodList, error) {
 	result := &v1.PodList{}
-	client, err := NodeProxyRequest(c, node, resource)
+	client, err := NodeProxyRequest(c, node, resource, ports.KubeletPort)
 	if err != nil {
 		return &v1.PodList{}, err
 	}
@@ -4798,7 +4834,8 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
 func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeList) {
 	nodes := &v1.NodeList{}
 	masters := sets.NewString()
-	all, _ := c.CoreV1().Nodes().List(metav1.ListOptions{})
+	all, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
+	ExpectNoError(err)
 	for _, n := range all.Items {
 		if system.IsMasterNode(n.Name) {
 			masters.Insert(n.Name)
@@ -4842,8 +4879,8 @@ func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testut
 func (p *E2ETestNodePreparer) PrepareNodes() error {
 	nodes := GetReadySchedulableNodesOrDie(p.client)
 	numTemplates := 0
-	for k := range p.countToStrategy {
-		numTemplates += k
+	for _, v := range p.countToStrategy {
+		numTemplates += v.Count
 	}
 	if numTemplates > len(nodes.Items) {
 		return fmt.Errorf("Can't prepare Nodes. Got more templates than existing Nodes.")
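The old loop summed slice indices (0, 1, 2, ...) rather than the per-strategy counts; ranging over the values and adding v.Count fixes the arithmetic. The bug in miniature:

package main

import "fmt"

type countToStrategy struct{ Count int }

func main() {
	strategies := []countToStrategy{{Count: 5}, {Count: 3}}

	wrong, right := 0, 0
	for k := range strategies {
		wrong += k // sums indices: 0 + 1 = 1
	}
	for _, v := range strategies {
		right += v.Count // sums counts: 5 + 3 = 8
	}
	fmt.Println(wrong, right) // 1 8
}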
@@ -4999,7 +5036,7 @@ func GetMasterAddress(c clientset.Interface) string {
 
 // GetNodeExternalIP returns node external IP concatenated with port 22 for ssh
 // e.g. 1.2.3.4:22
-func GetNodeExternalIP(node *v1.Node) string {
+func GetNodeExternalIP(node *v1.Node) (string, error) {
 	Logf("Getting external IP address for %s", node.Name)
 	host := ""
 	for _, a := range node.Status.Addresses {
@@ -5009,9 +5046,26 @@ func GetNodeExternalIP(node *v1.Node) string {
 		}
 	}
 	if host == "" {
-		Failf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
+		return "", fmt.Errorf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
 	}
-	return host
+	return host, nil
 }
 
+// GetNodeInternalIP returns node internal IP
+func GetNodeInternalIP(node *v1.Node) (string, error) {
+	host := ""
+	for _, address := range node.Status.Addresses {
+		if address.Type == v1.NodeInternalIP {
+			if address.Address != "" {
+				host = net.JoinHostPort(address.Address, sshPort)
+				break
+			}
+		}
+	}
+	if host == "" {
+		return "", fmt.Errorf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
+	}
+	return host, nil
+}
+
 // SimpleGET executes a get on the given url, returns error if non-200 returned.
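Returning an error instead of calling Failf lets callers fall back (say, from the external to the internal address) rather than aborting the whole run. A hypothetical fallback written against the two signatures above:

import (
	v1 "k8s.io/api/core/v1"

	"k8s.io/kubernetes/test/e2e/framework"
)

// sshAddressFor is a hypothetical helper: prefer the node's external
// address and fall back to the internal one, now that both getters report
// failure via error instead of failing the test outright.
func sshAddressFor(node *v1.Node) (string, error) {
	if host, err := framework.GetNodeExternalIP(node); err == nil {
		return host, nil
	}
	return framework.GetNodeInternalIP(node)
}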
@@ -5076,7 +5130,7 @@ func (f *Framework) NewTestPod(name string, requests v1.ResourceList, limits v1.
 			Containers: []v1.Container{
 				{
 					Name:  "pause",
-					Image: GetPauseImageName(f.ClientSet),
+					Image: imageutils.GetPauseImageName(),
 					Resources: v1.ResourceRequirements{
 						Requests: requests,
 						Limits:   limits,
99
vendor/k8s.io/kubernetes/test/e2e/framework/volume_util.go
generated
vendored
@@ -41,16 +41,17 @@ package framework
 
 import (
 	"fmt"
 	"strconv"
 	"time"
 
 	"k8s.io/api/core/v1"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/rand"
 	clientset "k8s.io/client-go/kubernetes"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 
 	"github.com/golang/glog"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 )
@@ -66,7 +67,7 @@ const (
 	TiB int64 = 1024 * GiB
 
 	// Waiting period for volume server (Ceph, ...) to initialize itself.
-	VolumeServerPodStartupSleep = 20 * time.Second
+	VolumeServerPodStartupTimeout = 3 * time.Minute
 
 	// Waiting period for pod to be cleaned up and unmount its volumes so we
 	// don't tear down containers with NFS/Ceph/Gluster server too early.
@@ -92,6 +93,8 @@ type VolumeTestConfig struct {
 	// map <host (source) path> -> <container (dst.) path>
 	// if <host (source) path> is empty, mount a tmpfs emptydir
 	ServerVolumes map[string]string
+	// Message to wait for before starting clients
+	ServerReadyMessage string
 	// Wait for the pod to terminate successfully
 	// False indicates that the pod is long running
 	WaitForCompletion bool
@@ -114,11 +117,12 @@ type VolumeTest struct {
 // NFS-specific wrapper for CreateStorageServer.
 func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
 	config = VolumeTestConfig{
-		Namespace:     namespace,
-		Prefix:        "nfs",
-		ServerImage:   imageutils.GetE2EImage(imageutils.VolumeNFSServer),
-		ServerPorts:   []int{2049},
-		ServerVolumes: map[string]string{"": "/exports"},
+		Namespace:          namespace,
+		Prefix:             "nfs",
+		ServerImage:        imageutils.GetE2EImage(imageutils.VolumeNFSServer),
+		ServerPorts:        []int{2049},
+		ServerVolumes:      map[string]string{"": "/exports"},
+		ServerReadyMessage: "NFS started",
 	}
 	if len(args) > 0 {
 		config.ServerArgs = args
@@ -180,6 +184,7 @@ func NewISCSIServer(cs clientset.Interface, namespace string) (config VolumeTest
 			// iSCSI container needs to insert modules from the host
 			"/lib/modules": "/lib/modules",
 		},
+		ServerReadyMessage: "Configuration restored from /etc/target/saveconfig.json",
 	}
 	pod, ip = CreateStorageServer(cs, config)
 	return config, pod, ip
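Each server wrapper now declares the log line that marks readiness, and StartVolumeServer (in a hunk below) waits for that string with the new 3-minute timeout instead of a fixed sleep. A hypothetical test setup using the NFS wrapper:

import (
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
)

// setupNFS is a hypothetical helper: NewNFSServer only hands back the pod
// once its log contains "NFS started" (the ServerReadyMessage set above),
// so clients no longer race a fixed startup sleep.
func setupNFS(cs clientset.Interface, ns string) (framework.VolumeTestConfig, string) {
	config, serverPod, serverIP := framework.NewNFSServer(cs, ns, nil)
	framework.Logf("NFS server %s ready at %s", serverPod.Name, serverIP)
	return config, serverIP
}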
@@ -195,16 +200,9 @@ func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestCo
 		ServerVolumes: map[string]string{
 			"/lib/modules": "/lib/modules",
 		},
+		ServerReadyMessage: "Ceph is ready",
 	}
 	pod, ip = CreateStorageServer(cs, config)
-
-	// Ceph server container needs some time to start. Tests continue working if
-	// this sleep is removed, however kubelet logs (and kubectl describe
-	// <client pod>) would be cluttered with error messages about non-existing
-	// image.
-	Logf("sleeping a bit to give ceph server time to initialize")
-	time.Sleep(VolumeServerPodStartupSleep)
-
 	// create secrets for the server
 	secret = &v1.Secret{
 		TypeMeta: metav1.TypeMeta{
@@ -350,40 +348,52 @@ func StartVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.
 			ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err)
 		}
 	}
+	if config.ServerReadyMessage != "" {
+		_, err := LookForStringInLog(pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout)
+		ExpectNoError(err, "Failed to find %q in pod logs: %s", config.ServerReadyMessage, err)
+	}
 	return pod
 }
 
+// Wrapper of cleanup function for volume server without secret created by specific CreateStorageServer function.
+func CleanUpVolumeServer(f *Framework, serverPod *v1.Pod) {
+	CleanUpVolumeServerWithSecret(f, serverPod, nil)
+}
+
+// Wrapper of cleanup function for volume server with secret created by specific CreateStorageServer function.
+func CleanUpVolumeServerWithSecret(f *Framework, serverPod *v1.Pod, secret *v1.Secret) {
+	cs := f.ClientSet
+	ns := f.Namespace
+
+	if secret != nil {
+		Logf("Deleting server secret %q...", secret.Name)
+		err := cs.CoreV1().Secrets(ns.Name).Delete(secret.Name, &metav1.DeleteOptions{})
+		if err != nil {
+			Logf("Delete secret failed: %v", err)
+		}
+	}
+
+	Logf("Deleting server pod %q...", serverPod.Name)
+	err := DeletePodWithWait(f, cs, serverPod)
+	if err != nil {
+		Logf("Server pod delete failed: %v", err)
+	}
+}
+
 // Clean both server and client pods.
 func VolumeTestCleanup(f *Framework, config VolumeTestConfig) {
 	By(fmt.Sprint("cleaning the environment after ", config.Prefix))
 
 	defer GinkgoRecover()
 
-	client := f.ClientSet
-	podClient := client.CoreV1().Pods(config.Namespace)
+	cs := f.ClientSet
 
-	err := podClient.Delete(config.Prefix+"-client", nil)
-	if err != nil {
-		// Log the error before failing test: if the test has already failed,
-		// framework.ExpectNoError() won't print anything to logs!
-		glog.Warningf("Failed to delete client pod: %v", err)
-		ExpectNoError(err, "Failed to delete client pod: %v", err)
-	}
+	err := DeletePodWithWaitByName(f, cs, config.Prefix+"-client", config.Namespace)
+	Expect(err).To(BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-client", config.Namespace)
 
 	if config.ServerImage != "" {
-		if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) {
-			ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
-		}
 		// See issue #24100.
 		// Prevent umount errors by making sure making sure the client pod exits cleanly *before* the volume server pod exits.
 		By("sleeping a bit so kubelet can unmount and detach the volume")
 		time.Sleep(PodCleanupTimeout)
 
-		err = podClient.Delete(config.Prefix+"-server", nil)
-		if err != nil {
-			glog.Warningf("Failed to delete server pod: %v", err)
-			ExpectNoError(err, "Failed to delete server pod: %v", err)
-		}
+		err := DeletePodWithWaitByName(f, cs, config.Prefix+"-server", config.Namespace)
+		Expect(err).To(BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-server", config.Namespace)
 	}
 }
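The new cleanup wrappers pair delete-and-wait pod removal with optional secret deletion, so RBD-style tests can tear everything down with one deferred call. A hedged usage sketch; NewRBDServer's full signature is truncated in the hunk above, so its return order here is an assumption:

import (
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
)

// runRBDTest is hypothetical; the (config, pod, secret, ip) return order of
// NewRBDServer is assumed from context, not confirmed by this diff.
func runRBDTest(f *framework.Framework, cs clientset.Interface) {
	config, serverPod, secret, serverIP := framework.NewRBDServer(cs, f.Namespace.Name)
	// One deferred call removes both the server pod and its Ceph secret;
	// CleanUpVolumeServer(f, serverPod) would skip the secret step.
	defer framework.CleanUpVolumeServerWithSecret(f, serverPod, secret)
	framework.Logf("RBD server ready at %s (prefix %s)", serverIP, config.Prefix)
}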
@@ -435,9 +445,7 @@ func TestVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGro
 	}
 	podsNamespacer := client.CoreV1().Pods(config.Namespace)
 
-	if fsGroup != nil {
-		clientPod.Spec.SecurityContext.FSGroup = fsGroup
-	}
+	clientPod.Spec.SecurityContext.FSGroup = fsGroup
 
 	for i, test := range tests {
 		volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i)
@@ -476,6 +484,8 @@
 func InjectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.VolumeSource, content string) {
 	By(fmt.Sprint("starting ", config.Prefix, " injector"))
 	podClient := client.CoreV1().Pods(config.Namespace)
+	podName := fmt.Sprintf("%s-injector-%s", config.Prefix, rand.String(4))
+	volMountName := fmt.Sprintf("%s-volume-%s", config.Prefix, rand.String(4))
 
 	injectPod := &v1.Pod{
 		TypeMeta: metav1.TypeMeta{
@@ -483,7 +493,7 @@ func InjectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.V
 			APIVersion: "v1",
 		},
 		ObjectMeta: metav1.ObjectMeta{
-			Name: config.Prefix + "-injector",
+			Name: podName,
 			Labels: map[string]string{
 				"role": config.Prefix + "-injector",
 			},
@@ -497,7 +507,7 @@ func InjectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.V
 					Args: []string{"-c", "echo '" + content + "' > /mnt/index.html && chmod o+rX /mnt /mnt/index.html"},
 					VolumeMounts: []v1.VolumeMount{
 						{
-							Name:      config.Prefix + "-volume",
+							Name:      volMountName,
 							MountPath: "/mnt",
 						},
 					},
@@ -511,16 +521,17 @@ func InjectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.V
 			RestartPolicy: v1.RestartPolicyNever,
 			Volumes: []v1.Volume{
 				{
-					Name:         config.Prefix + "-volume",
+					Name:         volMountName,
 					VolumeSource: volume,
 				},
 			},
 			NodeName:     config.ClientNodeName,
 			NodeSelector: config.NodeSelector,
 		},
 	}
 
 	defer func() {
-		podClient.Delete(config.Prefix+"-injector", nil)
+		podClient.Delete(podName, nil)
 	}()
 
 	injectPod, err := podClient.Create(injectPod)