Bumping k8s dependencies to 1.13

Cheng Xing
2018-11-16 14:08:25 -08:00
parent 305407125c
commit b4c0b68ec7
8002 changed files with 884099 additions and 276228 deletions


@@ -13,12 +13,17 @@ go_library(
    ],
    importpath = "k8s.io/kubernetes/test/e2e/storage/utils",
    deps = [
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/rbac/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/utils/image:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//vendor/github.com/onsi/gomega:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/utils/exec:go_default_library",
    ],
)


@@ -24,9 +24,14 @@ import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	imageutils "k8s.io/kubernetes/test/utils/image"
	uexec "k8s.io/utils/exec"
)
type KubeletOpt string

@@ -38,11 +43,51 @@ const (
	KRestart KubeletOpt = "restart"
)

const (
	// ClusterRole name for the e2e test privileged Pod Security Policy user
	podSecurityPolicyPrivilegedClusterRoleName = "e2e-test-privileged-psp"
)

// PodExec wraps RunKubectl to execute a bash command in the target pod.
func PodExec(pod *v1.Pod, bashExec string) (string, error) {
	return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--", "/bin/sh", "-c", bashExec)
}

// VerifyExecInPodSucceed verifies that the bash command runs in the target pod and succeeds.
func VerifyExecInPodSucceed(pod *v1.Pod, bashExec string) {
	_, err := PodExec(pod, bashExec)
	if err != nil {
		if err, ok := err.(uexec.CodeExitError); ok {
			exitCode := err.ExitStatus()
			Expect(err).NotTo(HaveOccurred(),
				"%q should succeed, but failed with exit code %d and error message %q",
				bashExec, exitCode, err)
		} else {
			Expect(err).NotTo(HaveOccurred(),
				"%q should succeed, but failed with error message %q",
				bashExec, err)
		}
	}
}

// VerifyExecInPodFail verifies that the bash command runs in the target pod and fails with the given exit code.
func VerifyExecInPodFail(pod *v1.Pod, bashExec string, exitCode int) {
	_, err := PodExec(pod, bashExec)
	if err != nil {
		if err, ok := err.(uexec.CodeExitError); ok {
			actualExitCode := err.ExitStatus()
			Expect(actualExitCode).To(Equal(exitCode),
				"%q should fail with exit code %d, but failed with exit code %d and error message %q",
				bashExec, exitCode, actualExitCode, err)
		} else {
			Expect(err).NotTo(HaveOccurred(),
				"%q should fail with exit code %d, but failed with error message %q",
				bashExec, exitCode, err)
		}
	}
	Expect(err).To(HaveOccurred(), "%q should fail with exit code %d, but exited without error", bashExec, exitCode)
}
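
As an illustrative sketch (not part of this commit), a storage test with a volume mounted at /mnt/test might drive these helpers as follows; the pod variable and paths are hypothetical:

	// Write through the mounted volume; any nonzero exit code fails the test.
	VerifyExecInPodSucceed(pod, "mkdir -p /mnt/test/dir && echo data > /mnt/test/dir/file")
	// `cat` on a missing file exits with status 1, which the test asserts explicitly.
	VerifyExecInPodFail(pod, "cat /mnt/test/no-such-file", 1)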

// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
// for the desired status.
// - First issues the command via `systemctl`

@@ -244,7 +289,7 @@ func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
			Containers: []v1.Container{
				{
					Name:    "volume-tester",
					Image:   "busybox",
					Image:   imageutils.GetE2EImage(imageutils.BusyBox),
					Command: []string{"/bin/sh"},
					Args:    []string{"-c", command},
					VolumeMounts: []v1.VolumeMount{

@@ -276,3 +321,123 @@ func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
	}()
	framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
}
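
RunInPodWithVolume runs a one-off busybox pod with the named PVC mounted and executes the given shell command to completion. A hypothetical call (the claim name and mount path here are illustrative; the mount point is defined in the body elided by the hunk above):

	// Check that data written by an earlier pod is still on the provisioned volume.
	RunInPodWithVolume(c, ns, "pvc-tester", "grep 'hello world' /mnt/test/data")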

// StartExternalProvisioner starts an external NFS provisioner pod in the given
// namespace and waits until it is running.
func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginName string) *v1.Pod {
	podClient := c.CoreV1().Pods(ns)
	provisionerPod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "external-provisioner-",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "nfs-provisioner",
					Image: "quay.io/kubernetes_incubator/nfs-provisioner:v2.1.0-k8s1.11",
					SecurityContext: &v1.SecurityContext{
						Capabilities: &v1.Capabilities{
							Add: []v1.Capability{"DAC_READ_SEARCH"},
						},
					},
					Args: []string{
						"-provisioner=" + externalPluginName,
						"-grace-period=0",
					},
					Ports: []v1.ContainerPort{
						{Name: "nfs", ContainerPort: 2049},
						{Name: "mountd", ContainerPort: 20048},
						{Name: "rpcbind", ContainerPort: 111},
						{Name: "rpcbind-udp", ContainerPort: 111, Protocol: v1.ProtocolUDP},
					},
					Env: []v1.EnvVar{
						{
							Name: "POD_IP",
							ValueFrom: &v1.EnvVarSource{
								FieldRef: &v1.ObjectFieldSelector{
									FieldPath: "status.podIP",
								},
							},
						},
					},
					ImagePullPolicy: v1.PullIfNotPresent,
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "export-volume",
							MountPath: "/export",
						},
					},
				},
			},
			Volumes: []v1.Volume{
				{
					Name: "export-volume",
					VolumeSource: v1.VolumeSource{
						EmptyDir: &v1.EmptyDirVolumeSource{},
					},
				},
			},
		},
	}
	provisionerPod, err := podClient.Create(provisionerPod)
	framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err)

	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod))

	By("locating the provisioner pod")
	pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)
	return pod
}
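
A test wires this provisioner to a StorageClass through the plugin name; a hypothetical usage, with an illustrative plugin name and cleanup via the same pods client the function uses:

	// Claims using a StorageClass whose provisioner field is "example.com/nfs"
	// will be served by this pod.
	pod := StartExternalProvisioner(c, ns, "example.com/nfs")
	defer c.CoreV1().Pods(ns).Delete(pod.Name, nil)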

// PrivilegedTestPSPClusterRoleBinding binds, or with teardown unbinds, the
// privileged Pod Security Policy ClusterRole for the given service accounts.
func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface,
	namespace string,
	teardown bool,
	saNames []string) {
	bindingString := "Binding"
	if teardown {
		bindingString = "Unbinding"
	}
	roleBindingClient := client.RbacV1().RoleBindings(namespace)
	for _, saName := range saNames {
		By(fmt.Sprintf("%v privileged Pod Security Policy to the service account %s", bindingString, saName))
		binding := &rbacv1.RoleBinding{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "psp-" + saName,
				Namespace: namespace,
			},
			Subjects: []rbacv1.Subject{
				{
					Kind:      rbacv1.ServiceAccountKind,
					Name:      saName,
					Namespace: namespace,
				},
			},
			RoleRef: rbacv1.RoleRef{
				Kind:     "ClusterRole",
				Name:     podSecurityPolicyPrivilegedClusterRoleName,
				APIGroup: "rbac.authorization.k8s.io",
			},
		}

		// Remove any stale binding first and wait until the deletion completes.
		roleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{})
		err := wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) {
			_, err := roleBindingClient.Get(binding.GetName(), metav1.GetOptions{})
			return apierrs.IsNotFound(err), nil
		})
		framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)

		if teardown {
			continue
		}

		_, err = roleBindingClient.Create(binding)
		framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)
	}
}
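
Because teardown is a flag, a test typically invokes the helper twice; a hypothetical pairing (namespace and service-account names are illustrative):

	// Grant the privileged PSP to the test's service account up front...
	PrivilegedTestPSPClusterRoleBinding(c, ns, false, []string{"default"})
	// ...and revoke it again when the test finishes.
	defer PrivilegedTestPSPClusterRoleBinding(c, ns, true, []string{"default"})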