Bumping k8s dependencies to 1.13

Cheng Xing
2018-11-16 14:08:25 -08:00
parent 305407125c
commit b4c0b68ec7
8002 changed files with 884099 additions and 276228 deletions

View File

@@ -9,6 +9,7 @@ go_library(
"ephemeral_volume.go",
"flexvolume.go",
"generic_persistent_volume-disruptive.go",
"in_tree_volumes.go",
"mounted_volume_resize.go",
"nfs_persistent_volume-disruptive.go",
"pd.go",
@@ -20,7 +21,7 @@ go_library(
"regional_pd.go",
"subpath.go",
"volume_expand.go",
"volume_io.go",
"volume_limits.go",
"volume_metrics.go",
"volume_provisioning.go",
"volumes.go",
@@ -29,6 +30,7 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/apis/storage/v1/util:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/kubelet/apis:go_default_library",
@@ -36,12 +38,44 @@ go_library(
"//pkg/util/slice:go_default_library",
"//pkg/util/version:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/version:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/crd:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/generated:go_default_library",
"//test/e2e/manifest:go_default_library",
"//test/e2e/storage/drivers:go_default_library",
"//test/e2e/storage/testsuites:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/e2e/storage/vsphere:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library",
@@ -51,32 +85,6 @@ go_library(
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/prometheus/common/model:go_default_library",
"//vendor/google.golang.org/api/googleapi:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/version:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
@@ -91,6 +99,9 @@ filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//test/e2e/storage/drivers:all-srcs",
"//test/e2e/storage/testpatterns:all-srcs",
"//test/e2e/storage/testsuites:all-srcs",
"//test/e2e/storage/utils:all-srcs",
"//test/e2e/storage/vsphere:all-srcs",
],

View File

@@ -21,12 +21,17 @@ package storage
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"time"
"k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
@@ -35,13 +40,17 @@ import (
"k8s.io/kubernetes/test/e2e/manifest"
. "github.com/onsi/ginkgo"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
csicrd "k8s.io/csi-api/pkg/crd"
)
var csiImageVersions = map[string]string{
"hostpathplugin": "v0.2.0",
"hostpathplugin": "canary", // TODO (verult) update tag once new hostpathplugin release is cut
"csi-attacher": "v0.2.0",
"csi-provisioner": "v0.2.1",
"driver-registrar": "v0.2.0",
"driver-registrar": "v0.3.0",
}
func csiContainerImage(image string) string {
@@ -117,7 +126,8 @@ func csiServiceAccount(
serviceAccountClient := client.CoreV1().ServiceAccounts(config.Namespace)
sa := &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
Name: serviceAccountName,
Namespace: config.Namespace,
},
}
@@ -154,14 +164,13 @@ func csiClusterRoleBindings(
By(fmt.Sprintf("%v cluster roles %v to the CSI service account %v", bindingString, clusterRolesNames, sa.GetName()))
clusterRoleBindingClient := client.RbacV1().ClusterRoleBindings()
for _, clusterRoleName := range clusterRolesNames {
binding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-" + clusterRoleName + "-" + config.Namespace + "-role-binding",
Name: clusterRoleName + "-" + config.Namespace + "-" + string(uuid.NewUUID()),
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Kind: rbacv1.ServiceAccountKind,
Name: sa.GetName(),
Namespace: sa.GetNamespace(),
},
@@ -239,6 +248,7 @@ func csiHostPathPod(
Args: []string{
"--v=5",
"--csi-address=/csi/csi.sock",
"--kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock",
},
Env: []v1.EnvVar{
{
@@ -255,6 +265,10 @@ func csiHostPathPod(
Name: "socket-dir",
MountPath: "/csi",
},
{
Name: "registration-dir",
MountPath: "/registration",
},
},
},
{
@@ -327,6 +341,15 @@ func csiHostPathPod(
},
},
},
{
Name: "registration-dir",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/kubelet/plugins",
Type: &hostPathType,
},
},
},
{
Name: "mountpoint-dir",
VolumeSource: v1.VolumeSource{
@@ -413,3 +436,87 @@ func deployGCEPDCSIDriver(
framework.ExpectNoError(err, "Failed to create DaemonSet: %v", nodeds.Name)
}
func createCSICRDs(c apiextensionsclient.Interface) {
By("Creating CSI CRDs")
crds := []*apiextensionsv1beta1.CustomResourceDefinition{
csicrd.CSIDriverCRD(),
csicrd.CSINodeInfoCRD(),
}
for _, crd := range crds {
_, err := c.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd)
framework.ExpectNoError(err, "Failed to create CSI CRD %q: %v", crd.Name, err)
}
}
func deleteCSICRDs(c apiextensionsclient.Interface) {
By("Deleting CSI CRDs")
csiDriverCRDName := csicrd.CSIDriverCRD().Name
csiNodeInfoCRDName := csicrd.CSINodeInfoCRD().Name
err := c.ApiextensionsV1beta1().CustomResourceDefinitions().Delete(csiDriverCRDName, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete CSI CRD %q: %v", csiDriverCRDName, err)
err = c.ApiextensionsV1beta1().CustomResourceDefinitions().Delete(csiNodeInfoCRDName, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete CSI CRD %q: %v", csiNodeInfoCRDName, err)
}
func shredFile(filePath string) {
if _, err := os.Stat(filePath); os.IsNotExist(err) {
framework.Logf("File %v was not found, skipping shredding", filePath)
return
}
framework.Logf("Shredding file %v", filePath)
_, _, err := framework.RunCmd("shred", "--remove", filePath)
if err != nil {
framework.Logf("Failed to shred file %v: %v", filePath, err)
}
if _, err := os.Stat(filePath); os.IsNotExist(err) {
framework.Logf("File %v successfully shredded", filePath)
return
}
// Shred failed. Try to remove the file for good measure
err = os.Remove(filePath)
framework.ExpectNoError(err, "Failed to remove service account file %s", filePath)
}
// createGCESecrets downloads the GCP IAM Key for the default compute service account
// and puts it in a secret for the GCE PD CSI Driver to consume
func createGCESecrets(client clientset.Interface, config framework.VolumeTestConfig) {
saEnv := "E2E_GOOGLE_APPLICATION_CREDENTIALS"
saFile := fmt.Sprintf("/tmp/%s/cloud-sa.json", string(uuid.NewUUID()))
os.MkdirAll(path.Dir(saFile), 0750)
defer os.Remove(path.Dir(saFile))
premadeSAFile, ok := os.LookupEnv(saEnv)
if !ok {
framework.Logf("Could not find env var %v, please either create cloud-sa"+
" secret manually or rerun test after setting %v to the filepath of"+
" the GCP Service Account to give to the GCE Persistent Disk CSI Driver", saEnv, saEnv)
return
}
framework.Logf("Found CI service account key at %v", premadeSAFile)
// Need to copy it to saFile
stdout, stderr, err := framework.RunCmd("cp", premadeSAFile, saFile)
framework.ExpectNoError(err, "error copying service account key: %s\nstdout: %s\nstderr: %s", err, stdout, stderr)
defer shredFile(saFile)
// Create Secret with this Service Account
fileBytes, err := ioutil.ReadFile(saFile)
framework.ExpectNoError(err, "Failed to read file %v", saFile)
s := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "cloud-sa",
Namespace: config.Namespace,
},
Type: v1.SecretTypeOpaque,
Data: map[string][]byte{
filepath.Base(saFile): fileBytes,
},
}
_, err = client.CoreV1().Secrets(config.Namespace).Create(s)
framework.ExpectNoError(err, "Failed to create Secret %v", s.GetName())
}

View File

@@ -23,10 +23,18 @@ import (
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
csiv1alpha1 "k8s.io/csi-api/pkg/apis/csi/v1alpha1"
csiclient "k8s.io/csi-api/pkg/client/clientset/versioned"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
"crypto/sha256"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -46,22 +54,25 @@ type csiTestDriver interface {
var csiTestDrivers = map[string]func(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver{
"hostPath": initCSIHostpath,
// Feature tag to skip test in CI, pending fix of #62237
"[Feature: GCE PD CSI Plugin] gcePD": initCSIgcePD,
"gcePD": initCSIgcePD,
}
var _ = utils.SIGDescribe("CSI Volumes", func() {
var _ = utils.SIGDescribe("[Serial] CSI Volumes", func() {
f := framework.NewDefaultFramework("csi-mock-plugin")
var (
cs clientset.Interface
ns *v1.Namespace
node v1.Node
config framework.VolumeTestConfig
cs clientset.Interface
crdclient apiextensionsclient.Interface
csics csiclient.Interface
ns *v1.Namespace
node v1.Node
config framework.VolumeTestConfig
)
BeforeEach(func() {
cs = f.ClientSet
crdclient = f.APIExtensionsClientSet
csics = f.CSIClientSet
ns = f.Namespace
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
node = nodes.Items[rand.Intn(len(nodes.Items))]
@@ -73,13 +84,18 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
WaitForCompletion: true,
}
csiDriverRegistrarClusterRole(config)
createCSICRDs(crdclient)
})
AfterEach(func() {
deleteCSICRDs(crdclient)
})
for driverName, initCSIDriver := range csiTestDrivers {
curDriverName := driverName
curInitCSIDriver := initCSIDriver
Context(fmt.Sprintf("CSI plugin test using CSI driver: %s", curDriverName), func() {
Context(fmt.Sprintf("CSI plugin test using CSI driver: %s [Serial]", curDriverName), func() {
var (
driver csiTestDriver
)
@@ -102,8 +118,183 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
})
})
}
// Use [Serial], because there can be only one CSIDriver for csi-hostpath driver.
Context("CSI attach test using HostPath driver [Serial][Feature:CSISkipAttach]", func() {
var (
driver csiTestDriver
)
BeforeEach(func() {
driver = initCSIHostpath(f, config)
driver.createCSIDriver()
})
AfterEach(func() {
driver.cleanupCSIDriver()
})
tests := []struct {
name string
driverAttachable bool
driverExists bool
expectVolumeAttachment bool
}{
{
name: "non-attachable volume does not need VolumeAttachment",
driverAttachable: false,
driverExists: true,
expectVolumeAttachment: false,
},
{
name: "attachable volume needs VolumeAttachment",
driverAttachable: true,
driverExists: true,
expectVolumeAttachment: true,
},
{
name: "volume with no CSI driver needs VolumeAttachment",
driverExists: false,
expectVolumeAttachment: true,
},
}
for _, t := range tests {
test := t
It(test.name, func() {
if test.driverExists {
driver := createCSIDriver(csics, test.driverAttachable)
if driver != nil {
defer csics.CsiV1alpha1().CSIDrivers().Delete(driver.Name, nil)
}
}
By("Creating pod")
t := driver.createStorageClassTest(node)
class, claim, pod := startPausePod(cs, t, ns.Name)
if class != nil {
defer cs.StorageV1().StorageClasses().Delete(class.Name, nil)
}
if claim != nil {
defer cs.CoreV1().PersistentVolumeClaims(ns.Name).Delete(claim.Name, nil)
}
if pod != nil {
// Fully delete (=unmount) the pod before deleting CSI driver
defer framework.DeletePodWithWait(f, cs, pod)
}
if pod == nil {
return
}
err := framework.WaitForPodNameRunningInNamespace(cs, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "Failed to start pod: %v", err)
By("Checking if VolumeAttachment was created for the pod")
// Check whether the VolumeAttachment exists, as the test case expects
handle := getVolumeHandle(cs, claim)
attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, t.provisioner, node.Name)))
attachmentName := fmt.Sprintf("csi-%x", attachmentHash)
_, err = cs.StorageV1beta1().VolumeAttachments().Get(attachmentName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
if test.expectVolumeAttachment {
framework.ExpectNoError(err, "Expected VolumeAttachment but none was found")
}
} else {
framework.ExpectNoError(err, "Failed to find VolumeAttachment")
}
}
if !test.expectVolumeAttachment {
Expect(err).To(HaveOccurred(), "Unexpected VolumeAttachment found")
}
})
}
})
})
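The test above derives the expected VolumeAttachment name deterministically from the volume handle, the provisioner name, and the node name. A self-contained sketch of that derivation (the input values here are hypothetical, for illustration only):

package main

import (
"crypto/sha256"
"fmt"
)

// expectedAttachmentName mirrors the computation used in the test above:
// "csi-" followed by the hex-encoded SHA-256 of handle+provisioner+nodeName.
func expectedAttachmentName(volumeHandle, provisioner, nodeName string) string {
hash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", volumeHandle, provisioner, nodeName)))
return fmt.Sprintf("csi-%x", hash)
}

func main() {
// Hypothetical inputs, for illustration only.
fmt.Println(expectedAttachmentName("pv-handle-1234", "csi-hostpath", "node-1"))
}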
func createCSIDriver(csics csiclient.Interface, attachable bool) *csiv1alpha1.CSIDriver {
By("Creating CSIDriver instance")
driver := &csiv1alpha1.CSIDriver{
ObjectMeta: metav1.ObjectMeta{
Name: "csi-hostpath",
},
Spec: csiv1alpha1.CSIDriverSpec{
AttachRequired: &attachable,
},
}
driver, err := csics.CsiV1alpha1().CSIDrivers().Create(driver)
framework.ExpectNoError(err, "Failed to create CSIDriver: %v", err)
return driver
}
func getVolumeHandle(cs clientset.Interface, claim *v1.PersistentVolumeClaim) string {
// re-get the claim to get the latest state with the bound volume
claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
if err != nil {
framework.ExpectNoError(err, "Cannot get PVC")
return ""
}
pvName := claim.Spec.VolumeName
pv, err := cs.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
if err != nil {
framework.ExpectNoError(err, "Cannot get PV")
return ""
}
if pv.Spec.CSI == nil {
Expect(pv.Spec.CSI).NotTo(BeNil())
return ""
}
return pv.Spec.CSI.VolumeHandle
}
func startPausePod(cs clientset.Interface, t storageClassTest, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
class := newStorageClass(t, ns, "")
class, err := cs.StorageV1().StorageClasses().Create(class)
framework.ExpectNoError(err, "Failed to create class : %v", err)
claim := newClaim(t, ns, "")
claim.Spec.StorageClassName = &class.Name
claim, err = cs.CoreV1().PersistentVolumeClaims(ns).Create(claim)
framework.ExpectNoError(err, "Failed to create claim: %v", err)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-volume-tester-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "volume-tester",
Image: imageutils.GetE2EImage(imageutils.Pause),
VolumeMounts: []v1.VolumeMount{
{
Name: "my-volume",
MountPath: "/mnt/test",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "my-volume",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claim.Name,
ReadOnly: false,
},
},
},
},
},
}
if len(t.nodeName) != 0 {
pod.Spec.NodeName = t.nodeName
}
pod, err = cs.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err, "Failed to create pod: %v", err)
return class, claim, pod
}
type hostpathCSIDriver struct {
combinedClusterRoleNames []string
serviceAccount *v1.ServiceAccount
@@ -168,9 +359,11 @@ type gcePDCSIDriver struct {
func initCSIgcePD(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver {
cs := f.ClientSet
framework.SkipUnlessProviderIs("gce", "gke")
// Currently you will need to manually add the required GCP Credentials as a secret "cloud-sa"
// kubectl create generic cloud-sa --from-file=PATH/TO/cloud-sa.json --namespace={{config.Namespace}}
// TODO(#62561): Inject the necessary credentials automatically to the driver containers in e2e test
framework.SkipIfMultizone(cs)
// TODO(#62561): Use credentials through external pod identity when that goes GA instead of downloading keys.
createGCESecrets(cs, config)
framework.SkipUnlessSecretExistsAfterWait(cs, "cloud-sa", config.Namespace, 3*time.Minute)
return &gcePDCSIDriver{
@@ -187,13 +380,10 @@ func initCSIgcePD(f *framework.Framework, config framework.VolumeTestConfig) csi
}
func (g *gcePDCSIDriver) createStorageClassTest(node v1.Node) storageClassTest {
nodeZone, ok := node.GetLabels()[kubeletapis.LabelZoneFailureDomain]
Expect(ok).To(BeTrue(), "Could not get label %v from node %v", kubeletapis.LabelZoneFailureDomain, node.GetName())
return storageClassTest{
name: "csi-gce-pd",
provisioner: "csi-gce-pd",
parameters: map[string]string{"type": "pd-standard", "zone": nodeZone},
name: "com.google.csi.gcepd",
provisioner: "com.google.csi.gcepd",
parameters: map[string]string{"type": "pd-standard"},
claimSize: "5Gi",
expectedSize: "5Gi",
nodeName: node.Name,
@@ -209,6 +399,8 @@ func (g *gcePDCSIDriver) createCSIDriver() {
g.nodeServiceAccount = csiServiceAccount(cs, config, "gce-node", false /* teardown */)
csiClusterRoleBindings(cs, config, false /* teardown */, g.controllerServiceAccount, g.controllerClusterRoles)
csiClusterRoleBindings(cs, config, false /* teardown */, g.nodeServiceAccount, g.nodeClusterRoles)
utils.PrivilegedTestPSPClusterRoleBinding(cs, config.Namespace,
false /* teardown */, []string{g.controllerServiceAccount.Name, g.nodeServiceAccount.Name})
deployGCEPDCSIDriver(cs, config, false /* teardown */, f, g.nodeServiceAccount, g.controllerServiceAccount)
}
@@ -220,6 +412,8 @@ func (g *gcePDCSIDriver) cleanupCSIDriver() {
deployGCEPDCSIDriver(cs, config, true /* teardown */, f, g.nodeServiceAccount, g.controllerServiceAccount)
csiClusterRoleBindings(cs, config, true /* teardown */, g.controllerServiceAccount, g.controllerClusterRoles)
csiClusterRoleBindings(cs, config, true /* teardown */, g.nodeServiceAccount, g.nodeClusterRoles)
utils.PrivilegedTestPSPClusterRoleBinding(cs, config.Namespace,
true /* teardown */, []string{g.controllerServiceAccount.Name, g.nodeServiceAccount.Name})
csiServiceAccount(cs, config, "gce-controller", true /* teardown */)
csiServiceAccount(cs, config, "gce-node", true /* teardown */)
}

View File

@@ -0,0 +1,43 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"base.go",
"in_tree.go",
],
importpath = "k8s.io/kubernetes/test/e2e/storage/drivers",
visibility = ["//visibility:public"],
deps = [
"//pkg/kubelet/apis:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/storage/testpatterns:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/e2e/storage/vsphere:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,176 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package drivers
import (
"fmt"
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
// TestDriver represents an interface for a driver to be tested in TestSuite
type TestDriver interface {
// GetDriverInfo returns DriverInfo for the TestDriver
GetDriverInfo() *DriverInfo
// CreateDriver creates all driver resources that are required for the TestDriver methods,
// except CreateVolume
CreateDriver()
// CleanupDriver cleans up all the resources that were created in CreateDriver
CleanupDriver()
// SkipUnsupportedTest skips the test if the TestPattern is not suitable to test with the TestDriver
SkipUnsupportedTest(testpatterns.TestPattern)
}
// PreprovisionedVolumeTestDriver represents an interface for a TestDriver that has pre-provisioned volumes
type PreprovisionedVolumeTestDriver interface {
TestDriver
// CreateVolume creates a pre-provisioned volume.
CreateVolume(testpatterns.TestVolType) interface{}
// DeleteVolume deletes a volume that is created in CreateVolume
DeleteVolume(testpatterns.TestVolType, interface{})
}
// InlineVolumeTestDriver represents an interface for a TestDriver that supports InlineVolume
type InlineVolumeTestDriver interface {
PreprovisionedVolumeTestDriver
// GetVolumeSource returns a volumeSource for an inline volume.
// It will set readOnly and fsType on the volumeSource if the TestDriver supports both of them.
// It will return nil if the TestDriver doesn't support either of the parameters.
GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource
}
// PreprovisionedPVTestDriver represents an interface for a TestDriver that supports PreprovisionedPV
type PreprovisionedPVTestDriver interface {
PreprovisionedVolumeTestDriver
// GetPersistentVolumeSource returns a PersistentVolumeSource for a pre-provisioned Persistent Volume.
// It will set readOnly and fsType on the PersistentVolumeSource if the TestDriver supports both of them.
// It will return nil if the TestDriver doesn't support either of the parameters.
GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource
}
// DynamicPVTestDriver represents an interface for a TestDriver that supports DynamicPV
type DynamicPVTestDriver interface {
TestDriver
// GetDynamicProvisionStorageClass returns a StorageClass for dynamically provisioning a Persistent Volume.
// It will set fsType on the StorageClass if the TestDriver supports it.
// It will return nil if the TestDriver doesn't support it.
GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass
}
// DriverInfo represents a combination of parameters to be used in the implementation of a TestDriver
type DriverInfo struct {
Name string // Name of the driver
FeatureTag string // FeatureTag for the driver
MaxFileSize int64 // Max file size to be tested for this driver
SupportedFsType sets.String // Set of supported fs types
IsPersistent bool // Flag to represent whether it provides persistence
IsFsGroupSupported bool // Flag to represent whether it supports fsGroup
IsBlockSupported bool // Flag to represent whether it supports Block Volume
// Parameters below will be set inside test loop by using SetCommonDriverParameters.
// Drivers that implement TestDriver are required to set all the above parameters
// and return DriverInfo on GetDriverInfo() call.
Framework *framework.Framework // Framework for the test
Config framework.VolumeTestConfig // VolumeTestConfig for the test
}
// GetDriverNameWithFeatureTags returns the driver name with feature tags
// For example:
// - [Driver: nfs]
// - [Driver: rbd][Feature:Volumes]
func GetDriverNameWithFeatureTags(driver TestDriver) string {
dInfo := driver.GetDriverInfo()
return fmt.Sprintf("[Driver: %s]%s", dInfo.Name, dInfo.FeatureTag)
}
func CreateVolume(driver TestDriver, volType testpatterns.TestVolType) interface{} {
// Create a volume for the test, unless it is a DynamicPV test
switch volType {
case testpatterns.InlineVolume:
fallthrough
case testpatterns.PreprovisionedPV:
if pDriver, ok := driver.(PreprovisionedVolumeTestDriver); ok {
return pDriver.CreateVolume(volType)
}
case testpatterns.DynamicPV:
// No need to create volume
default:
framework.Failf("Invalid volType specified: %v", volType)
}
return nil
}
func DeleteVolume(driver TestDriver, volType testpatterns.TestVolType, testResource interface{}) {
// Delete the test volume, unless it is a DynamicPV test
switch volType {
case testpatterns.InlineVolume:
fallthrough
case testpatterns.PreprovisionedPV:
if pDriver, ok := driver.(PreprovisionedVolumeTestDriver); ok {
pDriver.DeleteVolume(volType, testResource)
}
case testpatterns.DynamicPV:
// No need to delete volume
default:
framework.Failf("Invalid volType specified: %v", volType)
}
}
// SetCommonDriverParameters sets common driver parameters on the TestDriver.
// This function is intended to be called in BeforeEach() inside the test loop.
func SetCommonDriverParameters(
driver TestDriver,
f *framework.Framework,
config framework.VolumeTestConfig,
) {
dInfo := driver.GetDriverInfo()
dInfo.Framework = f
dInfo.Config = config
}
func getStorageClass(
provisioner string,
parameters map[string]string,
bindingMode *storagev1.VolumeBindingMode,
ns string,
suffix string,
) *storagev1.StorageClass {
if bindingMode == nil {
defaultBindingMode := storagev1.VolumeBindingImmediate
bindingMode = &defaultBindingMode
}
return &storagev1.StorageClass{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: metav1.ObjectMeta{
// Name must be unique, so let's base it on namespace name
Name: ns + "-" + suffix,
},
Provisioner: provisioner,
Parameters: parameters,
VolumeBindingMode: bindingMode,
}
}
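For orientation, here is a minimal sketch of what an implementation of the TestDriver interface above could look like. This is not part of the commit: the driver name and init function are hypothetical, and the sketch assumes TestPattern carries the requested volume type (as testpatterns.DynamicPV is used elsewhere in this diff).

package drivers

import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)

// exampleDriver is a hypothetical driver used only to illustrate the interface.
type exampleDriver struct {
driverInfo DriverInfo
}

// InitExampleDriver returns an exampleDriver that only supports "" (default fs) and ext4.
func InitExampleDriver() TestDriver {
return &exampleDriver{
driverInfo: DriverInfo{
Name: "example",
MaxFileSize: 1 * 1024 * 1024, // 1 MiB
SupportedFsType: sets.NewString("", "ext4"),
IsPersistent: true,
IsFsGroupSupported: false,
IsBlockSupported: false,
},
}
}

func (d *exampleDriver) GetDriverInfo() *DriverInfo {
return &d.driverInfo
}

// SkipUnsupportedTest skips patterns this driver cannot serve; assumes a
// VolType field on TestPattern, which is not shown in this diff.
func (d *exampleDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
if pattern.VolType == testpatterns.DynamicPV {
framework.Skipf("Driver %q does not support dynamic provisioning -- skipping", d.driverInfo.Name)
}
}

func (d *exampleDriver) CreateDriver() {} // nothing to deploy in this sketch
func (d *exampleDriver) CleanupDriver() {}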

File diff suppressed because it is too large

View File

@@ -76,10 +76,22 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
gitVolumeName := "git-volume"
gitVolumeMountPath := "/etc/git-volume"
gitURL, gitRepo, gitCleanup := createGitServer(f)
defer gitCleanup()
configMapVolumeName := "configmap-volume"
configMapVolumeMountPath := "/etc/configmap-volume"
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
BinaryData: map[string][]byte{
"data-1": []byte("value-1\n"),
},
}
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@@ -96,11 +108,12 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
},
},
{
Name: gitVolumeName,
Name: configMapVolumeName,
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{
Repository: gitURL,
Directory: gitRepo,
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
@@ -116,8 +129,8 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
ReadOnly: true,
},
{
Name: gitVolumeName,
MountPath: gitVolumeMountPath,
Name: configMapVolumeName,
MountPath: configMapVolumeMountPath,
},
},
},
@@ -131,9 +144,13 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil); err != nil {
framework.Failf("unable to delete secret %v: %v", secret.Name, err)
}
By("Cleaning up the git vol pod")
By("Cleaning up the configmap")
if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMap.Name, nil); err != nil {
framework.Failf("unable to delete configmap %v: %v", configMap.Name, err)
}
By("Cleaning up the pod")
if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("unable to delete git vol pod %v: %v", pod.Name, err)
framework.Failf("unable to delete pod %v: %v", pod.Name, err)
}
}()
})
@@ -353,7 +370,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
Containers: []v1.Container{
{
Name: "test-container",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sleep", "10000"},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{

View File

@@ -27,11 +27,18 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var (
volumePath = "/test-volume"
volumeName = "test-volume"
mountImage = imageutils.GetE2EImage(imageutils.Mounttest)
)
var _ = utils.SIGDescribe("Ephemeralstorage", func() {
var (
c clientset.Interface

View File

@@ -47,6 +47,7 @@ const (
gciVolumePluginDir = "/home/kubernetes/flexvolume"
gciVolumePluginDirLegacy = "/etc/srv/kubernetes/kubelet-plugins/volume/exec"
gciVolumePluginDirVersion = "1.10.0"
detachTimeout = 10 * time.Second
)
// testFlexVolume tests that a client pod using a given flexvolume driver
@@ -72,62 +73,64 @@ func testFlexVolume(driver string, cs clientset.Interface, config framework.Volu
// installFlex installs the driver found at filePath on the node.
// If node is nil, installs on the master.
func installFlex(c clientset.Interface, node *v1.Node, vendor, driver, filePath string, restart bool) {
func installFlex(c clientset.Interface, node *v1.Node, vendor, driver, filePath string) {
flexDir := getFlexDir(c, node, vendor, driver)
flexFile := path.Join(flexDir, driver)
host := ""
var err error
if node != nil {
host = framework.GetNodeExternalIP(node)
host, err = framework.GetNodeExternalIP(node)
if err != nil {
host, err = framework.GetNodeInternalIP(node)
}
} else {
host = net.JoinHostPort(framework.GetMasterHost(), sshPort)
masterHostWithPort := framework.GetMasterHost()
hostName := getHostFromHostPort(masterHostWithPort)
host = net.JoinHostPort(hostName, sshPort)
}
framework.ExpectNoError(err)
cmd := fmt.Sprintf("sudo mkdir -p %s", flexDir)
sshAndLog(cmd, host)
sshAndLog(cmd, host, true /*failOnError*/)
data := generated.ReadOrDie(filePath)
cmd = fmt.Sprintf("sudo tee <<'EOF' %s\n%s\nEOF", flexFile, string(data))
sshAndLog(cmd, host)
sshAndLog(cmd, host, true /*failOnError*/)
cmd = fmt.Sprintf("sudo chmod +x %s", flexFile)
sshAndLog(cmd, host)
if !restart {
return
}
if node != nil {
err := framework.RestartKubelet(host)
framework.ExpectNoError(err)
err = framework.WaitForKubeletUp(host)
framework.ExpectNoError(err)
} else {
err := framework.RestartControllerManager()
framework.ExpectNoError(err)
err = framework.WaitForControllerManagerUp()
framework.ExpectNoError(err)
}
sshAndLog(cmd, host, true /*failOnError*/)
}
func uninstallFlex(c clientset.Interface, node *v1.Node, vendor, driver string) {
flexDir := getFlexDir(c, node, vendor, driver)
host := ""
var err error
if node != nil {
host = framework.GetNodeExternalIP(node)
host, err = framework.GetNodeExternalIP(node)
if err != nil {
host, err = framework.GetNodeInternalIP(node)
}
} else {
host = net.JoinHostPort(framework.GetMasterHost(), sshPort)
masterHostWithPort := framework.GetMasterHost()
hostName := getHostFromHostPort(masterHostWithPort)
host = net.JoinHostPort(hostName, sshPort)
}
if host == "" {
framework.Failf("Error getting node ip : %v", err)
}
cmd := fmt.Sprintf("sudo rm -r %s", flexDir)
sshAndLog(cmd, host)
sshAndLog(cmd, host, false /*failOnError*/)
}
func getFlexDir(c clientset.Interface, node *v1.Node, vendor, driver string) string {
volumePluginDir := defaultVolumePluginDir
if framework.ProviderIs("gce") {
if node == nil && framework.MasterOSDistroIs("gci") {
if node == nil && framework.MasterOSDistroIs("gci", "ubuntu") {
v, err := getMasterVersion(c)
if err != nil {
framework.Failf("Error getting master version: %v", err)
@@ -138,7 +141,7 @@ func getFlexDir(c clientset.Interface, node *v1.Node, vendor, driver string) str
} else {
volumePluginDir = gciVolumePluginDirLegacy
}
} else if node != nil && framework.NodeOSDistroIs("gci") {
} else if node != nil && framework.NodeOSDistroIs("gci", "ubuntu") {
if getNodeVersion(node).AtLeast(versionutil.MustParseGeneric(gciVolumePluginDirVersion)) {
volumePluginDir = gciVolumePluginDir
} else {
@@ -150,11 +153,11 @@ func getFlexDir(c clientset.Interface, node *v1.Node, vendor, driver string) str
return flexDir
}
func sshAndLog(cmd, host string) {
func sshAndLog(cmd, host string, failOnError bool) {
result, err := framework.SSH(cmd, host, framework.TestContext.Provider)
framework.LogSSHResult(result)
framework.ExpectNoError(err)
if result.Code != 0 {
if result.Code != 0 && failOnError {
framework.Failf("%s returned non-zero, stderr: %s", cmd, result.Stderr)
}
}
@@ -177,7 +180,18 @@ func getNodeVersion(node *v1.Node) *versionutil.Version {
return versionutil.MustParseSemantic(node.Status.NodeInfo.KubeletVersion)
}
var _ = utils.SIGDescribe("Flexvolumes [Disruptive]", func() {
func getHostFromHostPort(hostPort string) string {
// try to split host and port
var host string
var err error
if host, _, err = net.SplitHostPort(hostPort); err != nil {
// if SplitHostPort returns an error, the entire hostPort is treated as the host
host = hostPort
}
return host
}
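As a quick illustration of the fallback above, a self-contained sketch (not part of the commit) that mirrors getHostFromHostPort:

package main

import (
"fmt"
"net"
)

// hostFromHostPort mirrors getHostFromHostPort: strip the port when one is
// present, otherwise treat the whole input as the host.
func hostFromHostPort(hostPort string) string {
if host, _, err := net.SplitHostPort(hostPort); err == nil {
return host
}
return hostPort
}

func main() {
fmt.Println(hostFromHostPort("10.0.0.1:22")) // 10.0.0.1
fmt.Println(hostFromHostPort("10.0.0.1")) // 10.0.0.1 (no port to strip)
fmt.Println(hostFromHostPort("[2001:db8::1]:6443")) // 2001:db8::1
}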
var _ = utils.SIGDescribe("Flexvolumes", func() {
f := framework.NewDefaultFramework("flexvolume")
// note that namespace deletion is handled by the delete-namespace flag
@@ -189,9 +203,9 @@ var _ = utils.SIGDescribe("Flexvolumes [Disruptive]", func() {
var suffix string
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce")
framework.SkipUnlessMasterOSDistroIs("gci")
framework.SkipUnlessNodeOSDistroIs("debian", "gci")
framework.SkipUnlessProviderIs("gce", "local")
framework.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom")
framework.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom")
framework.SkipUnlessSSHKeyPresent()
cs = f.ClientSet
@@ -211,7 +225,7 @@ var _ = utils.SIGDescribe("Flexvolumes [Disruptive]", func() {
driverInstallAs := driver + "-" + suffix
By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver), true /* restart */)
installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver))
testFlexVolume(driverInstallAs, cs, config, f)
@@ -229,9 +243,9 @@ var _ = utils.SIGDescribe("Flexvolumes [Disruptive]", func() {
driverInstallAs := driver + "-" + suffix
By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver), true /* restart */)
installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver))
By(fmt.Sprintf("installing flexvolume %s on master as %s", path.Join(driverDir, driver), driverInstallAs))
installFlex(cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver), true /* restart */)
installFlex(cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver))
testFlexVolume(driverInstallAs, cs, config, f)
@@ -240,27 +254,12 @@ var _ = utils.SIGDescribe("Flexvolumes [Disruptive]", func() {
framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
}
// Detach might occur after pod deletion. Wait before deleting driver.
time.Sleep(detachTimeout)
By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
uninstallFlex(cs, &node, "k8s", driverInstallAs)
By(fmt.Sprintf("uninstalling flexvolume %s from master", driverInstallAs))
uninstallFlex(cs, nil, "k8s", driverInstallAs)
})
It("should install plugin without kubelet restart", func() {
driver := "dummy"
driverInstallAs := driver + "-" + suffix
By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver), false /* restart */)
testFlexVolume(driverInstallAs, cs, config, f)
By("waiting for flex client pod to terminate")
if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
}
By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
uninstallFlex(cs, &node, "k8s", driverInstallAs)
})
})

View File

@@ -61,10 +61,11 @@ var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() {
var (
clientPod *v1.Pod
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
)
BeforeEach(func() {
framework.Logf("Initializing pod and pvcs for test")
clientPod, pvc = createPodPVCFromSC(f, c, ns)
clientPod, pvc, pv = createPodPVCFromSC(f, c, ns)
})
for _, test := range disruptiveTestTable {
func(t disruptiveTest) {
@@ -76,13 +77,13 @@ var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() {
}
AfterEach(func() {
framework.Logf("Tearing down test spec")
tearDownTestCase(c, f, ns, clientPod, pvc, nil)
tearDownTestCase(c, f, ns, clientPod, pvc, pv, false)
pvc, clientPod = nil, nil
})
})
})
func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string) (*v1.Pod, *v1.PersistentVolumeClaim) {
func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string) (*v1.Pod, *v1.PersistentVolumeClaim, *v1.PersistentVolume) {
var err error
test := storageClassTest{
name: "default",
@@ -99,5 +100,5 @@ func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string
By("Creating a pod with dynamically provisioned volume")
pod, err := framework.CreateNginxPod(c, ns, nil, pvcClaims)
Expect(err).NotTo(HaveOccurred(), "While creating pods for kubelet restart test")
return pod, pvc
return pod, pvc, pvs[0]
}

View File

@@ -0,0 +1,91 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
. "github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
// List of testDrivers to be executed in the loop below
var testDrivers = []func() drivers.TestDriver{
drivers.InitNFSDriver,
drivers.InitGlusterFSDriver,
drivers.InitISCSIDriver,
drivers.InitRbdDriver,
drivers.InitCephFSDriver,
drivers.InitHostPathDriver,
drivers.InitHostPathSymlinkDriver,
drivers.InitEmptydirDriver,
drivers.InitCinderDriver,
drivers.InitGcePdDriver,
drivers.InitVSphereDriver,
drivers.InitAzureDriver,
drivers.InitAwsDriver,
}
// List of testSuites to be executed in the loop below
var testSuites = []func() testsuites.TestSuite{
testsuites.InitVolumesTestSuite,
testsuites.InitVolumeIOTestSuite,
testsuites.InitVolumeModeTestSuite,
testsuites.InitSubPathTestSuite,
}
// This executes testSuites for in-tree volumes.
var _ = utils.SIGDescribe("In-tree Volumes", func() {
f := framework.NewDefaultFramework("volumes")
var (
ns *v1.Namespace
config framework.VolumeTestConfig
)
BeforeEach(func() {
ns = f.Namespace
config = framework.VolumeTestConfig{
Namespace: ns.Name,
Prefix: "volume",
}
})
for _, initDriver := range testDrivers {
curDriver := initDriver()
Context(fmt.Sprintf(drivers.GetDriverNameWithFeatureTags(curDriver)), func() {
driver := curDriver
BeforeEach(func() {
// setupDriver
drivers.SetCommonDriverParameters(driver, f, config)
driver.CreateDriver()
})
AfterEach(func() {
// Cleanup driver
driver.CleanupDriver()
})
testsuites.RunTestSuite(f, config, driver, testSuites)
})
}
})
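Note the curDriver := initDriver() copy before the Ginkgo closures, matching the curDriverName/curInitCSIDriver copies in csi_volumes.go: before Go 1.22, a closure that captured the loop variable itself would observe only its final value by the time Ginkgo runs it. A standalone sketch of the pitfall (not part of the commit):

package main

import "fmt"

func main() {
var callbacks []func()
for _, name := range []string{"nfs", "rbd", "iscsi"} {
curName := name // copy; the closures below must not capture the loop variable itself
callbacks = append(callbacks, func() { fmt.Println(curName) })
}
// Prints nfs, rbd, iscsi. Without the copy (before Go 1.22), every
// callback would have printed "iscsi", the loop variable's final value.
for _, cb := range callbacks {
cb()
}
}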

View File

@@ -84,13 +84,15 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
StorageClassName: &emptyStorageClass,
}
// Get the first ready node IP that is not hosting the NFS pod.
var err error
if clientNodeIP == "" {
framework.Logf("Designating test node")
nodes := framework.GetReadySchedulableNodesOrDie(c)
for _, node := range nodes.Items {
if node.Name != nfsServerPod.Spec.NodeName {
clientNode = &node
clientNodeIP = framework.GetNodeExternalIP(clientNode)
clientNodeIP, err = framework.GetNodeExternalIP(clientNode)
framework.ExpectNoError(err)
break
}
}
@@ -206,7 +208,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
AfterEach(func() {
framework.Logf("Tearing down test spec")
tearDownTestCase(c, f, ns, clientPod, pvc, pv)
tearDownTestCase(c, f, ns, clientPod, pvc, pv, true /* force PV delete */)
pv, pvc, clientPod = nil, nil, nil
})
@@ -275,11 +277,14 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framew
}
// tearDownTestCase destroys resources created by initTestCase.
func tearDownTestCase(c clientset.Interface, f *framework.Framework, ns string, client *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
func tearDownTestCase(c clientset.Interface, f *framework.Framework, ns string, client *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, forceDeletePV bool) {
// Ignore deletion errors. Failing on them will interrupt test cleanup.
framework.DeletePodWithWait(f, c, client)
framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
if pv != nil {
if forceDeletePV && pv != nil {
framework.DeletePersistentVolume(c, pv.Name)
return
}
err := framework.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, 5*time.Minute)
framework.ExpectNoError(err, "Persistent Volume %v not deleted by dynamic provisioner", pv.Name)
}

View File

@@ -40,6 +40,7 @@ import (
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
@@ -526,7 +527,7 @@ func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, num
if numContainers > 1 {
containers[i].Name = fmt.Sprintf("mycontainer%v", i+1)
}
containers[i].Image = "busybox"
containers[i].Image = imageutils.GetE2EImage(imageutils.BusyBox)
containers[i].Command = []string{"sleep", "6000"}
containers[i].VolumeMounts = make([]v1.VolumeMount, len(diskNames))
for k := range diskNames {
@@ -575,7 +576,7 @@ func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, num
return pod
}
// Waits for specified PD to to detach from specified hostName
// Waits for specified PD to detach from specified hostName
func waitForPDDetach(diskName string, nodeName types.NodeName) error {
if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
framework.Logf("Waiting for GCE PD %q to detach from node %q.", diskName, nodeName)

View File

@@ -51,6 +51,7 @@ import (
type localTestConfig struct {
ns string
nodes []v1.Node
nodeExecPods map[string]*v1.Pod
node0 *v1.Node
client clientset.Interface
scName string
@@ -77,8 +78,12 @@ const (
GCELocalSSDVolumeType localVolumeType = "gce-localssd-scsi-fs"
// Creates a local file, formats it, and maps it as a block device.
BlockLocalVolumeType localVolumeType = "block"
// Creates a local file, formats it, and mounts it to use as local volume.
BlockFsLocalVolumeType localVolumeType = "blockfs"
// Creates a local file serving as the backing for a block device, formats it,
// and mounts it to use as an FS mode local volume.
BlockFsWithFormatLocalVolumeType localVolumeType = "blockfswithformat"
// Creates a local file serving as the backing for a block device, does not format it manually,
// and mounts it to use as an FS mode local volume.
BlockFsWithoutFormatLocalVolumeType localVolumeType = "blockfswithoutformat"
)
var setupLocalVolumeMap = map[localVolumeType]func(*localTestConfig, *v1.Node) *localTestVolume{
@@ -89,7 +94,8 @@ var setupLocalVolumeMap = map[localVolumeType]func(*localTestConfig, *v1.Node) *
DirectoryBindMountedLocalVolumeType: setupLocalVolumeDirectoryBindMounted,
DirectoryLinkBindMountedLocalVolumeType: setupLocalVolumeDirectoryLinkBindMounted,
BlockLocalVolumeType: setupLocalVolumeBlock,
BlockFsLocalVolumeType: setupLocalVolumeBlockFs,
BlockFsWithFormatLocalVolumeType: setupLocalVolumeBlockFsWithFormat,
BlockFsWithoutFormatLocalVolumeType: setupLocalVolumeBlockFsWithoutFormat,
}
var cleanupLocalVolumeMap = map[localVolumeType]func(*localTestConfig, *localTestVolume){
@@ -100,7 +106,8 @@ var cleanupLocalVolumeMap = map[localVolumeType]func(*localTestConfig, *localTes
DirectoryBindMountedLocalVolumeType: cleanupLocalVolumeDirectoryBindMounted,
DirectoryLinkBindMountedLocalVolumeType: cleanupLocalVolumeDirectoryLinkBindMounted,
BlockLocalVolumeType: cleanupLocalVolumeBlock,
BlockFsLocalVolumeType: cleanupLocalVolumeBlockFs,
BlockFsWithFormatLocalVolumeType: cleanupLocalVolumeBlockFsWithFormat,
BlockFsWithoutFormatLocalVolumeType: cleanupLocalVolumeBlockFsWithoutFormat,
}
type localTestVolume struct {
@@ -176,8 +183,6 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
)
BeforeEach(func() {
framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
// Get all the schedulable nodes
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodes.Items)).NotTo(BeZero(), "No available nodes for scheduling")
@@ -197,6 +202,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
ns: f.Namespace.Name,
client: f.ClientSet,
nodes: nodes.Items[:maxLen],
nodeExecPods: make(map[string]*v1.Pod, maxLen),
node0: node0,
scName: scName,
ssTester: ssTester,
@@ -224,7 +230,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
BeforeEach(func() {
if testVolType == GCELocalSSDVolumeType {
SkipUnlessLocalSSDExists("scsi", "fs", config.node0)
SkipUnlessLocalSSDExists(config, "scsi", "fs", config.node0)
}
setupStorageClass(config, &testMode)
testVols := setupLocalVolumesPVCsPVs(config, testVolType, config.node0, 1, testMode)
@@ -247,6 +253,11 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
pod1, pod1Err = createLocalPod(config, testVol, nil)
Expect(pod1Err).NotTo(HaveOccurred())
verifyLocalPod(config, testVol, pod1, config.node0.Name)
writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)
By("Writing in pod1")
podRWCmdExec(pod1, writeCmd)
})
AfterEach(func() {
@@ -256,16 +267,16 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
It("should be able to mount volume and read from pod1", func() {
By("Reading in pod1")
// testFileContent was written during setupLocalVolume
// testFileContent was written in BeforeEach
testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVolType)
})
It("should be able to mount volume and write from pod1", func() {
// testFileContent was written during setupLocalVolume
// testFileContent was written in BeforeEach
testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVolType)
By("Writing in pod1")
writeCmd, _ := createWriteAndReadCmds(volumeDir, testFile, testVol.hostDir /*writeTestFileContent*/, testVolType)
writeCmd := createWriteCmd(volumeDir, testFile, testVol.hostDir /*writeTestFileContent*/, testVolType)
podRWCmdExec(pod1, writeCmd)
})
})
@@ -346,12 +357,12 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
Context("Local volume that cannot be mounted [Slow]", func() {
// TODO:
// - check for these errors in unit tests intead
// - check for these errors in unit tests instead
It("should fail due to non-existent path", func() {
ep := &eventPatterns{
reason: "FailedMount",
pattern: make([]string, 2)}
ep.pattern = append(ep.pattern, "MountVolume.SetUp failed")
ep.pattern = append(ep.pattern, "MountVolume.NewMounter initialization failed")
testVol := &localTestVolume{
node: config.node0,
@@ -461,8 +472,8 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
// Delete the persistent volume claim: file will be cleaned up and volume be re-created.
By("Deleting the persistent volume claim to clean up persistent volume and re-create one")
writeCmd, _ := createWriteAndReadCmds(volumePath, testFile, testFileContent, DirectoryLocalVolumeType)
err = framework.IssueSSHCommand(writeCmd, framework.TestContext.Provider, config.node0)
writeCmd := createWriteCmd(volumePath, testFile, testFileContent, DirectoryLocalVolumeType)
err = issueNodeCommand(config, writeCmd, config.node0)
Expect(err).NotTo(HaveOccurred())
err = config.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
@@ -472,7 +483,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
Expect(err).NotTo(HaveOccurred())
Expect(newPV.UID).NotTo(Equal(oldPV.UID))
fileDoesntExistCmd := createFileDoesntExistCmd(volumePath, testFile)
err = framework.IssueSSHCommand(fileDoesntExistCmd, framework.TestContext.Provider, config.node0)
err = issueNodeCommand(config, fileDoesntExistCmd, config.node0)
Expect(err).NotTo(HaveOccurred())
By("Deleting provisioner daemonset")
@@ -483,7 +494,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
directoryPath := filepath.Join(config.discoveryDir, "notbindmount")
By("Creating a directory, not bind mounted, in discovery directory")
mkdirCmd := fmt.Sprintf("mkdir -p %v -m 777", directoryPath)
err := framework.IssueSSHCommand(mkdirCmd, framework.TestContext.Provider, config.node0)
err := issueNodeCommand(config, mkdirCmd, config.node0)
Expect(err).NotTo(HaveOccurred())
By("Starting a provisioner daemonset")
@@ -504,7 +515,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
By("Deleting provisioner daemonset")
deleteProvisionerDaemonset(config)
})
It("should discover dynamicly created local persistent volume mountpoint in discovery directory", func() {
It("should discover dynamically created local persistent volume mountpoint in discovery directory", func() {
By("Starting a provisioner daemonset")
createProvisionerDaemonset(config)
@@ -524,17 +535,14 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
})
})
Context("StatefulSet with pod anti-affinity", func() {
Context("StatefulSet with pod affinity", func() {
var testVols map[string][]*localTestVolume
const (
ssReplicas = 3
volsPerNode = 2
volsPerNode = 6
)
BeforeEach(func() {
if len(config.nodes) < ssReplicas {
framework.Skipf("Runs only when number of nodes >= %v", ssReplicas)
}
setupStorageClass(config, &waitMode)
testVols = map[string][]*localTestVolume{}
@@ -553,10 +561,34 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
cleanupStorageClass(config)
})
It("should use volumes spread across nodes", func() {
It("should use volumes spread across nodes when pod has anti-affinity", func() {
if len(config.nodes) < ssReplicas {
framework.Skipf("Runs only when number of nodes >= %v", ssReplicas)
}
By("Creating a StatefulSet with pod anti-affinity on nodes")
ss := createStatefulSet(config, ssReplicas, volsPerNode)
validateStatefulSet(config, ss)
ss := createStatefulSet(config, ssReplicas, volsPerNode, true, false)
validateStatefulSet(config, ss, true)
})
It("should use volumes on one node when pod has affinity", func() {
By("Creating a StatefulSet with pod affinity on nodes")
ss := createStatefulSet(config, ssReplicas, volsPerNode/ssReplicas, false, false)
validateStatefulSet(config, ss, false)
})
It("should use volumes spread across nodes when pod management is parallel and pod has anti-affinity", func() {
if len(config.nodes) < ssReplicas {
framework.Skipf("Runs only when number of nodes >= %v", ssReplicas)
}
By("Creating a StatefulSet with pod anti-affinity on nodes")
ss := createStatefulSet(config, ssReplicas, 1, true, true)
validateStatefulSet(config, ss, true)
})
It("should use volumes on one node when pod management is parallel and pod has affinity", func() {
By("Creating a StatefulSet with pod affinity on nodes")
ss := createStatefulSet(config, ssReplicas, 1, false, true)
validateStatefulSet(config, ss, false)
})
})
@@ -725,8 +757,6 @@ func testPodWithNodeConflict(config *localTestConfig, testVolType localVolumeTyp
err = framework.WaitForPodNameUnschedulableInNamespace(config.client, pod.Name, pod.Namespace)
Expect(err).NotTo(HaveOccurred())
cleanupLocalVolumes(config, []*localTestVolume{testVol})
}
type eventPatterns struct {
@@ -760,7 +790,12 @@ func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {
Expect(pod1Err).NotTo(HaveOccurred())
verifyLocalPod(config, testVol, pod1, config.node0.Name)
// testFileContent was written during setupLocalVolume
writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)
By("Writing in pod1")
podRWCmdExec(pod1, writeCmd)
// testFileContent was written after creating pod1
testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)
By("Creating pod2 to read from the PV")
@@ -768,16 +803,16 @@ func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {
Expect(pod2Err).NotTo(HaveOccurred())
verifyLocalPod(config, testVol, pod2, config.node0.Name)
// testFileContent was written during setupLocalVolume
// testFileContent was written after creating pod1
testReadFileContent(volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType)
writeCmd := createWriteCmd(volumeDir, testFile, testVol.hostDir /*writeTestFileContent*/, testVol.localVolumeType)
writeCmd = createWriteCmd(volumeDir, testFile, testVol.hostDir /*writeTestFileContent*/, testVol.localVolumeType)
By("Writing in pod1")
podRWCmdExec(pod1, writeCmd)
By("Writing in pod2")
podRWCmdExec(pod2, writeCmd)
By("Reading in pod2")
testReadFileContent(volumeDir, testFile, testVol.hostDir, pod2, testVol.localVolumeType)
By("Reading in pod1")
testReadFileContent(volumeDir, testFile, testVol.hostDir, pod1, testVol.localVolumeType)
By("Deleting pod1")
framework.DeletePodOrFail(config.client, config.ns, pod1.Name)
@@ -792,14 +827,14 @@ func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolum
Expect(pod1Err).NotTo(HaveOccurred())
verifyLocalPod(config, testVol, pod1, config.node0.Name)
// testFileContent was written during setupLocalVolume
testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)
writeCmd := createWriteCmd(volumeDir, testFile, testVol.hostDir /*writeTestFileContent*/, testVol.localVolumeType)
writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)
By("Writing in pod1")
podRWCmdExec(pod1, writeCmd)
// testFileContent was written after creating pod1
testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)
By("Deleting pod1")
framework.DeletePodOrFail(config.client, config.ns, pod1.Name)
@@ -809,7 +844,7 @@ func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolum
verifyLocalPod(config, testVol, pod2, config.node0.Name)
By("Reading in pod2")
testReadFileContent(volumeDir, testFile, testVol.hostDir, pod2, testVol.localVolumeType)
testReadFileContent(volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType)
By("Deleting pod2")
framework.DeletePodOrFail(config.client, config.ns, pod2.Name)
@@ -879,11 +914,13 @@ func cleanupLocalVolumes(config *localTestConfig, volumes []*localTestVolume) {
}
}
func setupWriteTestFile(hostDir string, config *localTestConfig, localVolumeType localVolumeType, node *v1.Node) *localTestVolume {
writeCmd, _ := createWriteAndReadCmds(hostDir, testFile, testFileContent, localVolumeType)
By(fmt.Sprintf("Creating test file on node %q in path %q", node.Name, hostDir))
err := framework.IssueSSHCommand(writeCmd, framework.TestContext.Provider, node)
Expect(err).NotTo(HaveOccurred())
func generateLocalTestVolume(hostDir string, config *localTestConfig, localVolumeType localVolumeType, node *v1.Node) *localTestVolume {
if localVolumeType != BlockLocalVolumeType && localVolumeType != BlockFsWithoutFormatLocalVolumeType {
mkdirCmd := fmt.Sprintf("mkdir -p %s", hostDir)
err := issueNodeCommand(config, mkdirCmd, node)
Expect(err).NotTo(HaveOccurred())
}
return &localTestVolume{
node: node,
hostDir: hostDir,
@@ -895,94 +932,171 @@ func setupLocalVolumeTmpfs(config *localTestConfig, node *v1.Node) *localTestVol
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
createAndMountTmpfsLocalVolume(config, hostDir, node)
// populate volume with testFile containing testFileContent
return setupWriteTestFile(hostDir, config, TmpfsLocalVolumeType, node)
return generateLocalTestVolume(hostDir, config, TmpfsLocalVolumeType, node)
}
func setupLocalVolumeGCELocalSSD(config *localTestConfig, node *v1.Node) *localTestVolume {
res, err := framework.IssueSSHCommandWithResult("ls /mnt/disks/by-uuid/google-local-ssds-scsi-fs/", framework.TestContext.Provider, node)
res, err := issueNodeCommandWithResult(config, "ls /mnt/disks/by-uuid/google-local-ssds-scsi-fs/", node)
Expect(err).NotTo(HaveOccurred())
dirName := strings.Fields(res.Stdout)[0]
dirName := strings.Fields(res)[0]
hostDir := "/mnt/disks/by-uuid/google-local-ssds-scsi-fs/" + dirName
// Populate volume with testFile containing testFileContent.
return setupWriteTestFile(hostDir, config, GCELocalSSDVolumeType, node)
// GCE local SSD does not require creating a directory
return &localTestVolume{
node: node,
hostDir: hostDir,
localVolumeType: GCELocalSSDVolumeType,
}
}
func setupLocalVolumeDirectory(config *localTestConfig, node *v1.Node) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
// Populate volume with testFile containing testFileContent.
return setupWriteTestFile(hostDir, config, DirectoryLocalVolumeType, node)
return generateLocalTestVolume(hostDir, config, DirectoryLocalVolumeType, node)
}
// launchNodeExecPodForLocalPV launches a hostexec pod for local PV and waits
// until it's Running.
func launchNodeExecPodForLocalPV(client clientset.Interface, ns, node string) *v1.Pod {
hostExecPod := framework.NewHostExecPodSpec(ns, fmt.Sprintf("hostexec-%s", node))
hostExecPod.Spec.NodeName = node
hostExecPod.Spec.Volumes = []v1.Volume{
{
// Required to enter the host mount namespace via nsenter.
Name: "rootfs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/",
},
},
},
}
hostExecPod.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
{
Name: "rootfs",
MountPath: "/rootfs",
ReadOnly: true,
},
}
hostExecPod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{
Privileged: func(privileged bool) *bool {
return &privileged
}(true),
}
pod, err := client.CoreV1().Pods(ns).Create(hostExecPod)
framework.ExpectNoError(err)
err = framework.WaitForPodRunningInNamespace(client, pod)
framework.ExpectNoError(err)
return pod
}
// issueNodeCommandWithResult issues command on given node and returns stdout.
func issueNodeCommandWithResult(config *localTestConfig, cmd string, node *v1.Node) (string, error) {
var pod *v1.Pod
pod, ok := config.nodeExecPods[node.Name]
if !ok {
pod = launchNodeExecPodForLocalPV(config.client, config.ns, node.Name)
if pod == nil {
return "", fmt.Errorf("failed to create hostexec pod for node %q", node)
}
config.nodeExecPods[node.Name] = pod
}
args := []string{
"exec",
fmt.Sprintf("--namespace=%v", pod.Namespace),
pod.Name,
"--",
"nsenter",
"--mount=/rootfs/proc/1/ns/mnt",
"--",
"sh",
"-c",
cmd,
}
return framework.RunKubectl(args...)
}
// issueNodeCommand works like issueNodeCommandWithResult, but discards result.
func issueNodeCommand(config *localTestConfig, cmd string, node *v1.Node) error {
_, err := issueNodeCommandWithResult(config, cmd, node)
return err
}
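As a rough sketch of what the two helpers above execute (pod, namespace, and command are placeholders), the assembled args expand to a kubectl invocation equivalent to:
// kubectl exec --namespace=<ns> hostexec-<node> -- \
//     nsenter --mount=/rootfs/proc/1/ns/mnt -- sh -c '<cmd>'
//
// nsenter joins the mount namespace of the host's PID 1, reachable through
// the read-only /rootfs hostPath mount, so <cmd> observes the node's real
// filesystem instead of the container's.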
func setupLocalVolumeDirectoryLink(config *localTestConfig, node *v1.Node) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
hostDirBackend := hostDir + "-backend"
cmd := fmt.Sprintf("mkdir %s && ln -s %s %s", hostDirBackend, hostDirBackend, hostDir)
_, err := framework.IssueSSHCommandWithResult(cmd, framework.TestContext.Provider, node)
cmd := fmt.Sprintf("mkdir %s && sudo ln -s %s %s", hostDirBackend, hostDirBackend, hostDir)
_, err := issueNodeCommandWithResult(config, cmd, node)
Expect(err).NotTo(HaveOccurred())
// Populate volume with testFile containing testFileContent.
return setupWriteTestFile(hostDir, config, DirectoryLinkLocalVolumeType, node)
return generateLocalTestVolume(hostDir, config, DirectoryLinkLocalVolumeType, node)
}
func setupLocalVolumeDirectoryBindMounted(config *localTestConfig, node *v1.Node) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
cmd := fmt.Sprintf("mkdir %s && sudo mount --bind %s %s", hostDir, hostDir, hostDir)
_, err := framework.IssueSSHCommandWithResult(cmd, framework.TestContext.Provider, node)
_, err := issueNodeCommandWithResult(config, cmd, node)
Expect(err).NotTo(HaveOccurred())
// Populate volume with testFile containing testFileContent.
return setupWriteTestFile(hostDir, config, DirectoryBindMountedLocalVolumeType, node)
return generateLocalTestVolume(hostDir, config, DirectoryBindMountedLocalVolumeType, node)
}
func setupLocalVolumeDirectoryLinkBindMounted(config *localTestConfig, node *v1.Node) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
hostDirBackend := hostDir + "-backend"
cmd := fmt.Sprintf("mkdir %s && sudo mount --bind %s %s && ln -s %s %s",
cmd := fmt.Sprintf("mkdir %s && sudo mount --bind %s %s && sudo ln -s %s %s",
hostDirBackend, hostDirBackend, hostDirBackend, hostDirBackend, hostDir)
_, err := framework.IssueSSHCommandWithResult(cmd, framework.TestContext.Provider, node)
_, err := issueNodeCommandWithResult(config, cmd, node)
Expect(err).NotTo(HaveOccurred())
// Populate volume with testFile containing testFileContent.
return setupWriteTestFile(hostDir, config, DirectoryLinkBindMountedLocalVolumeType, node)
return generateLocalTestVolume(hostDir, config, DirectoryLinkBindMountedLocalVolumeType, node)
}
func setupLocalVolumeBlock(config *localTestConfig, node *v1.Node) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
createAndMapBlockLocalVolume(config, hostDir, node)
loopDev := getBlockLoopDev(hostDir, node)
// Populate block volume with testFile containing testFileContent.
volume := setupWriteTestFile(loopDev, config, BlockLocalVolumeType, node)
loopDev := getBlockLoopDev(config, hostDir, node)
volume := generateLocalTestVolume(loopDev, config, BlockLocalVolumeType, node)
volume.hostDir = loopDev
volume.loopDevDir = hostDir
return volume
}
func setupLocalVolumeBlockFs(config *localTestConfig, node *v1.Node) *localTestVolume {
func setupLocalVolumeBlockFsWithFormat(config *localTestConfig, node *v1.Node) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
createAndMapBlockLocalVolume(config, hostDir, node)
loopDev := getBlockLoopDev(hostDir, node)
loopDev := getBlockLoopDev(config, hostDir, node)
// format and mount at hostDir
// give others rwx for read/write testing
cmd := fmt.Sprintf("sudo mkfs -t ext4 %s && sudo mount -t ext4 %s %s && sudo chmod o+rwx %s", loopDev, loopDev, hostDir, hostDir)
_, err := framework.IssueSSHCommandWithResult(cmd, framework.TestContext.Provider, node)
_, err := issueNodeCommandWithResult(config, cmd, node)
Expect(err).NotTo(HaveOccurred())
// Populate block volume with testFile containing testFileContent.
volume := setupWriteTestFile(hostDir, config, BlockFsLocalVolumeType, node)
volume := generateLocalTestVolume(hostDir, config, BlockFsWithFormatLocalVolumeType, node)
volume.hostDir = hostDir
volume.loopDevDir = loopDev
return volume
}
func setupLocalVolumeBlockFsWithoutFormat(config *localTestConfig, node *v1.Node) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
createAndMapBlockLocalVolume(config, hostDir, node)
loopDev := getBlockLoopDev(config, hostDir, node)
volume := generateLocalTestVolume(loopDev, config, BlockFsWithoutFormatLocalVolumeType, node)
// Set the block device path directly as the local PV spec path, so that the
// local volume plugin's Filesystem mode is exercised on a raw block device.
volume.hostDir = loopDev
volume.loopDevDir = hostDir
return volume
}
// Determine the /dev/loopXXX device associated with this test, via its hostDir.
func getBlockLoopDev(hostDir string, node *v1.Node) string {
func getBlockLoopDev(config *localTestConfig, hostDir string, node *v1.Node) string {
loopDevCmd := fmt.Sprintf("E2E_LOOP_DEV=$(sudo losetup | grep %s/file | awk '{ print $1 }') 2>&1 > /dev/null && echo ${E2E_LOOP_DEV}", hostDir)
loopDevResult, err := framework.IssueSSHCommandWithResult(loopDevCmd, framework.TestContext.Provider, node)
loopDevResult, err := issueNodeCommandWithResult(config, loopDevCmd, node)
Expect(err).NotTo(HaveOccurred())
return strings.TrimSpace(loopDevResult.Stdout)
return strings.TrimSpace(loopDevResult)
}
func verifyLocalVolume(config *localTestConfig, volume *localTestVolume) {
@@ -999,8 +1113,9 @@ func verifyLocalPod(config *localTestConfig, volume *localTestVolume, pod *v1.Po
// Deletes the PVC/PV, and launches a pod with hostpath volume to remove the test directory.
func cleanupLocalVolumeGCELocalSSD(config *localTestConfig, volume *localTestVolume) {
By("Removing the test directory")
removeCmd := fmt.Sprintf("rm %s", volume.hostDir+"/"+testFile)
err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, volume.node)
file := volume.hostDir + "/" + testFile
removeCmd := fmt.Sprintf("if [ -f %s ]; then rm %s; fi", file, file)
err := issueNodeCommand(config, removeCmd, volume.node)
Expect(err).NotTo(HaveOccurred())
}
@@ -1010,7 +1125,7 @@ func cleanupLocalVolumeTmpfs(config *localTestConfig, volume *localTestVolume) {
By("Removing the test directory")
removeCmd := fmt.Sprintf("rm -r %s", volume.hostDir)
err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, volume.node)
err := issueNodeCommand(config, removeCmd, volume.node)
Expect(err).NotTo(HaveOccurred())
}
@@ -1018,7 +1133,7 @@ func cleanupLocalVolumeTmpfs(config *localTestConfig, volume *localTestVolume) {
func cleanupLocalVolumeDirectory(config *localTestConfig, volume *localTestVolume) {
By("Removing the test directory")
removeCmd := fmt.Sprintf("rm -r %s", volume.hostDir)
err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, volume.node)
err := issueNodeCommand(config, removeCmd, volume.node)
Expect(err).NotTo(HaveOccurred())
}
@@ -1027,8 +1142,8 @@ func cleanupLocalVolumeDirectoryLink(config *localTestConfig, volume *localTestV
By("Removing the test directory")
hostDir := volume.hostDir
hostDirBackend := hostDir + "-backend"
removeCmd := fmt.Sprintf("rm -r %s && rm -r %s", hostDir, hostDirBackend)
err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, volume.node)
removeCmd := fmt.Sprintf("sudo rm -r %s && rm -r %s", hostDir, hostDirBackend)
err := issueNodeCommand(config, removeCmd, volume.node)
Expect(err).NotTo(HaveOccurred())
}
@@ -1037,7 +1152,7 @@ func cleanupLocalVolumeDirectoryBindMounted(config *localTestConfig, volume *loc
By("Removing the test directory")
hostDir := volume.hostDir
removeCmd := fmt.Sprintf("sudo umount %s && rm -r %s", hostDir, hostDir)
err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, volume.node)
err := issueNodeCommand(config, removeCmd, volume.node)
Expect(err).NotTo(HaveOccurred())
}
@@ -1046,8 +1161,8 @@ func cleanupLocalVolumeDirectoryLinkBindMounted(config *localTestConfig, volume
By("Removing the test directory")
hostDir := volume.hostDir
hostDirBackend := hostDir + "-backend"
removeCmd := fmt.Sprintf("rm %s && sudo umount %s && rm -r %s", hostDir, hostDirBackend, hostDirBackend)
err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, volume.node)
removeCmd := fmt.Sprintf("sudo rm %s && sudo umount %s && rm -r %s", hostDir, hostDirBackend, hostDirBackend)
err := issueNodeCommand(config, removeCmd, volume.node)
Expect(err).NotTo(HaveOccurred())
}
@@ -1057,20 +1172,29 @@ func cleanupLocalVolumeBlock(config *localTestConfig, volume *localTestVolume) {
unmapBlockLocalVolume(config, volume.hostDir, volume.node)
By("Removing the test directory")
removeCmd := fmt.Sprintf("rm -r %s", volume.hostDir)
err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, volume.node)
err := issueNodeCommand(config, removeCmd, volume.node)
Expect(err).NotTo(HaveOccurred())
}
// Deletes the PVC/PV and removes the test directory holding the block file.
func cleanupLocalVolumeBlockFs(config *localTestConfig, volume *localTestVolume) {
func cleanupLocalVolumeBlockFsWithFormat(config *localTestConfig, volume *localTestVolume) {
// umount first
By("Umount blockfs mountpoint")
umountCmd := fmt.Sprintf("sudo umount %s", volume.hostDir)
err := framework.IssueSSHCommand(umountCmd, framework.TestContext.Provider, volume.node)
err := issueNodeCommand(config, umountCmd, volume.node)
unmapBlockLocalVolume(config, volume.hostDir, volume.node)
By("Removing the test directory")
removeCmd := fmt.Sprintf("rm -r %s", volume.hostDir)
err = framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, volume.node)
err = issueNodeCommand(config, removeCmd, volume.node)
Expect(err).NotTo(HaveOccurred())
}
func cleanupLocalVolumeBlockFsWithoutFormat(config *localTestConfig, volume *localTestVolume) {
volume.hostDir = volume.loopDevDir
unmapBlockLocalVolume(config, volume.hostDir, volume.node)
By("Removing the test directory")
removeCmd := fmt.Sprintf("rm -r %s", volume.hostDir)
err := issueNodeCommand(config, removeCmd, volume.node)
Expect(err).NotTo(HaveOccurred())
}
@@ -1167,7 +1291,7 @@ func makeLocalPod(config *localTestConfig, volume *localTestVolume, cmd string)
}
if volume.localVolumeType == BlockLocalVolumeType {
// Block e2e tests require utilities for writing to block devices (e.g. dd), and nginx has these utilities.
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.NginxSlim)
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.Nginx)
}
return pod
}
@@ -1236,13 +1360,13 @@ func createLocalPod(config *localTestConfig, volume *localTestVolume, fsGroup *i
func createAndMountTmpfsLocalVolume(config *localTestConfig, dir string, node *v1.Node) {
By(fmt.Sprintf("Creating tmpfs mount point on node %q at path %q", node.Name, dir))
err := framework.IssueSSHCommand(fmt.Sprintf("mkdir -p %q && sudo mount -t tmpfs -o size=1m tmpfs-%q %q", dir, dir, dir), framework.TestContext.Provider, node)
err := issueNodeCommand(config, fmt.Sprintf("mkdir -p %q && sudo mount -t tmpfs -o size=1m tmpfs-%q %q", dir, dir, dir), node)
Expect(err).NotTo(HaveOccurred())
}
func unmountTmpfsLocalVolume(config *localTestConfig, dir string, node *v1.Node) {
By(fmt.Sprintf("Unmount tmpfs mount point on node %q at path %q", node.Name, dir))
err := framework.IssueSSHCommand(fmt.Sprintf("sudo umount %q", dir), framework.TestContext.Provider, node)
err := issueNodeCommand(config, fmt.Sprintf("sudo umount %q", dir), node)
Expect(err).NotTo(HaveOccurred())
}
@@ -1251,28 +1375,19 @@ func createAndMapBlockLocalVolume(config *localTestConfig, dir string, node *v1.
mkdirCmd := fmt.Sprintf("mkdir -p %s", dir)
// Create 10MB file that will serve as the backing for block device.
ddCmd := fmt.Sprintf("dd if=/dev/zero of=%s/file bs=512 count=20480", dir)
losetupLoopDevCmd := fmt.Sprintf("E2E_LOOP_DEV=$(sudo losetup -f) && echo ${E2E_LOOP_DEV}")
losetupCmd := fmt.Sprintf("sudo losetup ${E2E_LOOP_DEV} %s/file", dir)
err := framework.IssueSSHCommand(fmt.Sprintf("%s && %s && %s && %s", mkdirCmd, ddCmd, losetupLoopDevCmd, losetupCmd), framework.TestContext.Provider, node)
losetupCmd := fmt.Sprintf("sudo losetup -f %s/file", dir)
err := issueNodeCommand(config, fmt.Sprintf("%s && %s && %s", mkdirCmd, ddCmd, losetupCmd), node)
Expect(err).NotTo(HaveOccurred())
}
func unmapBlockLocalVolume(config *localTestConfig, dir string, node *v1.Node) {
loopDev := getBlockLoopDev(dir, node)
loopDev := getBlockLoopDev(config, dir, node)
By(fmt.Sprintf("Unmap block device %q on node %q at path %s/file", loopDev, node.Name, dir))
losetupDeleteCmd := fmt.Sprintf("sudo losetup -d %s", loopDev)
err := framework.IssueSSHCommand(losetupDeleteCmd, framework.TestContext.Provider, node)
err := issueNodeCommand(config, losetupDeleteCmd, node)
Expect(err).NotTo(HaveOccurred())
}
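The block-volume helpers above share one loop-device lifecycle; a minimal standalone sketch (function name hypothetical, assuming only fmt and standard node-side shell tools):
func loopDeviceLifecycleCmds(dir string) (create, find, detach string) {
	// Back the device with a 10MB file, then attach the first free /dev/loopN.
	create = fmt.Sprintf("mkdir -p %s && dd if=/dev/zero of=%s/file bs=512 count=20480 && sudo losetup -f %s/file", dir, dir, dir)
	// Recover which /dev/loopN was assigned by scanning losetup's table.
	find = fmt.Sprintf("sudo losetup | grep %s/file | awk '{ print $1 }'", dir)
	// Detach the device; the backing file can then be removed with rm -r.
	detach = fmt.Sprintf("sudo losetup -d $(%s)", find)
	return create, find, detach
}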
// Create corresponding write and read commands
// to be executed via SSH on the node with the local PV
func createWriteAndReadCmds(testFileDir string, testFile string, writeTestFileContent string, volumeType localVolumeType) (writeCmd string, readCmd string) {
writeCmd = createWriteCmd(testFileDir, testFile, writeTestFileContent, volumeType)
readCmd = createReadCmd(testFileDir, testFile, volumeType)
return writeCmd, readCmd
}
func createWriteCmd(testDir string, testFile string, writeTestFileContent string, volumeType localVolumeType) string {
if volumeType == BlockLocalVolumeType {
// testDir is the block device.
@@ -1306,13 +1421,13 @@ func createReadCmd(testFileDir string, testFile string, volumeType localVolumeTy
// Read testFile and evaluate whether it contains the testFileContent
func testReadFileContent(testFileDir string, testFile string, testFileContent string, pod *v1.Pod, volumeType localVolumeType) {
readCmd := createReadCmd(volumeDir, testFile, volumeType)
readCmd := createReadCmd(testFileDir, testFile, volumeType)
readOut := podRWCmdExec(pod, readCmd)
Expect(readOut).To(ContainSubstring(testFileContent))
}
// Create command to verify that the file doesn't exist
// to be executed via SSH on the node with the local PV
// to be executed via hostexec Pod on the node with the local PV
func createFileDoesntExistCmd(testFileDir string, testFile string) string {
testFilePath := filepath.Join(testFileDir, testFile)
return fmt.Sprintf("[ ! -e %s ]", testFilePath)
@@ -1322,7 +1437,7 @@ func createFileDoesntExistCmd(testFileDir string, testFile string) string {
// Fail on error
func podRWCmdExec(pod *v1.Pod, cmd string) string {
out, err := utils.PodExec(pod, cmd)
framework.Logf("podRWCmdExec out: %q err: %q", out, err)
framework.Logf("podRWCmdExec out: %q err: %v", out, err)
Expect(err).NotTo(HaveOccurred())
return out
}
@@ -1349,12 +1464,13 @@ func setupLocalVolumeProvisioner(config *localTestConfig) {
By("Bootstrapping local volume provisioner")
createServiceAccount(config)
createProvisionerClusterRoleBinding(config)
utils.PrivilegedTestPSPClusterRoleBinding(config.client, config.ns, false /* teardown */, []string{testServiceAccount})
createVolumeConfigMap(config)
for _, node := range config.nodes {
By(fmt.Sprintf("Initializing local volume discovery base path on node %v", node.Name))
mkdirCmd := fmt.Sprintf("mkdir -p %v -m 777", config.discoveryDir)
err := framework.IssueSSHCommand(mkdirCmd, framework.TestContext.Provider, &node)
err := issueNodeCommand(config, mkdirCmd, &node)
Expect(err).NotTo(HaveOccurred())
}
}
@@ -1362,11 +1478,12 @@ func setupLocalVolumeProvisioner(config *localTestConfig) {
func cleanupLocalVolumeProvisioner(config *localTestConfig) {
By("Cleaning up cluster role binding")
deleteClusterRoleBinding(config)
utils.PrivilegedTestPSPClusterRoleBinding(config.client, config.ns, true /* teardown */, []string{testServiceAccount})
for _, node := range config.nodes {
By(fmt.Sprintf("Removing the test discovery directory on node %v", node.Name))
removeCmd := fmt.Sprintf("[ ! -e %v ] || rm -r %v", config.discoveryDir, config.discoveryDir)
err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, &node)
err := issueNodeCommand(config, removeCmd, &node)
Expect(err).NotTo(HaveOccurred())
}
}
@@ -1374,24 +1491,24 @@ func cleanupLocalVolumeProvisioner(config *localTestConfig) {
func setupLocalVolumeProvisionerMountPoint(config *localTestConfig, volumePath string, node *v1.Node) {
By(fmt.Sprintf("Creating local directory at path %q", volumePath))
mkdirCmd := fmt.Sprintf("mkdir %v -m 777", volumePath)
err := framework.IssueSSHCommand(mkdirCmd, framework.TestContext.Provider, node)
err := issueNodeCommand(config, mkdirCmd, node)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Mounting local directory at path %q", volumePath))
mntCmd := fmt.Sprintf("sudo mount --bind %v %v", volumePath, volumePath)
err = framework.IssueSSHCommand(mntCmd, framework.TestContext.Provider, node)
err = issueNodeCommand(config, mntCmd, node)
Expect(err).NotTo(HaveOccurred())
}
func cleanupLocalVolumeProvisionerMountPoint(config *localTestConfig, volumePath string, node *v1.Node) {
By(fmt.Sprintf("Unmounting the test mount point from %q", volumePath))
umountCmd := fmt.Sprintf("[ ! -e %v ] || sudo umount %v", volumePath, volumePath)
err := framework.IssueSSHCommand(umountCmd, framework.TestContext.Provider, node)
err := issueNodeCommand(config, umountCmd, node)
Expect(err).NotTo(HaveOccurred())
By("Removing the test mount point")
removeCmd := fmt.Sprintf("[ ! -e %v ] || rm -r %v", volumePath, volumePath)
err = framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, node)
err = issueNodeCommand(config, removeCmd, node)
Expect(err).NotTo(HaveOccurred())
By("Cleaning up persistent volume")
@@ -1736,7 +1853,7 @@ func findLocalPersistentVolume(c clientset.Interface, volumePath string) (*v1.Pe
return nil, nil
}
func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount int) *appsv1.StatefulSet {
func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount int, anti, parallel bool) *appsv1.StatefulSet {
mounts := []v1.VolumeMount{}
claims := []v1.PersistentVolumeClaim{}
for i := 0; i < volumeCount; i++ {
@@ -1746,25 +1863,32 @@ func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount in
claims = append(claims, *pvc)
}
affinity := v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "app",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"local-volume-test"},
},
},
podAffinityTerms := []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "app",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"local-volume-test"},
},
TopologyKey: "kubernetes.io/hostname",
},
},
TopologyKey: "kubernetes.io/hostname",
},
}
affinity := v1.Affinity{}
if anti {
affinity.PodAntiAffinity = &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: podAffinityTerms,
}
} else {
affinity.PodAffinity = &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: podAffinityTerms,
}
}
labels := map[string]string{"app": "local-volume-test"}
spec := &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
@@ -1784,7 +1908,7 @@ func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount in
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
VolumeMounts: mounts,
},
},
@@ -1796,6 +1920,10 @@ func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount in
},
}
if parallel {
spec.Spec.PodManagementPolicy = appsv1.ParallelPodManagement
}
ss, err := config.client.AppsV1().StatefulSets(config.ns).Create(spec)
Expect(err).NotTo(HaveOccurred())
@@ -1803,16 +1931,21 @@ func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount in
return ss
}
func validateStatefulSet(config *localTestConfig, ss *appsv1.StatefulSet) {
func validateStatefulSet(config *localTestConfig, ss *appsv1.StatefulSet, anti bool) {
pods := config.ssTester.GetPodList(ss)
// Verify that each pod is on a different node
nodes := sets.NewString()
for _, pod := range pods.Items {
nodes.Insert(pod.Spec.NodeName)
}
Expect(nodes.Len()).To(Equal(len(pods.Items)))
if anti {
// Verify that each pod is on a different node
Expect(nodes.Len()).To(Equal(len(pods.Items)))
} else {
// Verify that all pods are on same node.
Expect(nodes.Len()).To(Equal(1))
}
// Validate all PVCs are bound
for _, pod := range pods.Items {
@@ -1829,11 +1962,11 @@ func validateStatefulSet(config *localTestConfig, ss *appsv1.StatefulSet) {
// SkipUnlessLocalSSDExists takes in an ssdInterface (scsi/nvme) and a filesystemType (fs/block)
// and skips if a disk of that type does not exist on the node
func SkipUnlessLocalSSDExists(ssdInterface, filesystemType string, node *v1.Node) {
func SkipUnlessLocalSSDExists(config *localTestConfig, ssdInterface, filesystemType string, node *v1.Node) {
ssdCmd := fmt.Sprintf("ls -1 /mnt/disks/by-uuid/google-local-ssds-%s-%s/ | wc -l", ssdInterface, filesystemType)
res, err := framework.IssueSSHCommandWithResult(ssdCmd, framework.TestContext.Provider, node)
res, err := issueNodeCommandWithResult(config, ssdCmd, node)
Expect(err).NotTo(HaveOccurred())
num, err := strconv.Atoi(strings.TrimSpace(res.Stdout))
num, err := strconv.Atoi(strings.TrimSpace(res))
Expect(err).NotTo(HaveOccurred())
if num < 1 {
framework.Skipf("Requires at least 1 %s %s localSSD ", ssdInterface, filesystemType)


@@ -23,6 +23,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -30,6 +31,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
// Validate PV/PVC, create and verify writer pod, delete the PVC, and validate the PV's
@@ -281,6 +283,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, pod.Name, ns))
By("Deleting the claim")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
framework.ExpectNoError(framework.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable))
By("Re-mounting the volume.")
@@ -296,8 +299,126 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, pod.Name, ns))
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
framework.Logf("Pod exited without failure; the volume has been recycled.")
})
})
})
Describe("Default StorageClass", func() {
Context("pods that use multiple volumes", func() {
AfterEach(func() {
framework.DeleteAllStatefulSets(c, ns)
})
It("should be reschedulable", func() {
// Only run on providers with default storageclass
framework.SkipUnlessProviderIs("openstack", "gce", "gke", "vsphere", "azure")
numVols := 4
ssTester := framework.NewStatefulSetTester(c)
By("Creating a StatefulSet pod to initialize data")
writeCmd := "true"
for i := 0; i < numVols; i++ {
writeCmd += fmt.Sprintf("&& touch %v", getVolumeFile(i))
}
writeCmd += "&& sleep 10000"
probe := &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{
// Check that the last file got created
Command: []string{"test", "-f", getVolumeFile(numVols - 1)},
},
},
InitialDelaySeconds: 1,
PeriodSeconds: 1,
}
mounts := []v1.VolumeMount{}
claims := []v1.PersistentVolumeClaim{}
for i := 0; i < numVols; i++ {
pvc := framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{}, ns)
pvc.Name = getVolName(i)
mounts = append(mounts, v1.VolumeMount{Name: pvc.Name, MountPath: getMountPath(i)})
claims = append(claims, *pvc)
}
spec := makeStatefulSetWithPVCs(ns, writeCmd, mounts, claims, probe)
ss, err := c.AppsV1().StatefulSets(ns).Create(spec)
Expect(err).NotTo(HaveOccurred())
ssTester.WaitForRunningAndReady(1, ss)
By("Deleting the StatefulSet but not the volumes")
// Scale down to 0 first so that the Delete is quick
ss, err = ssTester.Scale(ss, 0)
Expect(err).NotTo(HaveOccurred())
ssTester.WaitForStatusReplicas(ss, 0)
err = c.AppsV1().StatefulSets(ns).Delete(ss.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
By("Creating a new Statefulset and validating the data")
validateCmd := "true"
for i := 0; i < numVols; i++ {
validateCmd += fmt.Sprintf("&& test -f %v", getVolumeFile(i))
}
validateCmd += "&& sleep 10000"
spec = makeStatefulSetWithPVCs(ns, validateCmd, mounts, claims, probe)
ss, err = c.AppsV1().StatefulSets(ns).Create(spec)
Expect(err).NotTo(HaveOccurred())
ssTester.WaitForRunningAndReady(1, ss)
})
})
})
})
func getVolName(i int) string {
return fmt.Sprintf("vol%v", i)
}
func getMountPath(i int) string {
return fmt.Sprintf("/mnt/%v", getVolName(i))
}
func getVolumeFile(i int) string {
return fmt.Sprintf("%v/data%v", getMountPath(i), i)
}
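For concreteness, with numVols = 4 the assembled shell strings in the test above expand as below (illustration only; the helpers omit the space before &&, which sh accepts):
// writeCmd:    true&& touch /mnt/vol0/data0&& touch /mnt/vol1/data1&& touch /mnt/vol2/data2&& touch /mnt/vol3/data3&& sleep 10000
// validateCmd: true&& test -f /mnt/vol0/data0&& test -f /mnt/vol1/data1&& test -f /mnt/vol2/data2&& test -f /mnt/vol3/data3&& sleep 10000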
func makeStatefulSetWithPVCs(ns, cmd string, mounts []v1.VolumeMount, claims []v1.PersistentVolumeClaim, readyProbe *v1.Probe) *appsv1.StatefulSet {
ssReplicas := int32(1)
labels := map[string]string{"app": "many-volumes-test"}
return &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: "many-volumes-test",
Namespace: ns,
},
Spec: appsv1.StatefulSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"app": "many-volumes-test"},
},
Replicas: &ssReplicas,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.Nginx),
Command: []string{"/bin/sh"},
Args: []string{"-c", cmd},
VolumeMounts: mounts,
ReadinessProbe: readyProbe,
},
},
},
},
VolumeClaimTemplates: claims,
},
}
}


@@ -17,6 +17,8 @@ limitations under the License.
package storage
import (
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -77,6 +79,9 @@ var _ = utils.SIGDescribe("PV Protection", func() {
pv, err = client.CoreV1().PersistentVolumes().Create(pv)
Expect(err).NotTo(HaveOccurred(), "Error creating PV")
By("Waiting for PV to enter phase Available")
framework.ExpectNoError(framework.WaitForPersistentVolumePhase(v1.VolumeAvailable, client, pv.Name, 1*time.Second, 30*time.Second))
By("Checking that PV Protection finalizer is set")
pv, err = client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "While getting PV status")


@@ -34,9 +34,11 @@ import (
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/kubelet/apis"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
@@ -64,10 +66,21 @@ var _ = utils.SIGDescribe("Regional PD", func() {
testVolumeProvisioning(c, ns)
})
It("should provision storage with delayed binding [Slow]", func() {
testRegionalDelayedBinding(c, ns)
})
It("should provision storage in the allowedTopologies [Slow]", func() {
testRegionalAllowedTopologies(c, ns)
})
It("should provision storage in the allowedTopologies with delayed binding [Slow]", func() {
testRegionalAllowedTopologiesWithDelayedBinding(c, ns)
})
It("should failover to a different zone when all nodes in one zone become unreachable [Slow] [Disruptive]", func() {
testZonalFailover(c, ns)
})
})
})
@@ -86,8 +99,8 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
"zones": strings.Join(cloudZones, ","),
"replication-type": "regional-pd",
},
claimSize: "1.5G",
expectedSize: "2G",
claimSize: "1.5Gi",
expectedSize: "2Gi",
pvCheck: func(volume *v1.PersistentVolume) error {
err := checkGCEPD(volume, "pd-standard")
if err != nil {
@@ -104,8 +117,8 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
"type": "pd-standard",
"replication-type": "regional-pd",
},
claimSize: "1.5G",
expectedSize: "2G",
claimSize: "1.5Gi",
expectedSize: "2Gi",
pvCheck: func(volume *v1.PersistentVolume) error {
err := checkGCEPD(volume, "pd-standard")
if err != nil {
@@ -263,6 +276,94 @@ func testZonalFailover(c clientset.Interface, ns string) {
}
func testRegionalDelayedBinding(c clientset.Interface, ns string) {
test := storageClassTest{
name: "Regional PD storage class with waitForFirstConsumer test on GCE",
provisioner: "kubernetes.io/gce-pd",
parameters: map[string]string{
"type": "pd-standard",
"replication-type": "regional-pd",
},
claimSize: "2Gi",
delayBinding: true,
}
suffix := "delayed-regional"
class := newStorageClass(test, ns, suffix)
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
pv, node := testBindingWaitForFirstConsumer(c, claim, class)
if node == nil {
framework.Failf("unexpected nil node found")
}
zone, ok := node.Labels[kubeletapis.LabelZoneFailureDomain]
if !ok {
framework.Failf("label %s not found on Node", kubeletapis.LabelZoneFailureDomain)
}
checkZoneFromLabelAndAffinity(pv, zone, false)
}
func testRegionalAllowedTopologies(c clientset.Interface, ns string) {
test := storageClassTest{
name: "Regional PD storage class with allowedTopologies test on GCE",
provisioner: "kubernetes.io/gce-pd",
parameters: map[string]string{
"type": "pd-standard",
"replication-type": "regional-pd",
},
claimSize: "2Gi",
expectedSize: "2Gi",
}
suffix := "topo-regional"
class := newStorageClass(test, ns, suffix)
zones := getTwoRandomZones(c)
addAllowedTopologiesToStorageClass(c, class, zones)
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
pv := testDynamicProvisioning(test, c, claim, class)
checkZonesFromLabelAndAffinity(pv, sets.NewString(zones...), true)
}
func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns string) {
test := storageClassTest{
name: "Regional PD storage class with allowedTopologies and waitForFirstConsumer test on GCE",
provisioner: "kubernetes.io/gce-pd",
parameters: map[string]string{
"type": "pd-standard",
"replication-type": "regional-pd",
},
claimSize: "2Gi",
delayBinding: true,
}
suffix := "topo-delayed-regional"
class := newStorageClass(test, ns, suffix)
topoZones := getTwoRandomZones(c)
addAllowedTopologiesToStorageClass(c, class, topoZones)
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
pv, node := testBindingWaitForFirstConsumer(c, claim, class)
if node == nil {
framework.Failf("unexpected nil node found")
}
nodeZone, ok := node.Labels[kubeletapis.LabelZoneFailureDomain]
if !ok {
framework.Failf("label %s not found on Node", kubeletapis.LabelZoneFailureDomain)
}
zoneFound := false
for _, zone := range topoZones {
if zone == nodeZone {
zoneFound = true
break
}
}
if !zoneFound {
framework.Failf("zones specified in AllowedTopologies: %v does not contain zone of node where PV got provisioned: %s", topoZones, nodeZone)
}
checkZonesFromLabelAndAffinity(pv, sets.NewString(topoZones...), true)
}
func getPVC(c clientset.Interface, ns string, pvcLabels map[string]string) *v1.PersistentVolumeClaim {
selector := labels.Set(pvcLabels).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
@@ -283,6 +384,18 @@ func getPod(c clientset.Interface, ns string, podLabels map[string]string) *v1.P
return &podList.Items[0]
}
func addAllowedTopologiesToStorageClass(c clientset.Interface, sc *storage.StorageClass, zones []string) {
term := v1.TopologySelectorTerm{
MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
{
Key: kubeletapis.LabelZoneFailureDomain,
Values: zones,
},
},
}
sc.AllowedTopologies = append(sc.AllowedTopologies, term)
}
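In manifest form, the appended term corresponds roughly to the following StorageClass stanza (zone names hypothetical):
// allowedTopologies:
// - matchLabelExpressions:
//   - key: failure-domain.beta.kubernetes.io/zone
//     values: ["us-central1-a", "us-central1-b"]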
// Generates the spec of a StatefulSet with 1 replica that mounts a Regional PD.
func newStatefulSet(claimTemplate *v1.PersistentVolumeClaim, ns string) (sts *appsv1.StatefulSet, svc *v1.Service, labels map[string]string) {
var replicas int32 = 1
@@ -334,7 +447,7 @@ func newPodTemplate(labels map[string]string) *v1.PodTemplateSpec {
// and prints the entire file to stdout.
{
Name: "busybox",
Image: "k8s.gcr.io/busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c"},
Args: []string{
"echo ${POD_NAME} >> /mnt/data/regional-pd/pods.txt;" +

File diff suppressed because it is too large


@@ -0,0 +1,26 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["testpattern.go"],
importpath = "k8s.io/kubernetes/test/e2e/storage/testpatterns",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//test/e2e/framework:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -0,0 +1,168 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testpatterns
import (
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
// MinFileSize represents minimum file size (1 MiB) for testing
MinFileSize = 1 * framework.MiB
// FileSizeSmall represents small file size (1 MiB) for testing
FileSizeSmall = 1 * framework.MiB
// FileSizeMedium represents medium file size (100 MiB) for testing
FileSizeMedium = 100 * framework.MiB
// FileSizeLarge represents large file size (1 GiB) for testing
FileSizeLarge = 1 * framework.GiB
)
// TestVolType represents a volume type to be tested in a TestSuite
type TestVolType string
var (
// InlineVolume represents a volume type that is used inline in volumeSource
InlineVolume TestVolType = "InlineVolume"
// PreprovisionedPV represents a volume type for pre-provisioned Persistent Volume
PreprovisionedPV TestVolType = "PreprovisionedPV"
// DynamicPV represents a volume type for dynamic provisioned Persistent Volume
DynamicPV TestVolType = "DynamicPV"
)
// TestPattern represents a combination of parameters to be tested in a TestSuite
type TestPattern struct {
Name string // Name of TestPattern
FeatureTag string // featureTag for the TestSuite
VolType TestVolType // Volume type of the volume
FsType string // Fstype of the volume
VolMode v1.PersistentVolumeMode // PersistentVolumeMode of the volume
}
var (
// Definitions for default fsType
// DefaultFsInlineVolume is TestPattern for "Inline-volume (default fs)"
DefaultFsInlineVolume = TestPattern{
Name: "Inline-volume (default fs)",
VolType: InlineVolume,
}
// DefaultFsPreprovisionedPV is TestPattern for "Pre-provisioned PV (default fs)"
DefaultFsPreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (default fs)",
VolType: PreprovisionedPV,
}
// DefaultFsDynamicPV is TestPattern for "Dynamic PV (default fs)"
DefaultFsDynamicPV = TestPattern{
Name: "Dynamic PV (default fs)",
VolType: DynamicPV,
}
// Definitions for ext3
// Ext3InlineVolume is TestPattern for "Inline-volume (ext3)"
Ext3InlineVolume = TestPattern{
Name: "Inline-volume (ext3)",
VolType: InlineVolume,
FsType: "ext3",
}
// Ext3PreprovisionedPV is TestPattern for "Pre-provisioned PV (ext3)"
Ext3PreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (ext3)",
VolType: PreprovisionedPV,
FsType: "ext3",
}
// Ext3DynamicPV is TestPattern for "Dynamic PV (ext3)"
Ext3DynamicPV = TestPattern{
Name: "Dynamic PV (ext3)",
VolType: DynamicPV,
FsType: "ext3",
}
// Definitions for ext4
// Ext4InlineVolume is TestPattern for "Inline-volume (ext4)"
Ext4InlineVolume = TestPattern{
Name: "Inline-volume (ext4)",
VolType: InlineVolume,
FsType: "ext4",
}
// Ext4PreprovisionedPV is TestPattern for "Pre-provisioned PV (ext4)"
Ext4PreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (ext4)",
VolType: PreprovisionedPV,
FsType: "ext4",
}
// Ext4DynamicPV is TestPattern for "Dynamic PV (ext4)"
Ext4DynamicPV = TestPattern{
Name: "Dynamic PV (ext4)",
VolType: DynamicPV,
FsType: "ext4",
}
// Definitions for xfs
// XfsInlineVolume is TestPattern for "Inline-volume (xfs)"
XfsInlineVolume = TestPattern{
Name: "Inline-volume (xfs)",
VolType: InlineVolume,
FsType: "xfs",
}
// XfsPreprovisionedPV is TestPattern for "Pre-provisioned PV (xfs)"
XfsPreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (xfs)",
VolType: PreprovisionedPV,
FsType: "xfs",
}
// XfsDynamicPV is TestPattern for "Dynamic PV (xfs)"
XfsDynamicPV = TestPattern{
Name: "Dynamic PV (xfs)",
VolType: DynamicPV,
FsType: "xfs",
}
// Definitions for Filesystem volume mode
// FsVolModePreprovisionedPV is TestPattern for "Pre-provisioned PV (filesystem)"
FsVolModePreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (filesystem volmode)",
VolType: PreprovisionedPV,
VolMode: v1.PersistentVolumeFilesystem,
}
// FsVolModeDynamicPV is TestPattern for "Dynamic PV (filesystem)"
FsVolModeDynamicPV = TestPattern{
Name: "Dynamic PV (filesystem volmode)",
VolType: DynamicPV,
VolMode: v1.PersistentVolumeFilesystem,
}
// Definitions for block volume mode
// BlockVolModePreprovisionedPV is TestPattern for "Pre-provisioned PV (block)"
BlockVolModePreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (block volmode)",
VolType: PreprovisionedPV,
VolMode: v1.PersistentVolumeBlock,
}
// BlockVolModeDynamicPV is TestPattern for "Dynamic PV (block)(immediate bind)"
BlockVolModeDynamicPV = TestPattern{
Name: "Dynamic PV (block volmode)",
VolType: DynamicPV,
VolMode: v1.PersistentVolumeBlock,
}
)
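New patterns compose the same way as the stock definitions above; for example, a hypothetical driver-specific pattern (name and fsType invented for illustration):
// NtfsPreprovisionedPV would be a TestPattern for "Pre-provisioned PV (ntfs)"
var NtfsPreprovisionedPV = TestPattern{
	Name:    "Pre-provisioned PV (ntfs)",
	VolType: PreprovisionedPV,
	FsType:  "ntfs",
}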


@@ -0,0 +1,47 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"base.go",
"subpath.go",
"volume_io.go",
"volumemode.go",
"volumes.go",
],
importpath = "k8s.io/kubernetes/test/e2e/storage/testsuites",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/storage/drivers:go_default_library",
"//test/e2e/storage/testpatterns:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -0,0 +1,325 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testsuites
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
// TestSuite represents an interface for a set of tests which works with TestDriver
type TestSuite interface {
// getTestSuiteInfo returns the TestSuiteInfo for this TestSuite
getTestSuiteInfo() TestSuiteInfo
// skipUnsupportedTest skips the test if this TestSuite is not suitable to be tested with the combination of TestPattern and TestDriver
skipUnsupportedTest(testpatterns.TestPattern, drivers.TestDriver)
// execTest executes test of the testpattern for the driver
execTest(drivers.TestDriver, testpatterns.TestPattern)
}
type TestSuiteInfo struct {
name string // name of the TestSuite
featureTag string // featureTag for the TestSuite
testPatterns []testpatterns.TestPattern // Slice of TestPattern for the TestSuite
}
// TestResource represents an interface for resources that are used by TestSuite
type TestResource interface {
// setupResource sets up test resources to be used for the tests with the
// combination of TestDriver and TestPattern
setupResource(drivers.TestDriver, testpatterns.TestPattern)
// cleanupResource cleans up the test resources created in setupResource
cleanupResource(drivers.TestDriver, testpatterns.TestPattern)
}
func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string {
tsInfo := suite.getTestSuiteInfo()
return fmt.Sprintf("[Testpattern: %s]%s %s%s", pattern.Name, pattern.FeatureTag, tsInfo.name, tsInfo.featureTag)
}
// RunTestSuite runs all testpatterns of all testSuites for a driver
func RunTestSuite(f *framework.Framework, config framework.VolumeTestConfig, driver drivers.TestDriver, tsInits []func() TestSuite) {
for _, testSuiteInit := range tsInits {
suite := testSuiteInit()
tsInfo := suite.getTestSuiteInfo()
for _, pattern := range tsInfo.testPatterns {
suite.execTest(driver, pattern)
}
}
}
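A hypothetical caller would wire a driver into the suites roughly like this (the runner function and suite list are illustrative; only InitSubPathTestSuite below is defined in this package):
func runAllSuites(f *framework.Framework, config framework.VolumeTestConfig, driver drivers.TestDriver) {
	// Each entry is a TestSuite constructor; execTest registers the Ginkgo
	// Context for every testPattern the suite declares.
	RunTestSuite(f, config, driver, []func() TestSuite{
		InitSubPathTestSuite,
	})
}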
// skipUnsupportedTest will skip tests if the combination of driver, testsuite, and testpattern
// is not suitable to be tested.
// Whether it needs to be skipped is checked by the following steps:
// 1. Check whether volType is supported by the driver from its interface
// 2. Check whether fsType is supported by the driver
// 3. Check with driver specific logic
// 4. Check with testSuite specific logic
func skipUnsupportedTest(suite TestSuite, driver drivers.TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
// 1. Check whether volType is supported by the driver from its interface
var isSupported bool
switch pattern.VolType {
case testpatterns.InlineVolume:
_, isSupported = driver.(drivers.InlineVolumeTestDriver)
case testpatterns.PreprovisionedPV:
_, isSupported = driver.(drivers.PreprovisionedPVTestDriver)
case testpatterns.DynamicPV:
_, isSupported = driver.(drivers.DynamicPVTestDriver)
default:
isSupported = false
}
if !isSupported {
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
}
// 2. Check whether fsType is supported by the driver
if !dInfo.SupportedFsType.Has(pattern.FsType) {
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.FsType)
}
// 3. Check with driver specific logic
driver.SkipUnsupportedTest(pattern)
// 4. Check with testSuite specific logic
suite.skipUnsupportedTest(pattern, driver)
}
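Step 3 delegates to the driver; a hypothetical driver without block support might implement the hook like this (fakeDriver is invented for illustration):
func (d *fakeDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
	if pattern.VolMode == v1.PersistentVolumeBlock {
		framework.Skipf("Driver %q does not support block volume mode -- skipping", d.GetDriverInfo().Name)
	}
}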
// genericVolumeTestResource is a generic implementation of TestResource that will be
// usable in most TestSuites.
// See volume_io.go or volumes.go in test/e2e/storage/testsuites/ for how to use this resource.
// Also, see subpath.go in the same directory for how to extend and use it.
type genericVolumeTestResource struct {
driver drivers.TestDriver
volType string
volSource *v1.VolumeSource
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
sc *storagev1.StorageClass
driverTestResource interface{}
}
var _ TestResource = &genericVolumeTestResource{}
// setupResource sets up genericVolumeTestResource
func (r *genericVolumeTestResource) setupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
r.driver = driver
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
cs := f.ClientSet
fsType := pattern.FsType
volType := pattern.VolType
// Create volume for pre-provisioned volume tests
r.driverTestResource = drivers.CreateVolume(driver, volType)
switch volType {
case testpatterns.InlineVolume:
framework.Logf("Creating resource for inline volume")
if iDriver, ok := driver.(drivers.InlineVolumeTestDriver); ok {
r.volSource = iDriver.GetVolumeSource(false, fsType, r.driverTestResource)
r.volType = dInfo.Name
}
case testpatterns.PreprovisionedPV:
framework.Logf("Creating resource for pre-provisioned PV")
if pDriver, ok := driver.(drivers.PreprovisionedPVTestDriver); ok {
pvSource := pDriver.GetPersistentVolumeSource(false, fsType, r.driverTestResource)
if pvSource != nil {
r.volSource, r.pv, r.pvc = createVolumeSourceWithPVCPV(f, dInfo.Name, pvSource, false)
}
r.volType = fmt.Sprintf("%s-preprovisionedPV", dInfo.Name)
}
case testpatterns.DynamicPV:
framework.Logf("Creating resource for dynamic PV")
if dDriver, ok := driver.(drivers.DynamicPVTestDriver); ok {
claimSize := "2Gi"
r.sc = dDriver.GetDynamicProvisionStorageClass(fsType)
By("creating a StorageClass " + r.sc.Name)
var err error
r.sc, err = cs.StorageV1().StorageClasses().Create(r.sc)
Expect(err).NotTo(HaveOccurred())
if r.sc != nil {
r.volSource, r.pv, r.pvc = createVolumeSourceWithPVCPVFromDynamicProvisionSC(
f, dInfo.Name, claimSize, r.sc, false, nil)
}
r.volType = fmt.Sprintf("%s-dynamicPV", dInfo.Name)
}
default:
framework.Failf("genericVolumeTestResource doesn't support: %s", volType)
}
if r.volSource == nil {
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, volType)
}
}
// cleanupResource cleans up genericVolumeTestResource
func (r *genericVolumeTestResource) cleanupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
volType := pattern.VolType
if r.pvc != nil || r.pv != nil {
switch volType {
case testpatterns.PreprovisionedPV:
By("Deleting pv and pvc")
if errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.pv, r.pvc); len(errs) != 0 {
framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs))
}
case testpatterns.DynamicPV:
By("Deleting pvc")
// We only delete the PVC so that PV (and disk) can be cleaned up by dynamic provisioner
if r.pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete {
framework.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isnt %v",
r.pv.Name, v1.PersistentVolumeReclaimDelete)
}
err := framework.DeletePersistentVolumeClaim(f.ClientSet, r.pvc.Name, f.Namespace.Name)
framework.ExpectNoError(err, "Failed to delete PVC %v", r.pvc.Name)
err = framework.WaitForPersistentVolumeDeleted(f.ClientSet, r.pv.Name, 5*time.Second, 5*time.Minute)
framework.ExpectNoError(err, "Persistent Volume %v not deleted by dynamic provisioner", r.pv.Name)
default:
framework.Failf("Found PVC (%v) or PV (%v) but not running Preprovisioned or Dynamic test pattern", r.pvc, r.pv)
}
}
if r.sc != nil {
By("Deleting sc")
deleteStorageClass(f.ClientSet, r.sc.Name)
}
// Cleanup volume for pre-provisioned volume tests
drivers.DeleteVolume(driver, volType, r.driverTestResource)
}
func createVolumeSourceWithPVCPV(
f *framework.Framework,
name string,
pvSource *v1.PersistentVolumeSource,
readOnly bool,
) (*v1.VolumeSource, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
pvConfig := framework.PersistentVolumeConfig{
NamePrefix: fmt.Sprintf("%s-", name),
StorageClassName: f.Namespace.Name,
PVSource: *pvSource,
}
pvcConfig := framework.PersistentVolumeClaimConfig{
StorageClassName: &f.Namespace.Name,
}
framework.Logf("Creating PVC and PV")
pv, pvc, err := framework.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false)
Expect(err).NotTo(HaveOccurred(), "PVC, PV creation failed")
err = framework.WaitOnPVandPVC(f.ClientSet, f.Namespace.Name, pv, pvc)
Expect(err).NotTo(HaveOccurred(), "PVC, PV failed to bind")
volSource := &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
ReadOnly: readOnly,
},
}
return volSource, pv, pvc
}
func createVolumeSourceWithPVCPVFromDynamicProvisionSC(
f *framework.Framework,
name string,
claimSize string,
sc *storagev1.StorageClass,
readOnly bool,
volMode *v1.PersistentVolumeMode,
) (*v1.VolumeSource, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
cs := f.ClientSet
ns := f.Namespace.Name
By("creating a claim")
pvc := getClaim(claimSize, ns)
pvc.Spec.StorageClassName = &sc.Name
if volMode != nil {
pvc.Spec.VolumeMode = volMode
}
var err error
pvc, err = cs.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pv, err := cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
volSource := &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
ReadOnly: readOnly,
},
}
return volSource, pv, pvc
}
func getClaim(claimSize string, ns string) *v1.PersistentVolumeClaim {
claim := v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-",
Namespace: ns,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(claimSize),
},
},
},
}
return &claim
}
// deleteStorageClass deletes the passed-in StorageClass and catches errors other than "Not Found"
func deleteStorageClass(cs clientset.Interface, className string) {
err := cs.StorageV1().StorageClasses().Delete(className, nil)
if err != nil && !apierrs.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
}
}

View File

@@ -0,0 +1,759 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testsuites
import (
"fmt"
"path/filepath"
"strings"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var (
volumePath = "/test-volume"
volumeName = "test-volume"
probeVolumePath = "/probe-volume"
probeFilePath = probeVolumePath + "/probe-file"
fileName = "test-file"
retryDuration = 20
mountImage = imageutils.GetE2EImage(imageutils.Mounttest)
)
type subPathTestSuite struct {
tsInfo TestSuiteInfo
}
var _ TestSuite = &subPathTestSuite{}
// InitSubPathTestSuite returns subPathTestSuite that implements TestSuite interface
func InitSubPathTestSuite() TestSuite {
return &subPathTestSuite{
tsInfo: TestSuiteInfo{
name: "subPath",
testPatterns: []testpatterns.TestPattern{
testpatterns.DefaultFsInlineVolume,
testpatterns.DefaultFsPreprovisionedPV,
testpatterns.DefaultFsDynamicPV,
},
},
}
}
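// A hedged sketch of how this suite is typically driven; the actual wiring
// lives elsewhere (e.g. in_tree_volumes.go), and `driver` here is an assumed
// drivers.TestDriver instance:
//
//	suite := InitSubPathTestSuite()
//	for _, pattern := range suite.getTestSuiteInfo().testPatterns {
//		suite.execTest(driver, pattern)
//	}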
func (s *subPathTestSuite) getTestSuiteInfo() TestSuiteInfo {
return s.tsInfo
}
func (s *subPathTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) {
}
func createSubPathTestInput(pattern testpatterns.TestPattern, resource subPathTestResource) subPathTestInput {
driver := resource.driver
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
subPath := f.Namespace.Name
subPathDir := filepath.Join(volumePath, subPath)
return subPathTestInput{
f: f,
subPathDir: subPathDir,
filePathInSubpath: filepath.Join(volumePath, fileName),
filePathInVolume: filepath.Join(subPathDir, fileName),
volType: resource.volType,
pod: resource.pod,
formatPod: resource.formatPod,
volSource: resource.genericVolumeTestResource.volSource,
roVol: resource.roVolSource,
}
}
func (s *subPathTestSuite) execTest(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
Context(getTestNameStr(s, pattern), func() {
var (
resource subPathTestResource
input subPathTestInput
needsCleanup bool
)
BeforeEach(func() {
needsCleanup = false
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(s, driver, pattern)
needsCleanup = true
// Setup test resource for driver and testpattern
resource = subPathTestResource{}
resource.setupResource(driver, pattern)
// Create test input
input = createSubPathTestInput(pattern, resource)
})
AfterEach(func() {
if needsCleanup {
resource.cleanupResource(driver, pattern)
}
})
testSubPath(&input)
})
}
type subPathTestResource struct {
genericVolumeTestResource
roVolSource *v1.VolumeSource
pod *v1.Pod
formatPod *v1.Pod
}
var _ TestResource = &subPathTestResource{}
func (s *subPathTestResource) setupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
s.driver = driver
dInfo := s.driver.GetDriverInfo()
f := dInfo.Framework
fsType := pattern.FsType
volType := pattern.VolType
// Setup generic test resource
s.genericVolumeTestResource.setupResource(driver, pattern)
// Setup subPath test dependent resource
switch volType {
case testpatterns.InlineVolume:
if iDriver, ok := driver.(drivers.InlineVolumeTestDriver); ok {
s.roVolSource = iDriver.GetVolumeSource(true, fsType, s.genericVolumeTestResource.driverTestResource)
}
case testpatterns.PreprovisionedPV, testpatterns.DynamicPV:
// Both PV-backed patterns reference the claim read-only in the same way
s.roVolSource = &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: s.genericVolumeTestResource.pvc.Name,
ReadOnly: true,
},
}
default:
framework.Failf("SubPath test doesn't support: %s", volType)
}
subPath := f.Namespace.Name
config := dInfo.Config
s.pod = SubpathTestPod(f, subPath, s.volType, s.volSource, true)
s.pod.Spec.NodeName = config.ClientNodeName
s.pod.Spec.NodeSelector = config.NodeSelector
s.formatPod = volumeFormatPod(f, s.volSource)
s.formatPod.Spec.NodeName = config.ClientNodeName
s.formatPod.Spec.NodeSelector = config.NodeSelector
}
func (s *subPathTestResource) cleanupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
// Clean up resources specific to the subPath test
By("Deleting pod")
err := framework.DeletePodWithWait(f, f.ClientSet, s.pod)
Expect(err).ToNot(HaveOccurred(), "while deleting pod")
// Clean up the generic test resource
s.genericVolumeTestResource.cleanupResource(driver, pattern)
}
type subPathTestInput struct {
f *framework.Framework
subPathDir string
filePathInSubpath string
filePathInVolume string
volType string
pod *v1.Pod
formatPod *v1.Pod
volSource *v1.VolumeSource
roVol *v1.VolumeSource
}
func testSubPath(input *subPathTestInput) {
It("should support non-existent path", func() {
// Write the file in the subPath from container 0
setWriteCommand(input.filePathInSubpath, &input.pod.Spec.Containers[0])
// Read it from outside the subPath from container 1
testReadFile(input.f, input.filePathInVolume, input.pod, 1)
})
It("should support existing directory", func() {
// Create the directory
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir))
// Write the file in the subPath from container 0
setWriteCommand(input.filePathInSubpath, &input.pod.Spec.Containers[0])
// Read it from outside the subPath from container 1
testReadFile(input.f, input.filePathInVolume, input.pod, 1)
})
It("should support existing single file", func() {
// Create the file in the init container
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", input.subPathDir, input.filePathInVolume))
// Read it from inside the subPath from container 0
testReadFile(input.f, input.filePathInSubpath, input.pod, 0)
})
It("should support file as subpath", func() {
// Create the file in the init container
setInitCommand(input.pod, fmt.Sprintf("echo %s > %s", input.f.Namespace.Name, input.subPathDir))
TestBasicSubpath(input.f, input.f.Namespace.Name, input.pod)
})
It("should fail if subpath directory is outside the volume [Slow]", func() {
// Create the subpath outside the volume
setInitCommand(input.pod, fmt.Sprintf("ln -s /bin %s", input.subPathDir))
// Pod should fail
testPodFailSubpath(input.f, input.pod)
})
It("should fail if subpath file is outside the volume [Slow]", func() {
// Create the subpath outside the volume
setInitCommand(input.pod, fmt.Sprintf("ln -s /bin/sh %s", input.subPathDir))
// Pod should fail
testPodFailSubpath(input.f, input.pod)
})
It("should fail if non-existent subpath is outside the volume [Slow]", func() {
// Create the subpath outside the volume
setInitCommand(input.pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", input.subPathDir))
// Pod should fail
testPodFailSubpath(input.f, input.pod)
})
It("should fail if subpath with backstepping is outside the volume [Slow]", func() {
// Create the subpath outside the volume
setInitCommand(input.pod, fmt.Sprintf("ln -s ../ %s", input.subPathDir))
// Pod should fail
testPodFailSubpath(input.f, input.pod)
})
It("should support creating multiple subpath from same volumes [Slow]", func() {
subpathDir1 := filepath.Join(volumePath, "subpath1")
subpathDir2 := filepath.Join(volumePath, "subpath2")
filepath1 := filepath.Join("/test-subpath1", fileName)
filepath2 := filepath.Join("/test-subpath2", fileName)
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2))
addSubpathVolumeContainer(&input.pod.Spec.Containers[0], v1.VolumeMount{
Name: volumeName,
MountPath: "/test-subpath1",
SubPath: "subpath1",
})
addSubpathVolumeContainer(&input.pod.Spec.Containers[0], v1.VolumeMount{
Name: volumeName,
MountPath: "/test-subpath2",
SubPath: "subpath2",
})
addMultipleWrites(&input.pod.Spec.Containers[0], filepath1, filepath2)
testMultipleReads(input.f, input.pod, 0, filepath1, filepath2)
})
It("should support restarting containers using directory as subpath [Slow]", func() {
// Create the directory
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %v; touch %v", input.subPathDir, probeFilePath))
testPodContainerRestart(input.f, input.pod)
})
It("should support restarting containers using file as subpath [Slow]", func() {
// Create the file
setInitCommand(input.pod, fmt.Sprintf("touch %v; touch %v", input.subPathDir, probeFilePath))
testPodContainerRestart(input.f, input.pod)
})
It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() {
testSubpathReconstruction(input.f, input.pod, false)
})
It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() {
if input.volType == "hostPath" || input.volType == "hostPathSymlink" {
framework.Skipf("%s volume type does not support reconstruction, skipping", input.volType)
}
testSubpathReconstruction(input.f, input.pod, true)
})
It("should support readOnly directory specified in the volumeMount", func() {
// Create the directory
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir))
// Write the file in the volume from container 1
setWriteCommand(input.filePathInVolume, &input.pod.Spec.Containers[1])
// Read it from inside the subPath from container 0
input.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
testReadFile(input.f, input.filePathInSubpath, input.pod, 0)
})
It("should support readOnly file specified in the volumeMount", func() {
// Create the file
setInitCommand(input.pod, fmt.Sprintf("touch %s", input.subPathDir))
// Write the file in the volume from container 1
setWriteCommand(input.subPathDir, &input.pod.Spec.Containers[1])
// Read it from inside the subPath from container 0
input.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
testReadFile(input.f, volumePath, input.pod, 0)
})
It("should support existing directories when readOnly specified in the volumeSource", func() {
if input.roVol == nil {
framework.Skipf("Volume type %v doesn't support readOnly source", input.volType)
}
// Initialize content in the volume while it's writable
initVolumeContent(input.f, input.pod, input.filePathInVolume, input.filePathInSubpath)
// Set volume source to read only
input.pod.Spec.Volumes[0].VolumeSource = *input.roVol
// Read it from inside the subPath from container 0
testReadFile(input.f, input.filePathInSubpath, input.pod, 0)
})
It("should fail for new directories when readOnly specified in the volumeSource", func() {
if input.roVol == nil {
framework.Skipf("Volume type %v doesn't support readOnly source", input.volType)
}
// Format the volume while it's writable
formatVolume(input.f, input.formatPod)
// Set volume source to read only
input.pod.Spec.Volumes[0].VolumeSource = *input.roVol
// Pod should fail
testPodFailSubpathError(input.f, input.pod, "")
})
// TODO: add a test case for the same disk with two partitions
}
// TestBasicSubpath runs basic subpath test
func TestBasicSubpath(f *framework.Framework, contents string, pod *v1.Pod) {
TestBasicSubpathFile(f, contents, pod, volumePath)
}
// TestBasicSubpathFile runs basic subpath file test
func TestBasicSubpathFile(f *framework.Framework, contents string, pod *v1.Pod, filepath string) {
setReadCommand(filepath, &pod.Spec.Containers[0])
By(fmt.Sprintf("Creating pod %s", pod.Name))
f.TestContainerOutput("atomic-volume-subpath", pod, 0, []string{contents})
By(fmt.Sprintf("Deleting pod %s", pod.Name))
err := framework.DeletePodWithWait(f, f.ClientSet, pod)
Expect(err).NotTo(HaveOccurred(), "while deleting pod")
}
// SubpathTestPod returns a pod spec for subpath tests
func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *v1.VolumeSource, privilegedSecurityContext bool) *v1.Pod {
var (
suffix = strings.ToLower(fmt.Sprintf("%s-%s", volumeType, rand.String(4)))
gracePeriod = int64(1)
probeVolumeName = "liveness-probe-volume"
)
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("pod-subpath-test-%s", suffix),
Namespace: f.Namespace.Name,
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: fmt.Sprintf("init-volume-%s", suffix),
Image: imageutils.GetE2EImage(imageutils.BusyBox),
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumePath,
},
{
Name: probeVolumeName,
MountPath: probeVolumePath,
},
},
SecurityContext: &v1.SecurityContext{
Privileged: &privilegedSecurityContext,
},
},
},
Containers: []v1.Container{
{
Name: fmt.Sprintf("test-container-subpath-%s", suffix),
Image: mountImage,
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumePath,
SubPath: subpath,
},
{
Name: probeVolumeName,
MountPath: probeVolumePath,
},
},
SecurityContext: &v1.SecurityContext{
Privileged: &privilegedSecurityContext,
},
},
{
Name: fmt.Sprintf("test-container-volume-%s", suffix),
Image: mountImage,
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumePath,
},
{
Name: probeVolumeName,
MountPath: probeVolumePath,
},
},
SecurityContext: &v1.SecurityContext{
Privileged: &privilegedSecurityContext,
},
},
},
RestartPolicy: v1.RestartPolicyNever,
TerminationGracePeriodSeconds: &gracePeriod,
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: *source,
},
{
Name: probeVolumeName,
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
SecurityContext: &v1.PodSecurityContext{
SELinuxOptions: &v1.SELinuxOptions{
Level: "s0:c0,c1",
},
},
},
}
}
// volumeFormatPod returns a Pod that does nothing but will cause the plugin to format a filesystem
// on first use
func volumeFormatPod(f *framework.Framework, volumeSource *v1.VolumeSource) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("volume-prep-%s", f.Namespace.Name),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: fmt.Sprintf("init-volume-%s", f.Namespace.Name),
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-ec", "echo nothing"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: "/vol",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: *volumeSource,
},
},
},
}
}
func clearSubpathPodCommands(pod *v1.Pod) {
pod.Spec.InitContainers[0].Command = nil
pod.Spec.Containers[0].Args = nil
pod.Spec.Containers[1].Args = nil
}
func setInitCommand(pod *v1.Pod, command string) {
pod.Spec.InitContainers[0].Command = []string{"/bin/sh", "-ec", command}
}
func setWriteCommand(file string, container *v1.Container) {
container.Args = []string{
fmt.Sprintf("--new_file_0644=%v", file),
fmt.Sprintf("--file_mode=%v", file),
}
}
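// For illustration: after setWriteCommand("/test-volume/test-file", c) — the
// path matching volumePath and fileName above — the mounttest container
// receives the args:
//
//	--new_file_0644=/test-volume/test-file
//	--file_mode=/test-volume/test-file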
func addSubpathVolumeContainer(container *v1.Container, volumeMount v1.VolumeMount) {
existingMounts := container.VolumeMounts
container.VolumeMounts = append(existingMounts, volumeMount)
}
func addMultipleWrites(container *v1.Container, file1 string, file2 string) {
container.Args = []string{
fmt.Sprintf("--new_file_0644=%v", file1),
fmt.Sprintf("--new_file_0666=%v", file2),
}
}
func testMultipleReads(f *framework.Framework, pod *v1.Pod, containerIndex int, file1 string, file2 string) {
By(fmt.Sprintf("Creating pod %s", pod.Name))
f.TestContainerOutput("multi_subpath", pod, containerIndex, []string{
"content of file \"" + file1 + "\": mount-tester new file",
"content of file \"" + file2 + "\": mount-tester new file",
})
}
func setReadCommand(file string, container *v1.Container) {
container.Args = []string{
fmt.Sprintf("--file_content_in_loop=%v", file),
fmt.Sprintf("--retry_time=%d", retryDuration),
}
}
func testReadFile(f *framework.Framework, file string, pod *v1.Pod, containerIndex int) {
setReadCommand(file, &pod.Spec.Containers[containerIndex])
By(fmt.Sprintf("Creating pod %s", pod.Name))
f.TestContainerOutput("subpath", pod, containerIndex, []string{
"content of file \"" + file + "\": mount-tester new file",
})
By(fmt.Sprintf("Deleting pod %s", pod.Name))
err := framework.DeletePodWithWait(f, f.ClientSet, pod)
Expect(err).NotTo(HaveOccurred(), "while deleting pod")
}
func testPodFailSubpath(f *framework.Framework, pod *v1.Pod) {
testPodFailSubpathError(f, pod, "subPath")
}
func testPodFailSubpathError(f *framework.Framework, pod *v1.Pod, errorMsg string) {
By(fmt.Sprintf("Creating pod %s", pod.Name))
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).ToNot(HaveOccurred(), "while creating pod")
defer func() {
framework.DeletePodWithWait(f, f.ClientSet, pod)
}()
err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
Expect(err).To(HaveOccurred(), "while waiting for pod to be running")
By("Checking for subpath error event")
selector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.name": pod.Name,
"involvedObject.namespace": f.Namespace.Name,
"reason": "Failed",
}.AsSelector().String()
options := metav1.ListOptions{FieldSelector: selector}
events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(options)
Expect(err).NotTo(HaveOccurred(), "while getting pod events")
Expect(len(events.Items)).NotTo(Equal(0), "no events found")
Expect(events.Items[0].Message).To(ContainSubstring(errorMsg), fmt.Sprintf("%q error not found", errorMsg))
}
// Tests that the existing subpath mount is detected when a container restarts
func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.BusyBox)
pod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
// Add liveness probe to subpath container
pod.Spec.Containers[0].LivenessProbe = &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"cat", probeFilePath},
},
},
InitialDelaySeconds: 1,
FailureThreshold: 1,
PeriodSeconds: 2,
}
// Start pod
By(fmt.Sprintf("Creating pod %s", pod.Name))
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).ToNot(HaveOccurred(), "while creating pod")
defer func() {
framework.DeletePodWithWait(f, f.ClientSet, pod)
}()
err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running")
By("Failing liveness probe")
out, err := podContainerExec(pod, 1, fmt.Sprintf("rm %v", probeFilePath))
framework.Logf("Pod exec output: %v", out)
Expect(err).ToNot(HaveOccurred(), "while failing liveness probe")
// Check that container has restarted
By("Waiting for container to restart")
restarts := int32(0)
err = wait.PollImmediate(10*time.Second, 2*time.Minute, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, status := range pod.Status.ContainerStatuses {
if status.Name == pod.Spec.Containers[0].Name {
framework.Logf("Container %v, restarts: %v", status.Name, status.RestartCount)
restarts = status.RestartCount
if restarts > 0 {
framework.Logf("Container has restart count: %v", restarts)
return true, nil
}
}
}
return false, nil
})
Expect(err).ToNot(HaveOccurred(), "while waiting for container to restart")
// Fix liveness probe
By("Rewriting the file")
writeCmd := fmt.Sprintf("echo test-after > %v", probeFilePath)
out, err = podContainerExec(pod, 1, writeCmd)
framework.Logf("Pod exec output: %v", out)
Expect(err).ToNot(HaveOccurred(), "while rewriting the probe file")
// Wait for container restarts to stabilize
By("Waiting for container to stop restarting")
stableCount := int(0)
stableThreshold := int(time.Minute / framework.Poll)
err = wait.PollImmediate(framework.Poll, 2*time.Minute, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, status := range pod.Status.ContainerStatuses {
if status.Name == pod.Spec.Containers[0].Name {
if status.RestartCount == restarts {
stableCount++
if stableCount > stableThreshold {
framework.Logf("Container restart has stabilized")
return true, nil
}
} else {
restarts = status.RestartCount
stableCount = 0
framework.Logf("Container has restart count: %v", restarts)
}
break
}
}
return false, nil
})
Expect(err).ToNot(HaveOccurred(), "while waiting for container to stabilize")
}
func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete bool) {
// This is mostly copied from TestVolumeUnmountsFromDeletedPodWithForceOption()
// Change to busybox
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.BusyBox)
pod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
// If the grace period is too short, there is not enough time for the volume
// manager to clean up the volumes
gracePeriod := int64(30)
pod.Spec.TerminationGracePeriodSeconds = &gracePeriod
By(fmt.Sprintf("Creating pod %s", pod.Name))
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).ToNot(HaveOccurred(), "while creating pod")
err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred(), "while getting pod")
utils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true)
}
func formatVolume(f *framework.Framework, pod *v1.Pod) {
By(fmt.Sprintf("Creating pod to format volume %s", pod.Name))
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).ToNot(HaveOccurred(), "while creating volume init pod")
err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
Expect(err).ToNot(HaveOccurred(), "while waiting for volume init pod to succeed")
err = framework.DeletePodWithWait(f, f.ClientSet, pod)
Expect(err).ToNot(HaveOccurred(), "while deleting volume init pod")
}
func initVolumeContent(f *framework.Framework, pod *v1.Pod, volumeFilepath, subpathFilepath string) {
setWriteCommand(volumeFilepath, &pod.Spec.Containers[1])
setReadCommand(subpathFilepath, &pod.Spec.Containers[0])
By(fmt.Sprintf("Creating pod to write volume content %s", pod.Name))
f.TestContainerOutput("subpath", pod, 0, []string{
"content of file \"" + subpathFilepath + "\": mount-tester new file",
})
By(fmt.Sprintf("Deleting pod %s", pod.Name))
err := framework.DeletePodWithWait(f, f.ClientSet, pod)
Expect(err).NotTo(HaveOccurred(), "while deleting pod")
// This pod spec is going to be reused; reset all the commands
clearSubpathPodCommands(pod)
}
func podContainerExec(pod *v1.Pod, containerIndex int, bashExec string) (string, error) {
return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", "/bin/sh", "-c", bashExec)
}
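// A minimal sketch of using podContainerExec (the container index and command
// are hypothetical):
//
//	out, err := podContainerExec(pod, 1, fmt.Sprintf("cat %v", probeFilePath))
//	framework.Logf("exec output: %v, err: %v", out, err)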

View File

@@ -0,0 +1,359 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* This test checks that the plugin VolumeSources are working when pseudo-streaming
* various write sizes to mounted files.
*/
package testsuites
import (
"fmt"
"math"
"path"
"strconv"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
// MD5 hashes of the test file corresponding to each file size.
// Test files are generated in testVolumeIO().
// If the test file generation algorithm changes, these must be recomputed.
var md5hashes = map[int64]string{
testpatterns.FileSizeSmall: "5c34c2813223a7ca05a3c2f38c0d1710",
testpatterns.FileSizeMedium: "f2fa202b1ffeedda5f3a58bd1ae81104",
testpatterns.FileSizeLarge: "8d763edc71bd16217664793b5a15e403",
}
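// A hedged sketch of how one of these hashes can be recomputed if the
// generation algorithm changes. It assumes the write pattern used by
// testVolumeIO below (a 32-byte alphabet block repeated to fill the file) and
// imports of crypto/md5, fmt, and io:
//
//	h := md5.New()
//	blk := "abcdefghijklmnopqrstuvwxyz123456"
//	for n := int64(0); n < testpatterns.FileSizeSmall; n += int64(len(blk)) {
//		io.WriteString(h, blk)
//	}
//	fmt.Printf("%x\n", h.Sum(nil)) // should print the FileSizeSmall hash above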
type volumeIOTestSuite struct {
tsInfo TestSuiteInfo
}
var _ TestSuite = &volumeIOTestSuite{}
// InitVolumeIOTestSuite returns volumeIOTestSuite that implements TestSuite interface
func InitVolumeIOTestSuite() TestSuite {
return &volumeIOTestSuite{
tsInfo: TestSuiteInfo{
name: "volumeIO",
testPatterns: []testpatterns.TestPattern{
testpatterns.DefaultFsInlineVolume,
testpatterns.DefaultFsPreprovisionedPV,
testpatterns.DefaultFsDynamicPV,
},
},
}
}
func (t *volumeIOTestSuite) getTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
func (t *volumeIOTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) {
}
func createVolumeIOTestInput(pattern testpatterns.TestPattern, resource genericVolumeTestResource) volumeIOTestInput {
var fsGroup *int64
driver := resource.driver
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
fileSizes := createFileSizes(dInfo.MaxFileSize)
volSource := resource.volSource
if volSource == nil {
framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
}
if dInfo.IsFsGroupSupported {
fsGroupVal := int64(1234)
fsGroup = &fsGroupVal
}
return volumeIOTestInput{
f: f,
name: dInfo.Name,
config: dInfo.Config,
volSource: *volSource,
testFile: fmt.Sprintf("%s_io_test_%s", dInfo.Name, f.Namespace.Name),
podSec: v1.PodSecurityContext{
FSGroup: fsGroup,
},
fileSizes: fileSizes,
}
}
func (t *volumeIOTestSuite) execTest(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
Context(getTestNameStr(t, pattern), func() {
var (
resource genericVolumeTestResource
input volumeIOTestInput
needsCleanup bool
)
BeforeEach(func() {
needsCleanup = false
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(t, driver, pattern)
needsCleanup = true
// Setup test resource for driver and testpattern
resource = genericVolumeTestResource{}
resource.setupResource(driver, pattern)
// Create test input
input = createVolumeIOTestInput(pattern, resource)
})
AfterEach(func() {
if needsCleanup {
resource.cleanupResource(driver, pattern)
}
})
execTestVolumeIO(&input)
})
}
type volumeIOTestInput struct {
f *framework.Framework
name string
config framework.VolumeTestConfig
volSource v1.VolumeSource
testFile string
podSec v1.PodSecurityContext
fileSizes []int64
}
func execTestVolumeIO(input *volumeIOTestInput) {
It("should write files of various sizes, verify size, validate content [Slow]", func() {
f := input.f
cs := f.ClientSet
err := testVolumeIO(f, cs, input.config, input.volSource, &input.podSec, input.testFile, input.fileSizes)
Expect(err).NotTo(HaveOccurred())
})
}
func createFileSizes(maxFileSize int64) []int64 {
allFileSizes := []int64{
testpatterns.FileSizeSmall,
testpatterns.FileSizeMedium,
testpatterns.FileSizeLarge,
}
fileSizes := []int64{}
for _, size := range allFileSizes {
if size <= maxFileSize {
fileSizes = append(fileSizes, size)
}
}
return fileSizes
}
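// For example, a driver whose MaxFileSize is testpatterns.FileSizeMedium gets
// []int64{FileSizeSmall, FileSizeMedium}; FileSizeLarge is filtered out.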
// Return the plugin's client pod spec. Use an InitContainer to set up the file I/O test env.
func makePodSpec(config framework.VolumeTestConfig, dir, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod {
volName := fmt.Sprintf("%s-%s", config.Prefix, "io-volume")
var gracePeriod int64 = 1
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-io-client",
Labels: map[string]string{
"role": config.Prefix + "-io-client",
},
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: config.Prefix + "-io-init",
Image: framework.BusyBoxImage,
Command: []string{
"/bin/sh",
"-c",
initCmd,
},
VolumeMounts: []v1.VolumeMount{
{
Name: volName,
MountPath: dir,
},
},
},
},
Containers: []v1.Container{
{
Name: config.Prefix + "-io-client",
Image: framework.BusyBoxImage,
Command: []string{
"/bin/sh",
"-c",
"sleep 3600", // keep pod alive until explicitly deleted
},
VolumeMounts: []v1.VolumeMount{
{
Name: volName,
MountPath: dir,
},
},
},
},
TerminationGracePeriodSeconds: &gracePeriod,
SecurityContext: podSecContext,
Volumes: []v1.Volume{
{
Name: volName,
VolumeSource: volsrc,
},
},
RestartPolicy: v1.RestartPolicyNever, // want pod to fail if init container fails
NodeName: config.ClientNodeName,
NodeSelector: config.NodeSelector,
},
}
}
// Write `fsize` bytes to `fpath` in the pod, using dd and the `ddInput` file.
func writeToFile(pod *v1.Pod, fpath, ddInput string, fsize int64) error {
By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath))
loopCnt := fsize / testpatterns.MinFileSize
writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, ddInput, testpatterns.MinFileSize, fpath)
_, err := utils.PodExec(pod, writeCmd)
return err
}
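// For illustration, with fsize = 2*MinFileSize the generated command expands
// to roughly the following (paths hypothetical):
//
//	i=0; while [ $i -lt 2 ]; do dd if=/opt/nfs/e2e-xyz/dd_if bs=1048576 >>/opt/nfs/e2e-xyz/nfs_io_test_e2e-xyz-2097152 2>/dev/null; let i+=1; done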
// Verify that the test file is the expected size and contains the expected content.
func verifyFile(pod *v1.Pod, fpath string, expectSize int64, ddInput string) error {
By("verifying file size")
rtnstr, err := utils.PodExec(pod, fmt.Sprintf("stat -c %%s %s", fpath))
if err != nil || rtnstr == "" {
return fmt.Errorf("unable to get file size via `stat %s`: %v", fpath, err)
}
size, err := strconv.Atoi(strings.TrimSuffix(rtnstr, "\n"))
if err != nil {
return fmt.Errorf("unable to convert string %q to int: %v", rtnstr, err)
}
if int64(size) != expectSize {
return fmt.Errorf("size of file %s is %d, expected %d", fpath, size, expectSize)
}
By("verifying file hash")
rtnstr, err = utils.PodExec(pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath))
if err != nil {
return fmt.Errorf("unable to test file hash via `md5sum %s`: %v", fpath, err)
}
actualHash := strings.TrimSuffix(rtnstr, "\n")
expectedHash, ok := md5hashes[expectSize]
if !ok {
return fmt.Errorf("File hash is unknown for file size %d. Was a new file size added to the test suite?",
expectSize)
}
if actualHash != expectedHash {
return fmt.Errorf("MD5 hash is incorrect for file %s with size %d. Expected: `%s`; Actual: `%s`",
fpath, expectSize, expectedHash, actualHash)
}
return nil
}
// Delete `fpath` to save some disk space on the host. Delete errors are logged but ignored.
func deleteFile(pod *v1.Pod, fpath string) {
By(fmt.Sprintf("deleting test file %s...", fpath))
_, err := utils.PodExec(pod, fmt.Sprintf("rm -f %s", fpath))
if err != nil {
// keep going, the test dir will be deleted when the volume is unmounted
framework.Logf("unable to delete test file %s: %v\nerror ignored, continuing test", fpath, err)
}
}
// Create the client pod and create files of the sizes passed in by the `fsizes` parameter. Delete the
// client pod and the new files when done.
// Note: the file name is appended to "/opt/<Prefix>/<namespace>", e.g. "/opt/nfs/e2e-.../<file>".
// Note: nil can be passed for the podSecContext param, in which case it is ignored.
// Note: each `fsizes` value is expected to be at least `MinFileSize` and is rounded up to a
// multiple of `MinFileSize` bytes.
func testVolumeIO(f *framework.Framework, cs clientset.Interface, config framework.VolumeTestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) {
dir := path.Join("/opt", config.Prefix, config.Namespace)
ddInput := path.Join(dir, "dd_if")
writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB value
loopCnt := testpatterns.MinFileSize / int64(len(writeBlk))
// initContainer cmd to create and fill dd's input file. The initContainer is used to create
// the `dd` input file, which is currently 1MiB. Rather than store a 1MiB Go value, a loop is
// used to create a 1MiB file in the target directory.
initCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do echo -n %s >>%s; let i+=1; done", loopCnt, writeBlk, ddInput)
clientPod := makePodSpec(config, dir, initCmd, volsrc, podSecContext)
By(fmt.Sprintf("starting %s", clientPod.Name))
podsNamespacer := cs.CoreV1().Pods(config.Namespace)
clientPod, err = podsNamespacer.Create(clientPod)
if err != nil {
return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err)
}
defer func() {
// note the test dir will be removed when the kubelet unmounts it
By(fmt.Sprintf("deleting client pod %q...", clientPod.Name))
e := framework.DeletePodWithWait(f, cs, clientPod)
if e != nil {
framework.Logf("client pod failed to delete: %v", e)
if err == nil { // delete err is returned if err is not set
err = e
}
} else {
framework.Logf("sleeping a bit so kubelet can unmount and detach the volume")
time.Sleep(framework.PodCleanupTimeout)
}
}()
err = framework.WaitForPodRunningInNamespace(cs, clientPod)
if err != nil {
return fmt.Errorf("client pod %q not running: %v", clientPod.Name, err)
}
// create files of the passed-in file sizes and verify test file size and content
for _, fsize := range fsizes {
// file sizes must be a multiple of `MinFileSize`; round up any stragglers
if math.Mod(float64(fsize), float64(testpatterns.MinFileSize)) != 0 {
fsize = (fsize/testpatterns.MinFileSize + 1) * testpatterns.MinFileSize
}
fpath := path.Join(dir, fmt.Sprintf("%s-%d", file, fsize))
if err = writeToFile(clientPod, fpath, ddInput, fsize); err != nil {
return err
}
if err = verifyFile(clientPod, fpath, fsize, ddInput); err != nil {
return err
}
deleteFile(clientPod, fpath)
}
return
}

View File

@@ -0,0 +1,445 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testsuites
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
noProvisioner = "kubernetes.io/no-provisioner"
pvNamePrefix = "pv"
)
type volumeModeTestSuite struct {
tsInfo TestSuiteInfo
}
var _ TestSuite = &volumeModeTestSuite{}
// InitVolumeModeTestSuite returns volumeModeTestSuite that implements TestSuite interface
func InitVolumeModeTestSuite() TestSuite {
return &volumeModeTestSuite{
tsInfo: TestSuiteInfo{
name: "volumeMode",
featureTag: "[Feature:BlockVolume]",
testPatterns: []testpatterns.TestPattern{
testpatterns.FsVolModePreprovisionedPV,
testpatterns.FsVolModeDynamicPV,
testpatterns.BlockVolModePreprovisionedPV,
testpatterns.BlockVolModeDynamicPV,
},
},
}
}
func (t *volumeModeTestSuite) getTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
func (t *volumeModeTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) {
}
func createVolumeModeTestInput(pattern testpatterns.TestPattern, resource volumeModeTestResource) volumeModeTestInput {
driver := resource.driver
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
return volumeModeTestInput{
f: f,
sc: resource.sc,
pvc: resource.pvc,
pv: resource.pv,
testVolType: pattern.VolType,
nodeName: dInfo.Config.ClientNodeName,
volMode: pattern.VolMode,
isBlockSupported: dInfo.IsBlockSupported,
}
}
func getVolumeModeTestFunc(pattern testpatterns.TestPattern, driver drivers.TestDriver) func(*volumeModeTestInput) {
dInfo := driver.GetDriverInfo()
isBlockSupported := dInfo.IsBlockSupported
volMode := pattern.VolMode
volType := pattern.VolType
switch volType {
case testpatterns.PreprovisionedPV:
if volMode == v1.PersistentVolumeBlock && !isBlockSupported {
return testVolumeModeFailForPreprovisionedPV
}
return testVolumeModeSuccessForPreprovisionedPV
case testpatterns.DynamicPV:
if volMode == v1.PersistentVolumeBlock && !isBlockSupported {
return testVolumeModeFailForDynamicPV
}
return testVolumeModeSuccessForDynamicPV
default:
framework.Failf("Volume mode test doesn't support volType: %v", volType)
}
return nil
}
func (t *volumeModeTestSuite) execTest(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
Context(getTestNameStr(t, pattern), func() {
var (
resource volumeModeTestResource
input volumeModeTestInput
testFunc func(*volumeModeTestInput)
needsCleanup bool
)
testFunc = getVolumeModeTestFunc(pattern, driver)
BeforeEach(func() {
needsCleanup = false
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(t, driver, pattern)
needsCleanup = true
// Setup test resource for driver and testpattern
resource = volumeModeTestResource{}
resource.setupResource(driver, pattern)
// Create test input
input = createVolumeModeTestInput(pattern, resource)
})
AfterEach(func() {
if needsCleanup {
resource.cleanupResource(driver, pattern)
}
})
testFunc(&input)
})
}
type volumeModeTestResource struct {
driver drivers.TestDriver
sc *storagev1.StorageClass
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
driverTestResource interface{}
}
var _ TestResource = &volumeModeTestResource{}
func (s *volumeModeTestResource) setupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
s.driver = driver
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
ns := f.Namespace
fsType := pattern.FsType
volBindMode := storagev1.VolumeBindingImmediate
volMode := pattern.VolMode
volType := pattern.VolType
var (
scName string
pvSource *v1.PersistentVolumeSource
)
// Create volume for pre-provisioned volume tests
s.driverTestResource = drivers.CreateVolume(driver, volType)
switch volType {
case testpatterns.PreprovisionedPV:
if volMode == v1.PersistentVolumeBlock {
scName = fmt.Sprintf("%s-%s-sc-for-block", ns.Name, dInfo.Name)
} else if volMode == v1.PersistentVolumeFilesystem {
scName = fmt.Sprintf("%s-%s-sc-for-file", ns.Name, dInfo.Name)
}
if pDriver, ok := driver.(drivers.PreprovisionedPVTestDriver); ok {
pvSource = pDriver.GetPersistentVolumeSource(false, fsType, s.driverTestResource)
if pvSource == nil {
framework.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name)
}
sc, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, volMode, *pvSource)
s.sc = sc
s.pv = framework.MakePersistentVolume(pvConfig)
s.pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns.Name)
}
case testpatterns.DynamicPV:
if dDriver, ok := driver.(drivers.DynamicPVTestDriver); ok {
s.sc = dDriver.GetDynamicProvisionStorageClass(fsType)
if s.sc == nil {
framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
}
s.sc.VolumeBindingMode = &volBindMode
claimSize := "2Gi"
s.pvc = getClaim(claimSize, ns.Name)
s.pvc.Spec.StorageClassName = &s.sc.Name
s.pvc.Spec.VolumeMode = &volMode
}
default:
framework.Failf("Volume mode test doesn't support: %s", volType)
}
}
func (s *volumeModeTestResource) cleanupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
cs := f.ClientSet
ns := f.Namespace
volType := pattern.VolType
By("Deleting pv and pvc")
errs := framework.PVPVCCleanup(cs, ns.Name, s.pv, s.pvc)
if len(errs) > 0 {
framework.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
}
By("Deleting sc")
if s.sc != nil {
deleteStorageClass(cs, s.sc.Name)
}
// Clean up the volume for pre-provisioned volume tests
drivers.DeleteVolume(driver, volType, s.driverTestResource)
}
type volumeModeTestInput struct {
f *framework.Framework
sc *storagev1.StorageClass
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
testVolType testpatterns.TestVolType
nodeName string
volMode v1.PersistentVolumeMode
isBlockSupported bool
}
func testVolumeModeFailForPreprovisionedPV(input *volumeModeTestInput) {
It("should fail to create pod by failing to mount volume", func() {
f := input.f
cs := f.ClientSet
ns := f.Namespace
var err error
By("Creating sc")
input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
input.pv, err = cs.CoreV1().PersistentVolumes().Create(input.pv)
Expect(err).NotTo(HaveOccurred())
// Prebind pv
input.pvc.Spec.VolumeName = input.pv.Name
input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, input.pv, input.pvc))
By("Creating pod")
pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc},
false, "", false, false, framework.SELinuxLabel,
nil, input.nodeName, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
}()
Expect(err).To(HaveOccurred())
})
}
func testVolumeModeSuccessForPreprovisionedPV(input *volumeModeTestInput) {
It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
f := input.f
cs := f.ClientSet
ns := f.Namespace
var err error
By("Creating sc")
input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
input.pv, err = cs.CoreV1().PersistentVolumes().Create(input.pv)
Expect(err).NotTo(HaveOccurred())
// Prebind pv
input.pvc.Spec.VolumeName = input.pv.Name
input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, input.pv, input.pvc))
By("Creating pod")
pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc},
false, "", false, false, framework.SELinuxLabel,
nil, input.nodeName, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
}()
Expect(err).NotTo(HaveOccurred())
By("Checking if persistent volume exists as expected volume mode")
checkVolumeModeOfPath(pod, input.volMode, "/mnt/volume1")
By("Checking if read/write to persistent volume works properly")
checkReadWriteToPath(pod, input.volMode, "/mnt/volume1")
})
// TODO(mkimuram): Add more tests
}
func testVolumeModeFailForDynamicPV(input *volumeModeTestInput) {
It("should fail in binding dynamic provisioned PV to PVC", func() {
f := input.f
cs := f.ClientSet
ns := f.Namespace
var err error
By("Creating sc")
input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, input.pvc.Namespace, input.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).To(HaveOccurred())
})
}
func testVolumeModeSuccessForDynamicPV(input *volumeModeTestInput) {
It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
f := input.f
cs := f.ClientSet
ns := f.Namespace
var err error
By("Creating sc")
input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, input.pvc.Namespace, input.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
input.pvc, err = cs.CoreV1().PersistentVolumeClaims(input.pvc.Namespace).Get(input.pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
input.pv, err = cs.CoreV1().PersistentVolumes().Get(input.pvc.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("Creating pod")
pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc},
false, "", false, false, framework.SELinuxLabel,
nil, input.nodeName, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
}()
Expect(err).NotTo(HaveOccurred())
By("Checking if persistent volume exists as expected volume mode")
checkVolumeModeOfPath(pod, input.volMode, "/mnt/volume1")
By("Checking if read/write to persistent volume works properly")
checkReadWriteToPath(pod, input.volMode, "/mnt/volume1")
})
// TODO(mkimuram): Add more tests
}
func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1.VolumeBindingMode,
volMode v1.PersistentVolumeMode, pvSource v1.PersistentVolumeSource) (*storagev1.StorageClass,
framework.PersistentVolumeConfig, framework.PersistentVolumeClaimConfig) {
// StorageClass
scConfig := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: scName,
},
Provisioner: noProvisioner,
VolumeBindingMode: &volBindMode,
}
// PV
pvConfig := framework.PersistentVolumeConfig{
PVSource: pvSource,
NamePrefix: pvNamePrefix,
StorageClassName: scName,
VolumeMode: &volMode,
}
// PVC
pvcConfig := framework.PersistentVolumeClaimConfig{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &scName,
VolumeMode: &volMode,
}
return scConfig, pvConfig, pvcConfig
}
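// A minimal sketch of consuming these configs (mirrors setupResource above;
// `pvSource` is an assumed driver-provided PersistentVolumeSource):
//
//	sc, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(
//		scName, storagev1.VolumeBindingImmediate, v1.PersistentVolumeBlock, pvSource)
//	pv := framework.MakePersistentVolume(pvConfig)
//	pvc := framework.MakePersistentVolumeClaim(pvcConfig, ns.Name)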
func checkVolumeModeOfPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
if volMode == v1.PersistentVolumeBlock {
// Check if block exists
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("test -b %s", path))
// Double check that it's not directory
utils.VerifyExecInPodFail(pod, fmt.Sprintf("test -d %s", path), 1)
} else {
// Check if directory exists
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("test -d %s", path))
// Double check that it's not block
utils.VerifyExecInPodFail(pod, fmt.Sprintf("test -b %s", path), 1)
}
}
func checkReadWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
if volMode == v1.PersistentVolumeBlock {
// random -> file1
utils.VerifyExecInPodSucceed(pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
// file1 -> dev (write to dev)
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
// dev -> file2 (read from dev)
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
// file1 == file2 (check contents)
utils.VerifyExecInPodSucceed(pod, "diff /tmp/file1 /tmp/file2")
// Clean up temp files
utils.VerifyExecInPodSucceed(pod, "rm -f /tmp/file1 /tmp/file2")
// Check that writing file to block volume fails
utils.VerifyExecInPodFail(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
} else {
// text -> file1 (write to file)
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
// grep file1 (read from file and check contents)
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("grep 'Hello world.' %s/file1.txt", path))
// Check that writing to directory as block volume fails
utils.VerifyExecInPodFail(pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
}
}
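// For illustration, the block-mode branch above reduces to shell checks of
// the device node inside the pod (path hypothetical):
//
//	test -b /mnt/volume1   # must succeed for a raw block device
//	test -d /mnt/volume1   # must fail with exit code 1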

View File

@@ -0,0 +1,160 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This test checks that various VolumeSources are working.
// test/e2e/common/volumes.go duplicates the GlusterFS test from this file. Any changes made to this
// test should be made there as well.
package testsuites
import (
"fmt"
. "github.com/onsi/ginkgo"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
type volumesTestSuite struct {
tsInfo TestSuiteInfo
}
var _ TestSuite = &volumesTestSuite{}
// InitVolumesTestSuite returns volumesTestSuite that implements TestSuite interface
func InitVolumesTestSuite() TestSuite {
return &volumesTestSuite{
tsInfo: TestSuiteInfo{
name: "volumes",
testPatterns: []testpatterns.TestPattern{
// Default fsType
testpatterns.DefaultFsInlineVolume,
testpatterns.DefaultFsPreprovisionedPV,
testpatterns.DefaultFsDynamicPV,
// ext3
testpatterns.Ext3InlineVolume,
testpatterns.Ext3PreprovisionedPV,
testpatterns.Ext3DynamicPV,
// ext4
testpatterns.Ext4InlineVolume,
testpatterns.Ext4PreprovisionedPV,
testpatterns.Ext4DynamicPV,
// xfs
testpatterns.XfsInlineVolume,
testpatterns.XfsPreprovisionedPV,
testpatterns.XfsDynamicPV,
},
},
}
}
func (t *volumesTestSuite) getTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
func (t *volumesTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) {
dInfo := driver.GetDriverInfo()
if !dInfo.IsPersistent {
framework.Skipf("Driver %q does not provide persistency - skipping", dInfo.Name)
}
}
func createVolumesTestInput(pattern testpatterns.TestPattern, resource genericVolumeTestResource) volumesTestInput {
var fsGroup *int64
driver := resource.driver
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
volSource := resource.volSource
if volSource == nil {
framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
}
if dInfo.IsFsGroupSupported {
fsGroupVal := int64(1234)
fsGroup = &fsGroupVal
}
return volumesTestInput{
f: f,
name: dInfo.Name,
config: dInfo.Config,
fsGroup: fsGroup,
tests: []framework.VolumeTest{
{
Volume: *volSource,
File: "index.html",
// Must match content
ExpectedContent: fmt.Sprintf("Hello from %s from namespace %s",
dInfo.Name, f.Namespace.Name),
},
},
}
}
func (t *volumesTestSuite) execTest(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
Context(getTestNameStr(t, pattern), func() {
var (
resource genericVolumeTestResource
input volumesTestInput
needsCleanup bool
)
BeforeEach(func() {
needsCleanup = false
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(t, driver, pattern)
needsCleanup = true
// Setup test resource for driver and testpattern
resource = genericVolumeTestResource{}
resource.setupResource(driver, pattern)
// Create test input
input = createVolumesTestInput(pattern, resource)
})
AfterEach(func() {
if needsCleanup {
resource.cleanupResource(driver, pattern)
}
})
testVolumes(&input)
})
}
type volumesTestInput struct {
f *framework.Framework
name string
config framework.VolumeTestConfig
fsGroup *int64
tests []framework.VolumeTest
}
func testVolumes(input *volumesTestInput) {
It("should be mountable", func() {
f := input.f
cs := f.ClientSet
defer framework.VolumeTestCleanup(f, input.config)
volumeTest := input.tests
framework.InjectHtml(cs, input.config, volumeTest[0].Volume, volumeTest[0].ExpectedContent)
framework.TestVolumeClient(cs, input.config, input.fsGroup, input.tests)
})
}
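// For illustration: InjectHtml writes each test's ExpectedContent into File
// on the volume via a helper pod, and TestVolumeClient then mounts the same
// volume in a client pod (applying fsGroup to its security context when set)
// and verifies the content.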

View File

@@ -13,12 +13,17 @@ go_library(
],
importpath = "k8s.io/kubernetes/test/e2e/storage/utils",
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)

View File

@@ -24,9 +24,14 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"
)
type KubeletOpt string
@@ -38,11 +43,51 @@ const (
KRestart KubeletOpt = "restart"
)
const (
// ClusterRole name for the e2e test Privileged Pod Security Policy user
podSecurityPolicyPrivilegedClusterRoleName = "e2e-test-privileged-psp"
)
// PodExec wraps RunKubectl to execute a bash cmd in target pod
func PodExec(pod *v1.Pod, bashExec string) (string, error) {
return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--", "/bin/sh", "-c", bashExec)
}
// VerifyExecInPodSucceed verifies bash cmd in target pod succeed
func VerifyExecInPodSucceed(pod *v1.Pod, bashExec string) {
_, err := PodExec(pod, bashExec)
if err != nil {
if err, ok := err.(uexec.CodeExitError); ok {
exitCode := err.ExitStatus()
Expect(err).NotTo(HaveOccurred(),
"%q should succeed, but failed with exit code %d and error message %q",
bashExec, exitCode, err)
} else {
Expect(err).NotTo(HaveOccurred(),
"%q should succeed, but failed with error message %q",
bashExec, err)
}
}
}
// VerifyExecInPodFail verifies bash cmd in target pod fail with certain exit code
func VerifyExecInPodFail(pod *v1.Pod, bashExec string, exitCode int) {
_, err := PodExec(pod, bashExec)
if err != nil {
if err, ok := err.(uexec.CodeExitError); ok {
actualExitCode := err.ExitStatus()
Expect(actualExitCode).To(Equal(exitCode),
"%q should fail with exit code %d, but failed with exit code %d and error message %q",
bashExec, exitCode, actualExitCode, err)
} else {
Expect(err).NotTo(HaveOccurred(),
"%q should fail with exit code %d, but failed with error message %q",
bashExec, exitCode, err)
}
}
Expect(err).To(HaveOccurred(), "%q should fail with exit code %d, but exit without error", bashExec, exitCode)
}
// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
// for the desired status.
// - First issues the command via `systemctl`
@@ -244,7 +289,7 @@ func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
Containers: []v1.Container{
{
Name: "volume-tester",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
VolumeMounts: []v1.VolumeMount{
@@ -276,3 +321,123 @@ func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
}()
framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
}
func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginName string) *v1.Pod {
podClient := c.CoreV1().Pods(ns)
provisionerPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "external-provisioner-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nfs-provisioner",
Image: "quay.io/kubernetes_incubator/nfs-provisioner:v2.1.0-k8s1.11",
SecurityContext: &v1.SecurityContext{
Capabilities: &v1.Capabilities{
Add: []v1.Capability{"DAC_READ_SEARCH"},
},
},
Args: []string{
"-provisioner=" + externalPluginName,
"-grace-period=0",
},
Ports: []v1.ContainerPort{
{Name: "nfs", ContainerPort: 2049},
{Name: "mountd", ContainerPort: 20048},
{Name: "rpcbind", ContainerPort: 111},
{Name: "rpcbind-udp", ContainerPort: 111, Protocol: v1.ProtocolUDP},
},
Env: []v1.EnvVar{
{
Name: "POD_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
},
},
ImagePullPolicy: v1.PullIfNotPresent,
VolumeMounts: []v1.VolumeMount{
{
Name: "export-volume",
MountPath: "/export",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "export-volume",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
},
}
provisionerPod, err := podClient.Create(provisionerPod)
framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err)
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod))
By("locating the provisioner pod")
pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)
return pod
}
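// Illustrative usage (assumed caller and plugin name, not part of this diff):
//
//	pod := StartExternalProvisioner(c, ns, "example.com/nfs")
//	defer framework.DeletePodOrFail(c, ns, pod.Name)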
func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface,
namespace string,
teardown bool,
saNames []string) {
bindingString := "Binding"
if teardown {
bindingString = "Unbinding"
}
roleBindingClient := client.RbacV1().RoleBindings(namespace)
for _, saName := range saNames {
By(fmt.Sprintf("%v priviledged Pod Security Policy to the service account %s", bindingString, saName))
binding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "psp-" + saName,
Namespace: namespace,
},
Subjects: []rbacv1.Subject{
{
Kind: rbacv1.ServiceAccountKind,
Name: saName,
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: podSecurityPolicyPrivilegedClusterRoleName,
APIGroup: "rbac.authorization.k8s.io",
},
}
roleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) {
_, err := roleBindingClient.Get(binding.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
continue
}
_, err = roleBindingClient.Create(binding)
framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)
}
}
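// Illustrative usage (a sketch; the service account name is assumed): bind during
// test setup and unbind during cleanup:
//
//	PrivilegedTestPSPClusterRoleBinding(c, ns, false /* bind */, []string{"default"})
//	defer PrivilegedTestPSPClusterRoleBinding(c, ns, true /* unbind */, []string{"default"})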

View File

@@ -1,434 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* This test checks that the plugin VolumeSources are working when pseudo-streaming
* various write sizes to mounted files. Note that the plugin is defined inline in
* the pod spec, not via a persistent volume and claim.
*
* These tests work only when privileged containers are allowed; exporting various
* filesystems (NFS, GlusterFS, ...) usually needs some mounting or other privileged
* magic in the server pod. Note that the server containers are for testing purposes
* only and should not be used in production.
*/
package storage
import (
"fmt"
"math"
"path"
"strconv"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
minFileSize = 1 * framework.MiB
fileSizeSmall = 1 * framework.MiB
fileSizeMedium = 100 * framework.MiB
fileSizeLarge = 1 * framework.GiB
)
// MD5 hashes of the test file corresponding to each file size.
// Test files are generated in testVolumeIO()
// If test file generation algorithm changes, these must be recomputed.
var md5hashes = map[int64]string{
fileSizeSmall: "5c34c2813223a7ca05a3c2f38c0d1710",
fileSizeMedium: "f2fa202b1ffeedda5f3a58bd1ae81104",
fileSizeLarge: "8d763edc71bd16217664793b5a15e403",
}
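// A minimal standalone sketch for recomputing these hashes (assumption: it
// mirrors the generation algorithm in testVolumeIO below, i.e. the test file is
// the 1 KiB pattern block repeated up to the target size):
//
//	package main
//
//	import (
//		"crypto/md5"
//		"fmt"
//		"strings"
//	)
//
//	func main() {
//		const miB = 1024 * 1024
//		blk := []byte(strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32)) // 1 KiB
//		h := md5.New()
//		for written := 0; written < 1*miB; written += len(blk) { // 1*miB = fileSizeSmall
//			h.Write(blk)
//		}
//		fmt.Printf("%x\n", h.Sum(nil))
//	}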
// Return the plugin's client pod spec. Use an InitContainer to set up the file I/O test env.
func makePodSpec(config framework.VolumeTestConfig, dir, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod {
volName := fmt.Sprintf("%s-%s", config.Prefix, "io-volume")
var gracePeriod int64 = 1
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-io-client",
Labels: map[string]string{
"role": config.Prefix + "-io-client",
},
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: config.Prefix + "-io-init",
Image: framework.BusyBoxImage,
Command: []string{
"/bin/sh",
"-c",
initCmd,
},
VolumeMounts: []v1.VolumeMount{
{
Name: volName,
MountPath: dir,
},
},
},
},
Containers: []v1.Container{
{
Name: config.Prefix + "-io-client",
Image: framework.BusyBoxImage,
Command: []string{
"/bin/sh",
"-c",
"sleep 3600", // keep pod alive until explicitly deleted
},
VolumeMounts: []v1.VolumeMount{
{
Name: volName,
MountPath: dir,
},
},
},
},
TerminationGracePeriodSeconds: &gracePeriod,
SecurityContext: podSecContext,
Volumes: []v1.Volume{
{
Name: volName,
VolumeSource: volsrc,
},
},
RestartPolicy: v1.RestartPolicyNever, // want pod to fail if init container fails
},
}
}
// Write `fsize` bytes to `fpath` in the pod, using dd and the `dd_input` file.
func writeToFile(pod *v1.Pod, fpath, dd_input string, fsize int64) error {
By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath))
loopCnt := fsize / minFileSize
writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, dd_input, minFileSize, fpath)
_, err := utils.PodExec(pod, writeCmd)
return err
}
// Verify that the test file is the expected size and contains the expected content.
func verifyFile(pod *v1.Pod, fpath string, expectSize int64, dd_input string) error {
By("verifying file size")
rtnstr, err := utils.PodExec(pod, fmt.Sprintf("stat -c %%s %s", fpath))
if err != nil || rtnstr == "" {
return fmt.Errorf("unable to get file size via `stat %s`: %v", fpath, err)
}
size, err := strconv.Atoi(strings.TrimSuffix(rtnstr, "\n"))
if err != nil {
return fmt.Errorf("unable to convert string %q to int: %v", rtnstr, err)
}
if int64(size) != expectSize {
return fmt.Errorf("size of file %s is %d, expected %d", fpath, size, expectSize)
}
By("verifying file hash")
rtnstr, err = utils.PodExec(pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath))
if err != nil {
return fmt.Errorf("unable to test file hash via `md5sum %s`: %v", fpath, err)
}
actualHash := strings.TrimSuffix(rtnstr, "\n")
expectedHash, ok := md5hashes[expectSize]
if !ok {
return fmt.Errorf("File hash is unknown for file size %d. Was a new file size added to the test suite?",
expectSize)
}
if actualHash != expectedHash {
return fmt.Errorf("MD5 hash is incorrect for file %s with size %d. Expected: `%s`; Actual: `%s`",
fpath, expectSize, expectedHash, actualHash)
}
return nil
}
// Delete `fpath` to save some disk space on host. Delete errors are logged but ignored.
func deleteFile(pod *v1.Pod, fpath string) {
By(fmt.Sprintf("deleting test file %s...", fpath))
_, err := utils.PodExec(pod, fmt.Sprintf("rm -f %s", fpath))
if err != nil {
// keep going, the test dir will be deleted when the volume is unmounted
framework.Logf("unable to delete test file %s: %v\nerror ignored, continuing test", fpath, err)
}
}
// Create the client pod and create files of the sizes passed in by the `fsizes` parameter. Delete the
// client pod and the new files when done.
// Note: the file name is appended to "/opt/<Prefix>/<namespace>", eg. "/opt/nfs/e2e-.../<file>".
// Note: nil can be passed for the podSecContext param, in which case it is ignored.
// Note: each `fsizes` value is rounded up, if needed, so that it is at least `minFileSize` and a
// multiple of `minFileSize` bytes.
func testVolumeIO(f *framework.Framework, cs clientset.Interface, config framework.VolumeTestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) {
dir := path.Join("/opt", config.Prefix, config.Namespace)
dd_input := path.Join(dir, "dd_if")
writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB value
loopCnt := minFileSize / int64(len(writeBlk))
// initContainer cmd to create and fill dd's input file, which is currently 1MiB. Rather than
// store a 1MiB Go value, a loop is used to create the file in the target directory.
initCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do echo -n %s >>%s; let i+=1; done", loopCnt, writeBlk, dd_input)
clientPod := makePodSpec(config, dir, initCmd, volsrc, podSecContext)
By(fmt.Sprintf("starting %s", clientPod.Name))
podsNamespacer := cs.CoreV1().Pods(config.Namespace)
clientPod, err = podsNamespacer.Create(clientPod)
if err != nil {
return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err)
}
defer func() {
// note the test dir will be removed when the kubelet unmounts it
By(fmt.Sprintf("deleting client pod %q...", clientPod.Name))
e := framework.DeletePodWithWait(f, cs, clientPod)
if e != nil {
framework.Logf("client pod failed to delete: %v", e)
if err == nil { // delete err is returned if err is not set
err = e
}
} else {
framework.Logf("sleeping a bit so kubelet can unmount and detach the volume")
time.Sleep(framework.PodCleanupTimeout)
}
}()
err = framework.WaitForPodRunningInNamespace(cs, clientPod)
if err != nil {
return fmt.Errorf("client pod %q not running: %v", clientPod.Name, err)
}
// create files of the passed-in file sizes and verify test file size and content
for _, fsize := range fsizes {
// file sizes must be a multiple of `minFileSize`; round non-multiples up
if math.Mod(float64(fsize), float64(minFileSize)) != 0 {
fsize = fsize/minFileSize*minFileSize + minFileSize
}
fpath := path.Join(dir, fmt.Sprintf("%s-%d", file, fsize))
if err = writeToFile(clientPod, fpath, dd_input, fsize); err != nil {
return err
}
if err = verifyFile(clientPod, fpath, fsize, dd_input); err != nil {
return err
}
deleteFile(clientPod, fpath)
}
return
}
// These tests need privileged containers, which are disabled by default.
// TODO: support all of the plugins tested in storage/volumes.go
var _ = utils.SIGDescribe("Volume plugin streaming [Slow]", func() {
f := framework.NewDefaultFramework("volume-io")
var (
config framework.VolumeTestConfig
cs clientset.Interface
ns string
serverIP string
serverPod *v1.Pod
volSource v1.VolumeSource
)
BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
})
////////////////////////////////////////////////////////////////////////
// NFS
////////////////////////////////////////////////////////////////////////
Describe("NFS", func() {
testFile := "nfs_io_test"
// client pod uses selinux
podSec := v1.PodSecurityContext{
SELinuxOptions: &v1.SELinuxOptions{
Level: "s0:c0,c1",
},
}
BeforeEach(func() {
config, serverPod, serverIP = framework.NewNFSServer(cs, ns, []string{})
volSource = v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
Server: serverIP,
Path: "/",
ReadOnly: false,
},
}
})
AfterEach(func() {
framework.Logf("AfterEach: deleting NFS server pod %q...", serverPod.Name)
err := framework.DeletePodWithWait(f, cs, serverPod)
Expect(err).NotTo(HaveOccurred(), "AfterEach: NFS server pod failed to delete")
})
It("should write files of various sizes, verify size, validate content", func() {
fileSizes := []int64{fileSizeSmall, fileSizeMedium, fileSizeLarge}
err := testVolumeIO(f, cs, config, volSource, &podSec, testFile, fileSizes)
Expect(err).NotTo(HaveOccurred())
})
})
////////////////////////////////////////////////////////////////////////
// Gluster
////////////////////////////////////////////////////////////////////////
Describe("GlusterFS", func() {
var name string
testFile := "gluster_io_test"
BeforeEach(func() {
framework.SkipUnlessNodeOSDistroIs("gci")
// create gluster server and endpoints
config, serverPod, serverIP = framework.NewGlusterfsServer(cs, ns)
name = config.Prefix + "-server"
volSource = v1.VolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{
EndpointsName: name,
// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
Path: "test_vol",
ReadOnly: false,
},
}
})
AfterEach(func() {
framework.Logf("AfterEach: deleting Gluster endpoints %q...", name)
epErr := cs.CoreV1().Endpoints(ns).Delete(name, nil)
framework.Logf("AfterEach: deleting Gluster server pod %q...", serverPod.Name)
err := framework.DeletePodWithWait(f, cs, serverPod)
if epErr != nil || err != nil {
if epErr != nil {
framework.Logf("AfterEach: Gluster delete endpoints failed: %v", err)
}
if err != nil {
framework.Logf("AfterEach: Gluster server pod delete failed: %v", err)
}
framework.Failf("AfterEach: cleanup failed")
}
})
It("should write files of various sizes, verify size, validate content", func() {
fileSizes := []int64{fileSizeSmall, fileSizeMedium}
err := testVolumeIO(f, cs, config, volSource, nil /*no secContext*/, testFile, fileSizes)
Expect(err).NotTo(HaveOccurred())
})
})
////////////////////////////////////////////////////////////////////////
// iSCSI
// The iscsiadm utility and iscsi target kernel modules must be installed on all nodes.
////////////////////////////////////////////////////////////////////////
Describe("iSCSI [Feature:Volumes]", func() {
testFile := "iscsi_io_test"
BeforeEach(func() {
config, serverPod, serverIP = framework.NewISCSIServer(cs, ns)
volSource = v1.VolumeSource{
ISCSI: &v1.ISCSIVolumeSource{
TargetPortal: serverIP + ":3260",
// from test/images/volumes-tester/iscsi/initiatorname.iscsi
IQN: "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c",
Lun: 0,
FSType: "ext2",
ReadOnly: false,
},
}
})
AfterEach(func() {
framework.Logf("AfterEach: deleting iSCSI server pod %q...", serverPod.Name)
err := framework.DeletePodWithWait(f, cs, serverPod)
Expect(err).NotTo(HaveOccurred(), "AfterEach: iSCSI server pod failed to delete")
})
It("should write files of various sizes, verify size, validate content", func() {
fileSizes := []int64{fileSizeSmall, fileSizeMedium}
fsGroup := int64(1234)
podSec := v1.PodSecurityContext{
FSGroup: &fsGroup,
}
err := testVolumeIO(f, cs, config, volSource, &podSec, testFile, fileSizes)
Expect(err).NotTo(HaveOccurred())
})
})
////////////////////////////////////////////////////////////////////////
// Ceph RBD
////////////////////////////////////////////////////////////////////////
Describe("Ceph-RBD [Feature:Volumes]", func() {
var (
secret *v1.Secret
)
testFile := "ceph-rbd_io_test"
BeforeEach(func() {
config, serverPod, secret, serverIP = framework.NewRBDServer(cs, ns)
volSource = v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{serverIP},
RBDPool: "rbd",
RBDImage: "foo",
RadosUser: "admin",
SecretRef: &v1.LocalObjectReference{
Name: secret.Name,
},
FSType: "ext2",
ReadOnly: false,
},
}
})
AfterEach(func() {
framework.Logf("AfterEach: deleting Ceph-RBD server secret %q...", secret.Name)
secErr := cs.CoreV1().Secrets(ns).Delete(secret.Name, &metav1.DeleteOptions{})
framework.Logf("AfterEach: deleting Ceph-RBD server pod %q...", serverPod.Name)
err := framework.DeletePodWithWait(f, cs, serverPod)
if secErr != nil || err != nil {
if secErr != nil {
framework.Logf("AfterEach: Ceph-RBD delete secret failed: %v", secErr)
}
if err != nil {
framework.Logf("AfterEach: Ceph-RBD server pod delete failed: %v", err)
}
framework.Failf("AfterEach: cleanup failed")
}
})
It("should write files of various sizes, verify size, validate content", func() {
fileSizes := []int64{fileSizeSmall, fileSizeMedium}
fsGroup := int64(1234)
podSec := v1.PodSecurityContext{
FSGroup: &fsGroup,
}
err := testVolumeIO(f, cs, config, volSource, &podSec, testFile, fileSizes)
Expect(err).NotTo(HaveOccurred())
})
})
})

View File

@@ -0,0 +1,63 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
. "github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
var _ = utils.SIGDescribe("Volume limits", func() {
var (
c clientset.Interface
)
f := framework.NewDefaultFramework("volume-limits-on-node")
BeforeEach(func() {
framework.SkipUnlessProviderIs("aws", "gce", "gke")
c = f.ClientSet
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
})
It("should verify that all nodes have volume limits", func() {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) == 0 {
framework.Failf("Unable to find ready and schedulable Node")
}
for _, node := range nodeList.Items {
volumeLimits := getVolumeLimit(&node)
if len(volumeLimits) == 0 {
framework.Failf("Expected volume limits to be set")
}
}
})
})
func getVolumeLimit(node *v1.Node) map[v1.ResourceName]int64 {
volumeLimits := map[v1.ResourceName]int64{}
nodeAllocatables := node.Status.Allocatable
for k, v := range nodeAllocatables {
if v1helper.IsAttachableVolumeResourceName(k) {
volumeLimits[k] = v.Value()
}
}
return volumeLimits
}
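// For reference (illustrative values; exact names and limits depend on the cloud
// provider and instance type), attachable volume limits surface in node
// allocatable under the "attachable-volumes-" prefix, e.g.:
//
//	attachable-volumes-gce-pd: 127
//	attachable-volumes-aws-ebs: 39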

View File

@@ -167,6 +167,127 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
})
It("should create metrics for total time taken in volume operations in P/V Controller", func() {
var err error
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
Expect(err).NotTo(HaveOccurred())
Expect(pvc).ToNot(BeNil())
claims := []*v1.PersistentVolumeClaim{pvc}
pod := framework.MakePod(ns, nil, claims, false, "")
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name)
pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
controllerMetrics, err := metricsGrabber.GrabFromControllerManager()
if err != nil {
framework.Skipf("Could not get controller-manager metrics - skipping")
}
metricKey := "volume_operation_total_seconds_count"
dimensions := []string{"operation_name", "plugin_name"}
valid := hasValidMetrics(metrics.Metrics(controllerMetrics), metricKey, dimensions...)
Expect(valid).To(BeTrue(), "Invalid metric in P/V Controller metrics: %q", metricKey)
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
})
It("should create volume metrics in Volume Manager", func() {
var err error
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
Expect(err).NotTo(HaveOccurred())
Expect(pvc).ToNot(BeNil())
claims := []*v1.PersistentVolumeClaim{pvc}
pod := framework.MakePod(ns, nil, claims, false, "")
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name)
pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
kubeMetrics, err := metricsGrabber.GrabFromKubelet(pod.Spec.NodeName)
Expect(err).NotTo(HaveOccurred())
// Metrics should have dimensions plugin_name and state available
totalVolumesKey := "volume_manager_total_volumes"
dimensions := []string{"state", "plugin_name"}
valid := hasValidMetrics(metrics.Metrics(kubeMetrics), totalVolumesKey, dimensions...)
Expect(valid).To(BeTrue(), "Invalid metric in Volume Manager metrics: %q", totalVolumesKey)
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
})
It("should create metrics for total number of volumes in A/D Controller", func() {
var err error
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
Expect(err).NotTo(HaveOccurred())
Expect(pvc).ToNot(BeNil())
claims := []*v1.PersistentVolumeClaim{pvc}
pod := framework.MakePod(ns, nil, claims, false, "")
// Get metrics
controllerMetrics, err := metricsGrabber.GrabFromControllerManager()
if err != nil {
framework.Skipf("Could not get controller-manager metrics - skipping")
}
// Create pod
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name)
pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Get updated metrics
updatedControllerMetrics, err := metricsGrabber.GrabFromControllerManager()
if err != nil {
framework.Skipf("Could not get controller-manager metrics - skipping")
}
// Forced detach metric should be present
forceDetachKey := "attachdetach_controller_forced_detaches"
_, ok := updatedControllerMetrics[forceDetachKey]
Expect(ok).To(BeTrue(), "Key %q not found in A/D Controller metrics", forceDetachKey)
// Wait and validate
totalVolumesKey := "attachdetach_controller_total_volumes"
states := []string{"actual_state_of_world", "desired_state_of_world"}
dimensions := []string{"state", "plugin_name"}
waitForADControllerStatesMetrics(metricsGrabber, totalVolumesKey, dimensions, states)
// Total number of volumes in both ActualStateOfWorld and DesiredStateOfWorld
// states should be greater than or equal to the old values
oldStates := getStatesMetrics(totalVolumesKey, metrics.Metrics(controllerMetrics))
updatedStates := getStatesMetrics(totalVolumesKey, metrics.Metrics(updatedControllerMetrics))
for _, stateName := range states {
if _, ok := oldStates[stateName]; !ok {
continue
}
for pluginName, numVolumes := range updatedStates[stateName] {
oldNumVolumes := oldStates[stateName][pluginName]
Expect(numVolumes).To(BeNumerically(">=", oldNumVolumes),
"Wrong number of volumes in state %q, plugin %q: wanted >=%d, got %d",
stateName, pluginName, oldNumVolumes, numVolumes)
}
}
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
})
// Tests for PV controller metrics, specifically bound/unbound PV/PVC counts.
Describe("PVController", func() {
const (
@@ -454,3 +575,61 @@ func calculateRelativeValues(originValues, updatedValues map[string]int64) map[s
}
return relativeValues
}
func hasValidMetrics(metrics metrics.Metrics, metricKey string, dimensions ...string) bool {
var errCount int
framework.Logf("Looking for sample in metric %q", metricKey)
samples, ok := metrics[metricKey]
if !ok {
framework.Logf("Key %q was not found in metrics", metricKey)
return false
}
for _, sample := range samples {
framework.Logf("Found sample %q", sample.String())
for _, d := range dimensions {
if _, ok := sample.Metric[model.LabelName(d)]; !ok {
framework.Logf("Error getting dimension %q for metric %q, sample %q", d, metricKey, sample.String())
errCount++
}
}
}
return errCount == 0
}
func getStatesMetrics(metricKey string, givenMetrics metrics.Metrics) map[string]map[string]int64 {
states := make(map[string]map[string]int64)
for _, sample := range givenMetrics[metricKey] {
framework.Logf("Found sample %q", sample.String())
state := string(sample.Metric["state"])
pluginName := string(sample.Metric["plugin_name"])
if _, ok := states[state]; !ok {
states[state] = make(map[string]int64)
}
states[state][pluginName] = int64(sample.Value) // accumulate per plugin rather than overwriting the state's map
}
return states
}
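// (Shape note, illustrative: the returned map looks like
// {"actual_state_of_world": {"kubernetes.io/gce-pd": 2}, "desired_state_of_world": {...}}.)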
func waitForADControllerStatesMetrics(metricsGrabber *metrics.MetricsGrabber, metricName string, dimensions []string, stateNames []string) {
backoff := wait.Backoff{
Duration: 10 * time.Second,
Factor: 1.2,
Steps: 21,
}
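// (Back-of-the-envelope, not from the source: 21 steps starting at 10s with a
// 1.2 factor keep the poll retrying for roughly half an hour before giving up.)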
verifyMetricFunc := func() (bool, error) {
updatedMetrics, err := metricsGrabber.GrabFromControllerManager()
if err != nil {
framework.Skipf("Could not get controller-manager metrics - skipping")
return false, err
}
if !hasValidMetrics(metrics.Metrics(updatedMetrics), metricName, dimensions...) {
return false, fmt.Errorf("could not get valid metrics for %q", metricName)
}
states := getStatesMetrics(metricName, metrics.Metrics(updatedMetrics))
for _, name := range stateNames {
if _, ok := states[name]; !ok {
return false, fmt.Errorf("could not get state %q from A/D Controller metrics", name)
}
}
return true, nil
}
waitErr := wait.ExponentialBackoff(backoff, verifyMetricFunc)
Expect(waitErr).NotTo(HaveOccurred(), "Timeout error fetching A/D controller metrics: %v", waitErr)
}

View File

@@ -43,21 +43,25 @@ import (
"k8s.io/apiserver/pkg/authentication/serviceaccount"
clientset "k8s.io/client-go/kubernetes"
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
type storageClassTest struct {
name string
cloudProviders []string
provisioner string
parameters map[string]string
claimSize string
expectedSize string
pvCheck func(volume *v1.PersistentVolume) error
nodeName string
attach bool
volumeMode *v1.PersistentVolumeMode
name string
cloudProviders []string
provisioner string
parameters map[string]string
delayBinding bool
claimSize string
expectedSize string
pvCheck func(volume *v1.PersistentVolume) error
nodeName string
skipWriteReadCheck bool
volumeMode *v1.PersistentVolumeMode
}
const (
@@ -132,7 +136,7 @@ func testDynamicProvisioning(t storageClassTest, client clientset.Interface, cla
Expect(err).NotTo(HaveOccurred())
}
if t.attach {
if !t.skipWriteReadCheck {
// We start two pods:
// - The first writes 'hello word' to the /mnt/test (= the volume).
// - The second one runs grep 'hello world' on /mnt/test.
@@ -169,6 +173,113 @@ func testDynamicProvisioning(t storageClassTest, client clientset.Interface, cla
return pv
}
func testBindingWaitForFirstConsumer(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass) (*v1.PersistentVolume, *v1.Node) {
var err error
By("creating a storage class " + class.Name)
class, err = client.StorageV1().StorageClasses().Create(class)
Expect(err).NotTo(HaveOccurred())
defer deleteStorageClass(client, class.Name)
By("creating a claim")
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(client, claim.Name, claim.Namespace), "Failed to delete PVC ", claim.Name)
}()
// Wait for ClaimProvisionShortTimeout and make sure the phase does not become Bound, i.e. the wait errors out
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
Expect(err).To(HaveOccurred())
By("checking the claim is in pending state")
// Get new copy of the claim
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(claim.Status.Phase).To(Equal(v1.ClaimPending))
By("creating a pod referring to the claim")
// Create a pod referring to the claim and wait for it to get to running
pod, err := framework.CreateClientPod(client, claim.Namespace, claim)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.DeletePodOrFail(client, pod.Namespace, pod.Name)
}()
By("re-checking the claim to see it binded")
// Get new copy of the claim
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// make sure claim did bind
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
// collect node and pv details
node, err := client.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
return pv, node
}
func checkZoneFromLabelAndAffinity(pv *v1.PersistentVolume, zone string, matchZone bool) {
checkZonesFromLabelAndAffinity(pv, sets.NewString(zone), matchZone)
}
// checkZonesFromLabelAndAffinity checks that the PV's LabelZoneFailureDomain label and the
// terms with key LabelZoneFailureDomain in the PV's node affinity contain the given zones.
// matchZones indicates whether the zones must match exactly.
func checkZonesFromLabelAndAffinity(pv *v1.PersistentVolume, zones sets.String, matchZones bool) {
By("checking PV's zone label and node affinity terms match expected zone")
if pv == nil {
framework.Failf("nil pv passed")
}
pvLabel, ok := pv.Labels[kubeletapis.LabelZoneFailureDomain]
if !ok {
framework.Failf("label %s not found on PV", kubeletapis.LabelZoneFailureDomain)
}
zonesFromLabel, err := volumeutil.LabelZonesToSet(pvLabel)
if err != nil {
framework.Failf("unable to parse zone labels %s: %v", pvLabel, err)
}
if matchZones && !zonesFromLabel.Equal(zones) {
framework.Failf("value[s] of %s label for PV: %v does not match expected zone[s]: %v", kubeletapis.LabelZoneFailureDomain, zonesFromLabel, zones)
}
if !matchZones && !zonesFromLabel.IsSuperset(zones) {
framework.Failf("value[s] of %s label for PV: %v does not contain expected zone[s]: %v", kubeletapis.LabelZoneFailureDomain, zonesFromLabel, zones)
}
if pv.Spec.NodeAffinity == nil {
framework.Failf("node affinity not found in PV spec %v", pv.Spec)
}
if len(pv.Spec.NodeAffinity.Required.NodeSelectorTerms) == 0 {
framework.Failf("node selector terms not found in PV spec %v", pv.Spec)
}
for _, term := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms {
keyFound := false
for _, r := range term.MatchExpressions {
if r.Key != kubeletapis.LabelZoneFailureDomain {
continue
}
keyFound = true
zonesFromNodeAffinity := sets.NewString(r.Values...)
if matchZones && !zonesFromNodeAffinity.Equal(zones) {
framework.Failf("zones from NodeAffinity of PV: %v does not equal expected zone[s]: %v", zonesFromNodeAffinity, zones)
}
if !matchZones && !zonesFromNodeAffinity.IsSuperset(zones) {
framework.Failf("zones from NodeAffinity of PV: %v does not contain expected zone[s]: %v", zonesFromNodeAffinity, zones)
}
break
}
if !keyFound {
framework.Failf("label %s not found in term %v", kubeletapis.LabelZoneFailureDomain, term)
}
}
}
// checkAWSEBS checks properties of an AWS EBS. Test framework does not
// instantiate full AWS provider, therefore we need use ec2 API directly.
func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool) error {
@@ -250,7 +361,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
})
Describe("DynamicProvisioner", func() {
It("should provision storage with different parameters [Slow]", func() {
It("should provision storage with different parameters", func() {
cloudZone := getRandomCloudZone(c)
// This test checks that dynamic provisioning can provision a volume
@@ -265,8 +376,8 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
"type": "pd-ssd",
"zone": cloudZone,
},
claimSize: "1.5G",
expectedSize: "2G",
claimSize: "1.5Gi",
expectedSize: "2Gi",
pvCheck: func(volume *v1.PersistentVolume) error {
return checkGCEPD(volume, "pd-ssd")
},
@@ -278,8 +389,8 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
parameters: map[string]string{
"type": "pd-standard",
},
claimSize: "1.5G",
expectedSize: "2G",
claimSize: "1.5Gi",
expectedSize: "2Gi",
pvCheck: func(volume *v1.PersistentVolume) error {
return checkGCEPD(volume, "pd-standard")
},
@@ -443,8 +554,8 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
parameters: map[string]string{
"type": "pd-standard",
},
claimSize: "1G",
expectedSize: "1G",
claimSize: "1Gi",
expectedSize: "1Gi",
pvCheck: func(volume *v1.PersistentVolume) error {
return checkGCEPD(volume, "pd-standard")
},
@@ -477,8 +588,8 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
parameters: map[string]string{
"type": "pd-standard",
},
claimSize: "1G",
expectedSize: "1G",
claimSize: "1Gi",
expectedSize: "1Gi",
pvCheck: func(volume *v1.PersistentVolume) error {
return checkGCEPD(volume, "pd-standard")
},
@@ -526,7 +637,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
name: "unmanaged_zone",
provisioner: "kubernetes.io/gce-pd",
parameters: map[string]string{"zone": unmanagedZone},
claimSize: "1G",
claimSize: "1Gi",
}
sc := newStorageClass(test, ns, suffix)
sc, err = c.StorageV1().StorageClasses().Create(sc)
@@ -675,17 +786,38 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Describe("DynamicProvisioner External", func() {
It("should let an external dynamic provisioner create and delete persistent volumes [Slow]", func() {
// external dynamic provisioner pods need additional permissions provided by the
// persistent-volume-provisioner role
framework.BindClusterRole(c.RbacV1beta1(), "system:persistent-volume-provisioner", ns,
rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: ns, Name: "default"})
// persistent-volume-provisioner clusterrole and a leader-locking role
serviceAccountName := "default"
subject := rbacv1beta1.Subject{
Kind: rbacv1beta1.ServiceAccountKind,
Namespace: ns,
Name: serviceAccountName,
}
err := framework.WaitForAuthorizationUpdate(c.AuthorizationV1beta1(),
serviceaccount.MakeUsername(ns, "default"),
framework.BindClusterRole(c.RbacV1beta1(), "system:persistent-volume-provisioner", ns, subject)
roleName := "leader-locking-nfs-provisioner"
_, err := f.ClientSet.RbacV1beta1().Roles(ns).Create(&rbacv1beta1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
},
Rules: []rbacv1beta1.PolicyRule{{
APIGroups: []string{""},
Resources: []string{"endpoints"},
Verbs: []string{"get", "list", "watch", "create", "update", "patch"},
}},
})
framework.ExpectNoError(err, "Failed to create leader-locking role")
framework.BindRoleInNamespace(c.RbacV1beta1(), roleName, ns, subject)
err = framework.WaitForAuthorizationUpdate(c.AuthorizationV1beta1(),
serviceaccount.MakeUsername(ns, serviceAccountName),
"", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true)
framework.ExpectNoError(err, "Failed to update authorization: %v", err)
framework.ExpectNoError(err, "Failed to update authorization")
By("creating an external dynamic provisioner pod")
pod := startExternalProvisioner(c, ns)
pod := utils.StartExternalProvisioner(c, ns, externalPluginName)
defer framework.DeletePodOrFail(c, ns, pod.Name)
By("creating a StorageClass")
@@ -714,13 +846,6 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
claimSize: "2Gi",
expectedSize: "2Gi",
}
// gce or gke
if getDefaultPluginName() == "kubernetes.io/gce-pd" {
// using GB not GiB as e2e test unit since gce-pd returns GB,
// or expectedSize may be greater than claimSize.
test.claimSize = "2G"
test.expectedSize = "2G"
}
claim := newClaim(test, ns, "default")
testDynamicProvisioning(test, c, claim, nil)
@@ -796,24 +921,13 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
serverUrl := "https://" + pod.Status.PodIP + ":8081"
By("creating a StorageClass")
test := storageClassTest{
name: "Gluster Dynamic provisioner test",
provisioner: "kubernetes.io/glusterfs",
claimSize: "2Gi",
expectedSize: "2Gi",
parameters: map[string]string{"resturl": serverUrl},
attach: false,
name: "Gluster Dynamic provisioner test",
provisioner: "kubernetes.io/glusterfs",
claimSize: "2Gi",
expectedSize: "2Gi",
parameters: map[string]string{"resturl": serverUrl},
skipWriteReadCheck: true,
}
// GCE/GKE
if getDefaultPluginName() == "kubernetes.io/gce-pd" {
// Keeping an extra condition here based on below facts:
//*) gce-pd rounds up to the next gb.
//*) GlusterFS provisioner rounduptoGiB() and send it to backend,
// which does 'size/number' from provisioner*1024*1024*1024
test.claimSize = "2Gi"
test.expectedSize = "3G"
}
suffix := fmt.Sprintf("glusterdptest")
class := newStorageClass(test, ns, suffix)
@@ -833,24 +947,185 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
By("creating a claim with default class")
block := v1.PersistentVolumeBlock
test := storageClassTest{
name: "default",
claimSize: "2Gi",
expectedSize: "2Gi",
volumeMode: &block,
name: "default",
claimSize: "2Gi",
expectedSize: "2Gi",
volumeMode: &block,
skipWriteReadCheck: true,
}
// gce or gke
if getDefaultPluginName() == "kubernetes.io/gce-pd" {
// using GB not GiB as e2e test unit since gce-pd returns GB,
// or expectedSize may be greater than claimSize.
test.claimSize = "2G"
test.expectedSize = "2G"
}
claim := newClaim(test, ns, "default")
claim.Spec.VolumeMode = &block
testDynamicProvisioning(test, c, claim, nil)
})
})
Describe("Invalid AWS KMS key", func() {
It("should report an error and create no PV", func() {
framework.SkipUnlessProviderIs("aws")
test := storageClassTest{
name: "AWS EBS with invalid KMS key",
provisioner: "kubernetes.io/aws-ebs",
claimSize: "2Gi",
parameters: map[string]string{"kmsKeyId": "arn:aws:kms:us-east-1:123456789012:key/55555555-5555-5555-5555-555555555555"},
}
By("creating a StorageClass")
suffix := fmt.Sprintf("invalid-aws")
class := newStorageClass(test, ns, suffix)
class, err := c.StorageV1().StorageClasses().Create(class)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("deleting storage class %s", class.Name)
framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(class.Name, nil))
}()
By("creating a claim object with a suffix for gluster dynamic provisioner")
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
if err != nil && !apierrs.IsNotFound(err) {
framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
}
}()
// Watch events until the message about invalid key appears
err = wait.Poll(time.Second, framework.ClaimProvisionTimeout, func() (bool, error) {
events, err := c.CoreV1().Events(claim.Namespace).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, event := range events.Items {
if strings.Contains(event.Message, "failed to create encrypted volume: the volume disappeared after creation, most likely due to inaccessible KMS encryption key") {
return true, nil
}
}
return false, nil
})
Expect(err).NotTo(HaveOccurred())
})
})
Describe("DynamicProvisioner delayed binding [Slow]", func() {
It("should create a persistent volume in the same zone as node after a pod mounting the claim is started", func() {
tests := []storageClassTest{
{
name: "Delayed binding EBS storage class test",
cloudProviders: []string{"aws"},
provisioner: "kubernetes.io/aws-ebs",
claimSize: "2Gi",
delayBinding: true,
},
{
name: "Delayed binding GCE PD storage class test",
cloudProviders: []string{"gce", "gke"},
provisioner: "kubernetes.io/gce-pd",
claimSize: "2Gi",
delayBinding: true,
},
}
for _, test := range tests {
if !framework.ProviderIs(test.cloudProviders...) {
framework.Logf("Skipping %q: cloud providers is not %v", test.name, test.cloudProviders)
continue
}
By("creating a claim with class with waitForFirstConsumer")
suffix := "delayed"
class := newStorageClass(test, ns, suffix)
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
pv, node := testBindingWaitForFirstConsumer(c, claim, class)
if node == nil {
framework.Failf("unexpected nil node found")
}
zone, ok := node.Labels[kubeletapis.LabelZoneFailureDomain]
if !ok {
framework.Failf("label %s not found on Node", kubeletapis.LabelZoneFailureDomain)
}
checkZoneFromLabelAndAffinity(pv, zone, true)
}
})
})
Describe("DynamicProvisioner allowedTopologies", func() {
It("should create persistent volume in the zone specified in allowedTopologies of storageclass", func() {
tests := []storageClassTest{
{
name: "AllowedTopologies EBS storage class test",
cloudProviders: []string{"aws"},
provisioner: "kubernetes.io/aws-ebs",
claimSize: "2Gi",
expectedSize: "2Gi",
},
{
name: "AllowedTopologies GCE PD storage class test",
cloudProviders: []string{"gce", "gke"},
provisioner: "kubernetes.io/gce-pd",
claimSize: "2Gi",
expectedSize: "2Gi",
},
}
for _, test := range tests {
if !framework.ProviderIs(test.cloudProviders...) {
framework.Logf("Skipping %q: cloud providers is not %v", test.name, test.cloudProviders)
continue
}
By("creating a claim with class with allowedTopologies set")
suffix := "topology"
class := newStorageClass(test, ns, suffix)
zone := getRandomCloudZone(c)
addSingleZoneAllowedTopologyToStorageClass(c, class, zone)
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
pv := testDynamicProvisioning(test, c, claim, class)
checkZoneFromLabelAndAffinity(pv, zone, true)
}
})
})
Describe("DynamicProvisioner delayed binding with allowedTopologies [Slow]", func() {
It("should create persistent volume in the same zone as specified in allowedTopologies after a pod mounting the claim is started", func() {
tests := []storageClassTest{
{
name: "AllowedTopologies and delayed binding EBS storage class test",
cloudProviders: []string{"aws"},
provisioner: "kubernetes.io/aws-ebs",
claimSize: "2Gi",
delayBinding: true,
},
{
name: "AllowedTopologies and delayed binding GCE PD storage class test",
cloudProviders: []string{"gce", "gke"},
provisioner: "kubernetes.io/gce-pd",
claimSize: "2Gi",
delayBinding: true,
},
}
for _, test := range tests {
if !framework.ProviderIs(test.cloudProviders...) {
framework.Logf("Skipping %q: cloud providers is not %v", test.name, test.cloudProviders)
continue
}
By("creating a claim with class with WaitForFirstConsumer and allowedTopologies")
suffix := "delayed-topo"
class := newStorageClass(test, ns, suffix)
topoZone := getRandomCloudZone(c)
addSingleZoneAllowedTopologyToStorageClass(c, class, topoZone)
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
pv, node := testBindingWaitForFirstConsumer(c, claim, class)
if node == nil {
framework.Failf("unexpected nil node found")
}
nodeZone, ok := node.Labels[kubeletapis.LabelZoneFailureDomain]
if !ok {
framework.Failf("label %s not found on Node", kubeletapis.LabelZoneFailureDomain)
}
if topoZone != nodeZone {
framework.Failf("zone specified in allowedTopologies: %s does not match zone of node where PV got provisioned: %s", topoZone, nodeZone)
}
checkZoneFromLabelAndAffinity(pv, topoZone, true)
}
})
})
})
func getDefaultStorageClassName(c clientset.Interface) string {
@@ -905,7 +1180,7 @@ func updateDefaultStorageClass(c clientset.Interface, scName string, defaultStr
verifyDefaultStorageClass(c, scName, expectedDefault)
}
func newClaim(t storageClassTest, ns, suffix string) *v1.PersistentVolumeClaim {
func getClaim(claimSize string, ns string) *v1.PersistentVolumeClaim {
claim := v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-",
@@ -917,7 +1192,7 @@ func newClaim(t storageClassTest, ns, suffix string) *v1.PersistentVolumeClaim {
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(t.claimSize),
v1.ResourceName(v1.ResourceStorage): resource.MustParse(claimSize),
},
},
},
@@ -926,6 +1201,10 @@ func newClaim(t storageClassTest, ns, suffix string) *v1.PersistentVolumeClaim {
return &claim
}
func newClaim(t storageClassTest, ns, suffix string) *v1.PersistentVolumeClaim {
return getClaim(t.claimSize, ns)
}
// runInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
func runInPodWithVolume(c clientset.Interface, ns, claimName, nodeName, command string) {
pod := &v1.Pod{
@@ -940,7 +1219,7 @@ func runInPodWithVolume(c clientset.Interface, ns, claimName, nodeName, command
Containers: []v1.Container{
{
Name: "volume-tester",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
VolumeMounts: []v1.VolumeMount{
@@ -993,6 +1272,18 @@ func getDefaultPluginName() string {
return ""
}
func addSingleZoneAllowedTopologyToStorageClass(c clientset.Interface, sc *storage.StorageClass, zone string) {
term := v1.TopologySelectorTerm{
MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
{
Key: kubeletapis.LabelZoneFailureDomain,
Values: []string{zone},
},
},
}
sc.AllowedTopologies = append(sc.AllowedTopologies, term)
}
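// The class then restricts provisioning to that zone; in manifest form
// (illustrative zone value):
//
//	allowedTopologies:
//	- matchLabelExpressions:
//	  - key: failure-domain.beta.kubernetes.io/zone
//	    values: ["us-central1-a"]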
func newStorageClass(t storageClassTest, ns string, suffix string) *storage.StorageClass {
pluginName := t.provisioner
if pluginName == "" {
@@ -1001,6 +1292,24 @@ func newStorageClass(t storageClassTest, ns string, suffix string) *storage.Stor
if suffix == "" {
suffix = "sc"
}
bindingMode := storage.VolumeBindingImmediate
if t.delayBinding {
bindingMode = storage.VolumeBindingWaitForFirstConsumer
}
return getStorageClass(pluginName, t.parameters, &bindingMode, ns, suffix)
}
func getStorageClass(
provisioner string,
parameters map[string]string,
bindingMode *storage.VolumeBindingMode,
ns string,
suffix string,
) *storage.StorageClass {
if bindingMode == nil {
defaultBindingMode := storage.VolumeBindingImmediate
bindingMode = &defaultBindingMode
}
return &storage.StorageClass{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
@@ -1009,8 +1318,9 @@ func newStorageClass(t storageClassTest, ns string, suffix string) *storage.Stor
// Name must be unique, so let's base it on namespace name
Name: ns + "-" + suffix,
},
Provisioner: pluginName,
Parameters: t.parameters,
Provisioner: provisioner,
Parameters: parameters,
VolumeBindingMode: bindingMode,
}
}
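// Illustrative usage (assumed parameters): a delayed-binding GCE PD class can be
// built via newStorageClass with delayBinding set, or directly:
//
//	mode := storage.VolumeBindingWaitForFirstConsumer
//	sc := getStorageClass("kubernetes.io/gce-pd", map[string]string{"type": "pd-standard"}, &mode, ns, "delayed")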
@@ -1086,79 +1396,6 @@ func startGlusterDpServerPod(c clientset.Interface, ns string) *v1.Pod {
return pod
}
func startExternalProvisioner(c clientset.Interface, ns string) *v1.Pod {
podClient := c.CoreV1().Pods(ns)
provisionerPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "external-provisioner-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nfs-provisioner",
Image: "quay.io/kubernetes_incubator/nfs-provisioner:v1.0.9",
SecurityContext: &v1.SecurityContext{
Capabilities: &v1.Capabilities{
Add: []v1.Capability{"DAC_READ_SEARCH"},
},
},
Args: []string{
"-provisioner=" + externalPluginName,
"-grace-period=0",
},
Ports: []v1.ContainerPort{
{Name: "nfs", ContainerPort: 2049},
{Name: "mountd", ContainerPort: 20048},
{Name: "rpcbind", ContainerPort: 111},
{Name: "rpcbind-udp", ContainerPort: 111, Protocol: v1.ProtocolUDP},
},
Env: []v1.EnvVar{
{
Name: "POD_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
},
},
ImagePullPolicy: v1.PullIfNotPresent,
VolumeMounts: []v1.VolumeMount{
{
Name: "export-volume",
MountPath: "/export",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "export-volume",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
},
}
provisionerPod, err := podClient.Create(provisionerPod)
framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err)
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod))
By("locating the provisioner pod")
pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)
return pod
}
// waitForProvisionedVolumesDelete is a polling wrapper to scan all PersistentVolumes for any associated to the test's
// StorageClass. Returns either an error and nil values or the remaining PVs and their count.
func waitForProvisionedVolumesDeleted(c clientset.Interface, scName string) ([]*v1.PersistentVolume, error) {

View File

@@ -14,72 +14,18 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* This test checks that various VolumeSources are working.
*
* There are two ways to test the volumes:
* 1) With containerized server (NFS, Ceph, Gluster, iSCSI, ...)
* The test creates a server pod, exporting simple 'index.html' file.
* Then it uses appropriate VolumeSource to import this file into a client pod
* and checks that the pod can see the file. It does so by importing the file
* into the web server root and loading the index.html from it.
*
* These tests work only when privileged containers are allowed; exporting
* various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
* other privileged magic in the server pod.
*
* Note that the server containers are for testing purposes only and should not
* be used in production.
*
* 2) With a server outside of Kubernetes (Cinder, ...)
* An appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
* the tested Kubernetes cluster. The test itself creates a new volume
* and checks that Kubernetes can use it as a volume.
*/
// test/e2e/common/volumes.go duplicates the GlusterFS test from this file. Any changes made to this
// test should be made there as well.
// This is the volumes test for ConfigMap.
package storage
import (
"os/exec"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere"
)
func DeleteCinderVolume(name string) error {
// Try to delete the volume for several seconds - it takes
// a while for the plugin to detach it.
var output []byte
var err error
timeout := time.Second * 120
framework.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
output, err = exec.Command("cinder", "delete", name).CombinedOutput()
if err == nil {
framework.Logf("Cinder volume %s deleted", name)
return nil
} else {
framework.Logf("Failed to delete volume %s: %v", name, err)
}
}
framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:]))
return err
}
// These tests need privileged containers, which are disabled by default.
var _ = utils.SIGDescribe("Volumes", func() {
f := framework.NewDefaultFramework("volume")
@@ -94,277 +40,6 @@ var _ = utils.SIGDescribe("Volumes", func() {
namespace = f.Namespace
})
////////////////////////////////////////////////////////////////////////
// NFS
////////////////////////////////////////////////////////////////////////
Describe("NFS", func() {
It("should be mountable", func() {
config, _, serverIP := framework.NewNFSServer(cs, namespace.Name, []string{})
defer framework.VolumeTestCleanup(f, config)
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
Server: serverIP,
Path: "/",
ReadOnly: true,
},
},
File: "index.html",
// Must match content of test/images/volumes-tester/nfs/index.html
ExpectedContent: "Hello from NFS!",
},
}
framework.TestVolumeClient(cs, config, nil, tests)
})
})
////////////////////////////////////////////////////////////////////////
// Gluster
////////////////////////////////////////////////////////////////////////
Describe("GlusterFS", func() {
It("should be mountable", func() {
//TODO (copejon) GFS is not supported on debian image.
framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu")
// create gluster server and endpoints
config, _, _ := framework.NewGlusterfsServer(cs, namespace.Name)
name := config.Prefix + "-server"
defer func() {
framework.VolumeTestCleanup(f, config)
err := cs.CoreV1().Endpoints(namespace.Name).Delete(name, nil)
Expect(err).NotTo(HaveOccurred(), "defer: Gluster delete endpoints failed")
}()
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{
EndpointsName: name,
// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
Path: "test_vol",
ReadOnly: true,
},
},
File: "index.html",
// Must match content of test/images/volumes-tester/gluster/index.html
ExpectedContent: "Hello from GlusterFS!",
},
}
framework.TestVolumeClient(cs, config, nil, tests)
})
})
////////////////////////////////////////////////////////////////////////
// iSCSI
////////////////////////////////////////////////////////////////////////
// The test needs privileged containers, which are disabled by default.
// Also, make sure that iscsiadm utility and iscsi target kernel modules
// are installed on all nodes!
// Run the test with "go run hack/e2e.go ... --ginkgo.focus=iSCSI"
Describe("iSCSI [Feature:Volumes]", func() {
It("should be mountable", func() {
config, _, serverIP := framework.NewISCSIServer(cs, namespace.Name)
defer framework.VolumeTestCleanup(f, config)
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
ISCSI: &v1.ISCSIVolumeSource{
TargetPortal: serverIP + ":3260",
// from test/images/volumes-tester/iscsi/initiatorname.iscsi
IQN: "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c",
Lun: 0,
FSType: "ext2",
},
},
File: "index.html",
// Must match content of test/images/volumes-tester/iscsi/block.tar.gz
ExpectedContent: "Hello from iSCSI",
},
}
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
})
})
////////////////////////////////////////////////////////////////////////
// Ceph RBD
////////////////////////////////////////////////////////////////////////
Describe("Ceph RBD [Feature:Volumes]", func() {
It("should be mountable", func() {
config, _, secret, serverIP := framework.NewRBDServer(cs, namespace.Name)
defer framework.VolumeTestCleanup(f, config)
defer cs.CoreV1().Secrets(config.Namespace).Delete(secret.Name, nil)
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{serverIP},
RBDPool: "rbd",
RBDImage: "foo",
RadosUser: "admin",
SecretRef: &v1.LocalObjectReference{
Name: secret.Name,
},
FSType: "ext2",
},
},
File: "index.html",
// Must match content of test/images/volumes-tester/rbd/create_block.sh
ExpectedContent: "Hello from RBD",
},
}
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
})
})
////////////////////////////////////////////////////////////////////////
// Ceph
////////////////////////////////////////////////////////////////////////
Describe("CephFS [Feature:Volumes]", func() {
It("should be mountable", func() {
config, _, secret, serverIP := framework.NewRBDServer(cs, namespace.Name)
defer framework.VolumeTestCleanup(f, config)
defer cs.CoreV1().Secrets(config.Namespace).Delete(secret.Name, nil)
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
CephFS: &v1.CephFSVolumeSource{
Monitors: []string{serverIP + ":6789"},
User: "kube",
SecretRef: &v1.LocalObjectReference{Name: secret.Name},
ReadOnly: true,
},
},
File: "index.html",
// Must match content of test/images/volumes-tester/ceph/index.html
ExpectedContent: "Hello Ceph!",
},
}
framework.TestVolumeClient(cs, config, nil, tests)
})
})
////////////////////////////////////////////////////////////////////////
// OpenStack Cinder
////////////////////////////////////////////////////////////////////////
// This test assumes that OpenStack client tools are installed
// (/usr/bin/nova, /usr/bin/cinder and /usr/bin/keystone)
// and that the usual OpenStack authentication env. variables are set
// (OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME at least).
Describe("Cinder [Feature:Volumes]", func() {
It("should be mountable", func() {
framework.SkipUnlessProviderIs("openstack")
config := framework.VolumeTestConfig{
Namespace: namespace.Name,
Prefix: "cinder",
}
// We assume that namespace.Name is a random string
volumeName := namespace.Name
By("creating a test Cinder volume")
output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput()
outputString := string(output[:])
framework.Logf("cinder output:\n%s", outputString)
Expect(err).NotTo(HaveOccurred())
defer DeleteCinderVolume(volumeName)
// Parse 'id' from stdout. Expected format:
// | attachments | [] |
// | availability_zone | nova |
// ...
// | id | 1d6ff08f-5d1c-41a4-ad72-4ef872cae685 |
volumeID := ""
for _, line := range strings.Split(outputString, "\n") {
fields := strings.Fields(line)
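// A table row such as "| id | 1d6ff08f-5d1c-41a4-ad72-4ef872cae685 |"
// splits into exactly 5 fields: "|", "id", "|", "<value>", "|",
// so the value sits at fields[3].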
if len(fields) != 5 {
continue
}
if fields[1] != "id" {
continue
}
volumeID = fields[3]
break
}
framework.Logf("Volume ID: %s", volumeID)
Expect(volumeID).NotTo(Equal(""))
defer func() {
framework.Logf("Running volumeTestCleanup")
framework.VolumeTestCleanup(f, config)
}()
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
Cinder: &v1.CinderVolumeSource{
VolumeID: volumeID,
FSType: "ext3",
ReadOnly: false,
},
},
File: "index.html",
// Randomize index.html to make sure we don't see the
// content from previous test runs.
ExpectedContent: "Hello from Cinder from namespace " + volumeName,
},
}
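// InjectHtml runs a short-lived injector pod that writes ExpectedContent
// into index.html on the volume, so the client pod started by
// TestVolumeClient below can read it back.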
framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
})
})
////////////////////////////////////////////////////////////////////////
// GCE PD
////////////////////////////////////////////////////////////////////////
Describe("PD", func() {
var config framework.VolumeTestConfig
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
config = framework.VolumeTestConfig{
Namespace: namespace.Name,
Prefix: "pd",
// The PD will be created in the framework.TestContext.CloudConfig.Zone zone,
// so pods should also be scheduled there.
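// (kubeletapis.LabelZoneFailureDomain is the well-known
// "failure-domain.beta.kubernetes.io/zone" node label.)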
NodeSelector: map[string]string{
kubeletapis.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone,
},
}
})
It("should be mountable with ext3", func() {
testGCEPD(f, config, cs, "ext3")
})
It("should be mountable with ext4", func() {
testGCEPD(f, config, cs, "ext4")
})
It("should be mountable with xfs", func() {
// xfs is not supported on GCI
// and not installed by default on Debian
framework.SkipUnlessNodeOSDistroIs("ubuntu")
testGCEPD(f, config, cs, "xfs")
})
})
////////////////////////////////////////////////////////////////////////
// ConfigMap
////////////////////////////////////////////////////////////////////////
Describe("ConfigMap", func() {
It("should be mountable", func() {
config := framework.VolumeTestConfig{
@@ -434,139 +109,4 @@ var _ = utils.SIGDescribe("Volumes", func() {
framework.TestVolumeClient(cs, config, nil, tests)
})
})
////////////////////////////////////////////////////////////////////////
// vSphere
////////////////////////////////////////////////////////////////////////
Describe("vsphere [Feature:Volumes]", func() {
It("should be mountable", func() {
framework.SkipUnlessProviderIs("vsphere")
vspheretest.Bootstrap(f)
nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo()
config := framework.VolumeTestConfig{
Namespace: namespace.Name,
Prefix: "vsphere",
}
volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
defer func() {
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}()
defer func() {
framework.Logf("Running volumeTestCleanup")
framework.VolumeTestCleanup(f, config)
}()
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volumePath,
FSType: "ext4",
},
},
File: "index.html",
// Randomize index.html to make sure we don't see the
// content from previous test runs.
ExpectedContent: "Hello from vSphere from namespace " + namespace.Name,
},
}
framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
})
})
////////////////////////////////////////////////////////////////////////
// Azure Disk
////////////////////////////////////////////////////////////////////////
Describe("Azure Disk [Feature:Volumes]", func() {
It("should be mountable [Slow]", func() {
framework.SkipUnlessProviderIs("azure")
config := framework.VolumeTestConfig{
Namespace: namespace.Name,
Prefix: "azure",
}
By("creating a test azure disk volume")
volumeName, err := framework.CreatePDWithRetry()
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.DeletePDWithRetry(volumeName)
}()
defer func() {
framework.Logf("Running volumeTestCleanup")
framework.VolumeTestCleanup(f, config)
}()
fsType := "ext4"
readOnly := false
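// CreatePDWithRetry returns the full Azure disk URI; AzureDiskVolumeSource
// needs both the URI (DataDiskURI) and the bare disk name, i.e. the last
// path segment of that URI.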
diskName := volumeName[(strings.LastIndex(volumeName, "/") + 1):]
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DiskName: diskName,
DataDiskURI: volumeName,
FSType: &fsType,
ReadOnly: &readOnly,
},
},
File: "index.html",
// Randomize index.html to make sure we don't see the
// content from previous test runs.
ExpectedContent: "Hello from Azure from namespace " + volumeName,
},
}
framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
})
})
})
func testGCEPD(f *framework.Framework, config framework.VolumeTestConfig, cs clientset.Interface, fs string) {
By("creating a test gce pd volume")
volumeName, err := framework.CreatePDWithRetry()
Expect(err).NotTo(HaveOccurred())
defer func() {
// - Get NodeName from the pod spec to which the volume is mounted.
// - Force detach and delete.
pod, err := f.PodClient().Get(config.Prefix+"-client", metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed getting pod %q.", config.Prefix+"-client")
detachAndDeletePDs(volumeName, []types.NodeName{types.NodeName(pod.Spec.NodeName)})
}()
defer func() {
framework.Logf("Running volumeTestCleanup")
framework.VolumeTestCleanup(f, config)
}()
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: volumeName,
FSType: fs,
ReadOnly: false,
},
},
File: "index.html",
// Randomize index.html to make sure we don't see the
// content from previous test runs.
ExpectedContent: "Hello from GCE from namespace " + volumeName,
},
}
framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
}

View File

@@ -39,8 +39,19 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/storage/vsphere",
deps = [
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
@@ -53,16 +64,6 @@ go_library(
"//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
"//vendor/gopkg.in/gcfg.v1:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)

View File

@@ -59,6 +59,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
*/
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
c = f.ClientSet
ns = f.Namespace.Name
clientPod = nil

View File

@@ -32,7 +32,6 @@ import (
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/mo"
vim25types "github.com/vmware/govmomi/vim25/types"
vimtypes "github.com/vmware/govmomi/vim25/types"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
@@ -44,6 +43,7 @@ import (
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
@@ -288,7 +288,7 @@ func getVSpherePodSpecWithClaim(claimName string, nodeSelectorKV map[string]stri
Containers: []v1.Container{
{
Name: "volume-tester",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
VolumeMounts: []v1.VolumeMount{
@@ -353,7 +353,7 @@ func getVSpherePodSpecWithVolumePaths(volumePaths []string, keyValuelabel map[st
Containers: []v1.Container{
{
Name: "vsphere-e2e-container-" + string(uuid.NewUUID()),
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: commands,
VolumeMounts: volumeMounts,
},
@@ -617,7 +617,7 @@ func poweroffNodeVM(nodeName string, vm *object.VirtualMachine) {
_, err := vm.PowerOff(ctx)
Expect(err).NotTo(HaveOccurred())
err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOff)
err = vm.WaitForPowerState(ctx, vim25types.VirtualMachinePowerStatePoweredOff)
Expect(err).NotTo(HaveOccurred(), "Unable to power off the node")
}
@@ -629,7 +629,7 @@ func poweronNodeVM(nodeName string, vm *object.VirtualMachine) {
framework.Logf("Powering on node VM %s", nodeName)
vm.PowerOn(ctx)
err := vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOn)
err := vm.WaitForPowerState(ctx, vim25types.VirtualMachinePowerStatePoweredOn)
Expect(err).NotTo(HaveOccurred(), "Unable to power on the node")
}

View File

@@ -102,7 +102,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
pods = append(pods, pod)
nodeName := pod.Spec.NodeName
By(fmt.Sprintf("Verify volume %s is attached to the pod %s", volumePath, nodeName))
By(fmt.Sprintf("Verify volume %s is attached to the node %s", volumePath, nodeName))
expectVolumeToBeAttached(nodeName, volumePath)
}
@@ -119,7 +119,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
volumePath := volumePaths[i]
nodeName := pod.Spec.NodeName
By(fmt.Sprintf("After master restart, verify volume %v is attached to the pod %v", volumePath, nodeName))
By(fmt.Sprintf("After master restart, verify volume %v is attached to the node %v", volumePath, nodeName))
expectVolumeToBeAttached(nodeName, volumePath)
By(fmt.Sprintf("Deleting pod on node %s", nodeName))

View File

@@ -94,10 +94,12 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
By("Creating a Deployment")
deployment, err := framework.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "")
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create Deployment with err: %v", err))
defer client.ExtensionsV1beta1().Deployments(namespace).Delete(deployment.Name, &metav1.DeleteOptions{})
By("Get pod from the deployement")
podList, err := framework.GetPodsForDeployment(client, deployment)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to get pod from the deployement with err: %v", err))
Expect(podList.Items).NotTo(BeEmpty())
pod := podList.Items[0]
node1 := pod.Spec.NodeName