Bumping k8s dependencies to 1.13
This commit is contained in:
47
vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/BUILD
generated
vendored
Normal file
47
vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/BUILD
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")

# Library target for the storage testsuites e2e package.
# NOTE(review): the "automanaged" tags below indicate this file is maintained
# by tooling (gazelle); regenerate upstream rather than hand-editing.
go_library(
    name = "go_default_library",
    srcs = [
        "base.go",
        "subpath.go",
        "volume_io.go",
        "volumemode.go",
        "volumes.go",
    ],
    importpath = "k8s.io/kubernetes/test/e2e/storage/testsuites",
    visibility = ["//visibility:public"],
    deps = [
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/e2e/storage/drivers:go_default_library",
        "//test/e2e/storage/testpatterns:go_default_library",
        "//test/e2e/storage/utils:go_default_library",
        "//test/utils/image:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//vendor/github.com/onsi/gomega:go_default_library",
    ],
)

# Every file in this directory; used when vendoring the tree.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

# Aggregates this package's sources for the repo-wide all-srcs target.
filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
|
||||
325
vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/base.go
generated
vendored
Normal file
325
vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/base.go
generated
vendored
Normal file
@@ -0,0 +1,325 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package testsuites
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
storagev1 "k8s.io/api/storage/v1"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/drivers"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
)
|
||||
|
||||
// TestSuite represents an interface for a set of tests which works with TestDriver
type TestSuite interface {
	// getTestSuiteInfo returns the TestSuiteInfo for this TestSuite
	getTestSuiteInfo() TestSuiteInfo
	// skipUnsupportedTest skips the test if this TestSuite is not suitable to be
	// tested with the combination of TestPattern and TestDriver
	skipUnsupportedTest(testpatterns.TestPattern, drivers.TestDriver)
	// execTest executes (registers) the test of the testpattern for the driver
	execTest(drivers.TestDriver, testpatterns.TestPattern)
}
|
||||
|
||||
// TestSuiteInfo holds the identifying metadata of a TestSuite: its name,
// optional feature tag, and the set of test patterns it runs against.
type TestSuiteInfo struct {
	name         string                     // name of the TestSuite
	featureTag   string                     // featureTag for the TestSuite
	testPatterns []testpatterns.TestPattern // Slice of TestPattern for the TestSuite
}
|
||||
|
||||
// TestResource represents an interface for resources that is used by TestSuite
type TestResource interface {
	// setupResource sets up test resources to be used for the tests with the
	// combination of TestDriver and TestPattern
	setupResource(drivers.TestDriver, testpatterns.TestPattern)
	// cleanupResource cleans up the test resources created in setupResource
	cleanupResource(drivers.TestDriver, testpatterns.TestPattern)
}
|
||||
|
||||
func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string {
|
||||
tsInfo := suite.getTestSuiteInfo()
|
||||
return fmt.Sprintf("[Testpattern: %s]%s %s%s", pattern.Name, pattern.FeatureTag, tsInfo.name, tsInfo.featureTag)
|
||||
}
|
||||
|
||||
// RunTestSuite runs all testpatterns of all testSuites for a driver
|
||||
func RunTestSuite(f *framework.Framework, config framework.VolumeTestConfig, driver drivers.TestDriver, tsInits []func() TestSuite) {
|
||||
for _, testSuiteInit := range tsInits {
|
||||
suite := testSuiteInit()
|
||||
tsInfo := suite.getTestSuiteInfo()
|
||||
|
||||
for _, pattern := range tsInfo.testPatterns {
|
||||
suite.execTest(driver, pattern)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// skipUnsupportedTest will skip tests if the combination of driver, testsuite, and testpattern
|
||||
// is not suitable to be tested.
|
||||
// Whether it needs to be skipped is checked by following steps:
|
||||
// 1. Check if Whether volType is supported by driver from its interface
|
||||
// 2. Check if fsType is supported by driver
|
||||
// 3. Check with driver specific logic
|
||||
// 4. Check with testSuite specific logic
|
||||
func skipUnsupportedTest(suite TestSuite, driver drivers.TestDriver, pattern testpatterns.TestPattern) {
|
||||
dInfo := driver.GetDriverInfo()
|
||||
|
||||
// 1. Check if Whether volType is supported by driver from its interface
|
||||
var isSupported bool
|
||||
switch pattern.VolType {
|
||||
case testpatterns.InlineVolume:
|
||||
_, isSupported = driver.(drivers.InlineVolumeTestDriver)
|
||||
case testpatterns.PreprovisionedPV:
|
||||
_, isSupported = driver.(drivers.PreprovisionedPVTestDriver)
|
||||
case testpatterns.DynamicPV:
|
||||
_, isSupported = driver.(drivers.DynamicPVTestDriver)
|
||||
default:
|
||||
isSupported = false
|
||||
}
|
||||
|
||||
if !isSupported {
|
||||
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
|
||||
}
|
||||
|
||||
// 2. Check if fsType is supported by driver
|
||||
if !dInfo.SupportedFsType.Has(pattern.FsType) {
|
||||
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.FsType)
|
||||
}
|
||||
|
||||
// 3. Check with driver specific logic
|
||||
driver.SkipUnsupportedTest(pattern)
|
||||
|
||||
// 4. Check with testSuite specific logic
|
||||
suite.skipUnsupportedTest(pattern, driver)
|
||||
}
|
||||
|
||||
// genericVolumeTestResource is a generic implementation of TestResource that will be able to
// be used in most of TestSuites.
// See volume_io.go or volumes.go in test/e2e/storage/testsuites/ for how to use this resource.
// Also, see subpath.go in the same directory for how to extend and use it.
type genericVolumeTestResource struct {
	driver    drivers.TestDriver
	volType   string                    // human-readable volume-type label, e.g. "<driver>-dynamicPV"
	volSource *v1.VolumeSource          // volume source to mount in test pods
	pvc       *v1.PersistentVolumeClaim // set only for PV-backed patterns
	pv        *v1.PersistentVolume      // set only for PV-backed patterns
	sc        *storagev1.StorageClass   // set only for DynamicPV pattern

	// driverTestResource is the driver-specific opaque handle returned by
	// drivers.CreateVolume; passed back for source creation and deletion.
	driverTestResource interface{}
}

// Compile-time check that genericVolumeTestResource implements TestResource.
var _ TestResource = &genericVolumeTestResource{}
|
||||
|
||||
// setupResource sets up genericVolumeTestResource for the given driver and
// pattern: it creates the backing volume and then, depending on the volume
// type, an inline volume source, a pre-provisioned PV/PVC pair, or a
// StorageClass plus dynamically provisioned PVC.
func (r *genericVolumeTestResource) setupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
	r.driver = driver
	dInfo := driver.GetDriverInfo()
	f := dInfo.Framework
	cs := f.ClientSet
	fsType := pattern.FsType
	volType := pattern.VolType

	// Create volume for pre-provisioned volume tests
	r.driverTestResource = drivers.CreateVolume(driver, volType)

	switch volType {
	case testpatterns.InlineVolume:
		framework.Logf("Creating resource for inline volume")
		if iDriver, ok := driver.(drivers.InlineVolumeTestDriver); ok {
			r.volSource = iDriver.GetVolumeSource(false, fsType, r.driverTestResource)
			r.volType = dInfo.Name
		}
	case testpatterns.PreprovisionedPV:
		framework.Logf("Creating resource for pre-provisioned PV")
		if pDriver, ok := driver.(drivers.PreprovisionedPVTestDriver); ok {
			pvSource := pDriver.GetPersistentVolumeSource(false, fsType, r.driverTestResource)
			if pvSource != nil {
				r.volSource, r.pv, r.pvc = createVolumeSourceWithPVCPV(f, dInfo.Name, pvSource, false)
			}
			r.volType = fmt.Sprintf("%s-preprovisionedPV", dInfo.Name)
		}
	case testpatterns.DynamicPV:
		framework.Logf("Creating resource for dynamic PV")
		if dDriver, ok := driver.(drivers.DynamicPVTestDriver); ok {
			claimSize := "2Gi"
			r.sc = dDriver.GetDynamicProvisionStorageClass(fsType)

			// NOTE(review): r.sc.Name is dereferenced here before the
			// `r.sc != nil` check below — this assumes
			// GetDynamicProvisionStorageClass never returns nil; confirm.
			By("creating a StorageClass " + r.sc.Name)
			var err error
			r.sc, err = cs.StorageV1().StorageClasses().Create(r.sc)
			Expect(err).NotTo(HaveOccurred())

			if r.sc != nil {
				r.volSource, r.pv, r.pvc = createVolumeSourceWithPVCPVFromDynamicProvisionSC(
					f, dInfo.Name, claimSize, r.sc, false, nil)
			}
			r.volType = fmt.Sprintf("%s-dynamicPV", dInfo.Name)
		}
	default:
		framework.Failf("genericVolumeTestResource doesn't support: %s", volType)
	}

	// A nil volSource means the driver declined this combination; skip.
	if r.volSource == nil {
		framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, volType)
	}
}
|
||||
|
||||
// cleanupResource cleans up genericVolumeTestResource: the PV/PVC pair (or
// just the PVC for dynamic provisioning), the StorageClass, and the backing
// driver volume.
func (r *genericVolumeTestResource) cleanupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
	dInfo := driver.GetDriverInfo()
	f := dInfo.Framework
	volType := pattern.VolType

	if r.pvc != nil || r.pv != nil {
		switch volType {
		case testpatterns.PreprovisionedPV:
			By("Deleting pv and pvc")
			if errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.pv, r.pvc); len(errs) != 0 {
				framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs))
			}
		case testpatterns.DynamicPV:
			By("Deleting pvc")
			// We only delete the PVC so that PV (and disk) can be cleaned up by dynamic provisioner
			// NOTE(review): this branch dereferences both r.pv and r.pvc; it
			// assumes setupResource always sets them together — confirm.
			if r.pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete {
				framework.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isnt %v",
					r.pv.Name, v1.PersistentVolumeReclaimDelete)
			}
			err := framework.DeletePersistentVolumeClaim(f.ClientSet, r.pvc.Name, f.Namespace.Name)
			framework.ExpectNoError(err, "Failed to delete PVC %v", r.pvc.Name)
			err = framework.WaitForPersistentVolumeDeleted(f.ClientSet, r.pv.Name, 5*time.Second, 5*time.Minute)
			framework.ExpectNoError(err, "Persistent Volume %v not deleted by dynamic provisioner", r.pv.Name)
		default:
			framework.Failf("Found PVC (%v) or PV (%v) but not running Preprovisioned or Dynamic test pattern", r.pvc, r.pv)
		}
	}

	if r.sc != nil {
		By("Deleting sc")
		deleteStorageClass(f.ClientSet, r.sc.Name)
	}

	// Cleanup volume for pre-provisioned volume tests
	drivers.DeleteVolume(driver, volType, r.driverTestResource)
}
|
||||
|
||||
func createVolumeSourceWithPVCPV(
|
||||
f *framework.Framework,
|
||||
name string,
|
||||
pvSource *v1.PersistentVolumeSource,
|
||||
readOnly bool,
|
||||
) (*v1.VolumeSource, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
|
||||
pvConfig := framework.PersistentVolumeConfig{
|
||||
NamePrefix: fmt.Sprintf("%s-", name),
|
||||
StorageClassName: f.Namespace.Name,
|
||||
PVSource: *pvSource,
|
||||
}
|
||||
pvcConfig := framework.PersistentVolumeClaimConfig{
|
||||
StorageClassName: &f.Namespace.Name,
|
||||
}
|
||||
|
||||
framework.Logf("Creating PVC and PV")
|
||||
pv, pvc, err := framework.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false)
|
||||
Expect(err).NotTo(HaveOccurred(), "PVC, PV creation failed")
|
||||
|
||||
err = framework.WaitOnPVandPVC(f.ClientSet, f.Namespace.Name, pv, pvc)
|
||||
Expect(err).NotTo(HaveOccurred(), "PVC, PV failed to bind")
|
||||
|
||||
volSource := &v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: pvc.Name,
|
||||
ReadOnly: readOnly,
|
||||
},
|
||||
}
|
||||
return volSource, pv, pvc
|
||||
}
|
||||
|
||||
// createVolumeSourceWithPVCPVFromDynamicProvisionSC creates a PVC against the
// given StorageClass, waits for dynamic provisioning to bind it, and returns a
// VolumeSource referencing the claim together with the bound PV and PVC.
func createVolumeSourceWithPVCPVFromDynamicProvisionSC(
	f *framework.Framework,
	name string,
	claimSize string,
	sc *storagev1.StorageClass,
	readOnly bool,
	volMode *v1.PersistentVolumeMode,
) (*v1.VolumeSource, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
	cs := f.ClientSet
	ns := f.Namespace.Name

	By("creating a claim")
	pvc := getClaim(claimSize, ns)
	pvc.Spec.StorageClassName = &sc.Name
	// Only set VolumeMode when the caller explicitly requested one.
	if volMode != nil {
		pvc.Spec.VolumeMode = volMode
	}

	var err error
	pvc, err = cs.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
	Expect(err).NotTo(HaveOccurred())

	// Wait for the provisioner to bind the claim.
	err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
	Expect(err).NotTo(HaveOccurred())

	// Re-fetch the claim to learn the name of the bound volume.
	pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())

	pv, err := cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())

	volSource := &v1.VolumeSource{
		PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
			ClaimName: pvc.Name,
			ReadOnly:  readOnly,
		},
	}
	return volSource, pv, pvc
}
|
||||
|
||||
func getClaim(claimSize string, ns string) *v1.PersistentVolumeClaim {
|
||||
claim := v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "pvc-",
|
||||
Namespace: ns,
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadWriteOnce,
|
||||
},
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse(claimSize),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return &claim
|
||||
}
|
||||
|
||||
// deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found"
|
||||
func deleteStorageClass(cs clientset.Interface, className string) {
|
||||
err := cs.StorageV1().StorageClasses().Delete(className, nil)
|
||||
if err != nil && !apierrs.IsNotFound(err) {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
}
|
||||
759
vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/subpath.go
generated
vendored
Normal file
759
vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/subpath.go
generated
vendored
Normal file
@@ -0,0 +1,759 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package testsuites
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/util/rand"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/drivers"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var (
	// volumePath is the mount path of the test volume inside the pod.
	volumePath = "/test-volume"
	// volumeName is the name of the test volume in the pod spec.
	volumeName = "test-volume"
	// probeVolumePath is the mount path of the liveness-probe volume.
	probeVolumePath = "/probe-volume"
	// probeFilePath is the file inside the probe volume.
	probeFilePath = probeVolumePath + "/probe-file"
	// fileName is the file written and read by the subpath tests.
	fileName = "test-file"
	// retryDuration is a bare int — presumably seconds; confirm units at
	// the call sites (NOTE(review)).
	retryDuration = 20
	// mountImage is the e2e mount-tester container image.
	mountImage = imageutils.GetE2EImage(imageutils.Mounttest)
)
|
||||
|
||||
// subPathTestSuite is a TestSuite exercising subPath volume mounts.
type subPathTestSuite struct {
	tsInfo TestSuiteInfo
}

// Compile-time check that subPathTestSuite implements TestSuite.
var _ TestSuite = &subPathTestSuite{}
|
||||
|
||||
// InitSubPathTestSuite returns subPathTestSuite that implements TestSuite interface
|
||||
func InitSubPathTestSuite() TestSuite {
|
||||
return &subPathTestSuite{
|
||||
tsInfo: TestSuiteInfo{
|
||||
name: "subPath",
|
||||
testPatterns: []testpatterns.TestPattern{
|
||||
testpatterns.DefaultFsInlineVolume,
|
||||
testpatterns.DefaultFsPreprovisionedPV,
|
||||
testpatterns.DefaultFsDynamicPV,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// getTestSuiteInfo returns the TestSuiteInfo for this TestSuite.
func (s *subPathTestSuite) getTestSuiteInfo() TestSuiteInfo {
	return s.tsInfo
}
|
||||
|
||||
// skipUnsupportedTest is intentionally a no-op: the subPath suite has no
// suite-specific restrictions beyond the generic driver/pattern checks.
func (s *subPathTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) {
}
|
||||
|
||||
func createSubPathTestInput(pattern testpatterns.TestPattern, resource subPathTestResource) subPathTestInput {
|
||||
driver := resource.driver
|
||||
dInfo := driver.GetDriverInfo()
|
||||
f := dInfo.Framework
|
||||
subPath := f.Namespace.Name
|
||||
subPathDir := filepath.Join(volumePath, subPath)
|
||||
|
||||
return subPathTestInput{
|
||||
f: f,
|
||||
subPathDir: subPathDir,
|
||||
filePathInSubpath: filepath.Join(volumePath, fileName),
|
||||
filePathInVolume: filepath.Join(subPathDir, fileName),
|
||||
volType: resource.volType,
|
||||
pod: resource.pod,
|
||||
formatPod: resource.formatPod,
|
||||
volSource: resource.genericVolumeTestResource.volSource,
|
||||
roVol: resource.roVolSource,
|
||||
}
|
||||
}
|
||||
|
||||
// execTest registers the subPath tests for a driver/pattern pair inside a
// Ginkgo Context. Resource setup/teardown happens in BeforeEach/AfterEach so
// each It gets a fresh pod and volume.
func (s *subPathTestSuite) execTest(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
	Context(getTestNameStr(s, pattern), func() {
		var (
			resource     subPathTestResource
			input        subPathTestInput
			needsCleanup bool
		)

		BeforeEach(func() {
			needsCleanup = false
			// Skip unsupported tests to avoid unnecessary resource initialization
			skipUnsupportedTest(s, driver, pattern)
			needsCleanup = true

			// Setup test resource for driver and testpattern
			resource = subPathTestResource{}
			resource.setupResource(driver, pattern)

			// Create test input
			input = createSubPathTestInput(pattern, resource)
		})

		AfterEach(func() {
			// Only tear down when setup actually ran (test was not skipped).
			if needsCleanup {
				resource.cleanupResource(driver, pattern)
			}
		})

		testSubPath(&input)
	})
}
|
||||
|
||||
// subPathTestResource holds the pods and volume sources used by the subPath
// tests, on top of the generic volume resources.
type subPathTestResource struct {
	genericVolumeTestResource

	roVolSource *v1.VolumeSource // read-only variant of volSource (nil when unsupported)
	pod         *v1.Pod          // pod that mounts the volume via subPath
	formatPod   *v1.Pod          // pod used to pre-format/initialize the volume
}

// Compile-time check that subPathTestResource implements TestResource.
var _ TestResource = &subPathTestResource{}
|
||||
|
||||
// setupResource sets up subPathTestResource: the generic volume resources
// plus a read-only variant of the volume source, the subpath test pod, and a
// pod used to pre-format the volume.
func (s *subPathTestResource) setupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
	s.driver = driver
	dInfo := s.driver.GetDriverInfo()
	f := dInfo.Framework
	fsType := pattern.FsType
	volType := pattern.VolType

	// Setup generic test resource
	s.genericVolumeTestResource.setupResource(driver, pattern)

	// Setup subPath test dependent resource
	switch volType {
	case testpatterns.InlineVolume:
		// Driver-provided read-only inline source; may remain nil.
		if iDriver, ok := driver.(drivers.InlineVolumeTestDriver); ok {
			s.roVolSource = iDriver.GetVolumeSource(true, fsType, s.genericVolumeTestResource.driverTestResource)
		}
	case testpatterns.PreprovisionedPV:
		// Reuse the PVC created by the generic setup, mounted read-only.
		s.roVolSource = &v1.VolumeSource{
			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
				ClaimName: s.genericVolumeTestResource.pvc.Name,
				ReadOnly:  true,
			},
		}
	case testpatterns.DynamicPV:
		s.roVolSource = &v1.VolumeSource{
			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
				ClaimName: s.genericVolumeTestResource.pvc.Name,
				ReadOnly:  true,
			},
		}
	default:
		framework.Failf("SubPath test doesn't support: %s", volType)
	}

	subPath := f.Namespace.Name
	config := dInfo.Config
	// The test pod mounts the volume under a subPath named after the test
	// namespace; it is pinned to the driver's configured node.
	s.pod = SubpathTestPod(f, subPath, s.volType, s.volSource, true)
	s.pod.Spec.NodeName = config.ClientNodeName
	s.pod.Spec.NodeSelector = config.NodeSelector

	// Pod used to initialize the volume before read-only tests.
	s.formatPod = volumeFormatPod(f, s.volSource)
	s.formatPod.Spec.NodeName = config.ClientNodeName
	s.formatPod.Spec.NodeSelector = config.NodeSelector
}
|
||||
|
||||
func (s *subPathTestResource) cleanupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
|
||||
dInfo := driver.GetDriverInfo()
|
||||
f := dInfo.Framework
|
||||
|
||||
// Cleanup subPath test dependent resource
|
||||
By("Deleting pod")
|
||||
err := framework.DeletePodWithWait(f, f.ClientSet, s.pod)
|
||||
Expect(err).ToNot(HaveOccurred(), "while deleting pod")
|
||||
|
||||
// Cleanup generic test resource
|
||||
s.genericVolumeTestResource.cleanupResource(driver, pattern)
|
||||
}
|
||||
|
||||
// subPathTestInput is the bag of parameters consumed by the individual
// subPath Its registered in testSubPath.
type subPathTestInput struct {
	f                 *framework.Framework
	subPathDir        string           // full path of the subpath dir inside the volume
	filePathInSubpath string           // test file path as seen through the subPath mount
	filePathInVolume  string           // test file path as seen through the full-volume mount
	volType           string           // human-readable volume-type label
	pod               *v1.Pod          // subpath test pod
	formatPod         *v1.Pod          // pod used to pre-format the volume
	volSource         *v1.VolumeSource // writable volume source
	roVol             *v1.VolumeSource // read-only volume source (nil when unsupported)
}
|
||||
|
||||
// testSubPath registers the individual subPath Its against the prepared test
// input: existing/non-existent paths, files as subpaths, attempts to escape
// the volume, container restarts, volume reconstruction, and readOnly mounts.
func testSubPath(input *subPathTestInput) {
	It("should support non-existent path", func() {
		// Write the file in the subPath from container 0
		setWriteCommand(input.filePathInSubpath, &input.pod.Spec.Containers[0])

		// Read it from outside the subPath from container 1
		testReadFile(input.f, input.filePathInVolume, input.pod, 1)
	})

	It("should support existing directory", func() {
		// Create the directory
		setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir))

		// Write the file in the subPath from container 0
		setWriteCommand(input.filePathInSubpath, &input.pod.Spec.Containers[0])

		// Read it from outside the subPath from container 1
		testReadFile(input.f, input.filePathInVolume, input.pod, 1)
	})

	It("should support existing single file", func() {
		// Create the file in the init container
		setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", input.subPathDir, input.filePathInVolume))

		// Read it from inside the subPath from container 0
		testReadFile(input.f, input.filePathInSubpath, input.pod, 0)
	})

	It("should support file as subpath", func() {
		// Create the file in the init container
		setInitCommand(input.pod, fmt.Sprintf("echo %s > %s", input.f.Namespace.Name, input.subPathDir))

		TestBasicSubpath(input.f, input.f.Namespace.Name, input.pod)
	})

	It("should fail if subpath directory is outside the volume [Slow]", func() {
		// Create the subpath outside the volume
		setInitCommand(input.pod, fmt.Sprintf("ln -s /bin %s", input.subPathDir))

		// Pod should fail
		testPodFailSubpath(input.f, input.pod)
	})

	It("should fail if subpath file is outside the volume [Slow]", func() {
		// Create the subpath outside the volume
		setInitCommand(input.pod, fmt.Sprintf("ln -s /bin/sh %s", input.subPathDir))

		// Pod should fail
		testPodFailSubpath(input.f, input.pod)
	})

	It("should fail if non-existent subpath is outside the volume [Slow]", func() {
		// Create the subpath outside the volume
		setInitCommand(input.pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", input.subPathDir))

		// Pod should fail
		testPodFailSubpath(input.f, input.pod)
	})

	It("should fail if subpath with backstepping is outside the volume [Slow]", func() {
		// Create the subpath outside the volume
		setInitCommand(input.pod, fmt.Sprintf("ln -s ../ %s", input.subPathDir))

		// Pod should fail
		testPodFailSubpath(input.f, input.pod)
	})

	It("should support creating multiple subpath from same volumes [Slow]", func() {
		subpathDir1 := filepath.Join(volumePath, "subpath1")
		subpathDir2 := filepath.Join(volumePath, "subpath2")
		filepath1 := filepath.Join("/test-subpath1", fileName)
		filepath2 := filepath.Join("/test-subpath2", fileName)
		setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2))

		// Mount the same volume twice in container 0 under two subPaths.
		addSubpathVolumeContainer(&input.pod.Spec.Containers[0], v1.VolumeMount{
			Name:      volumeName,
			MountPath: "/test-subpath1",
			SubPath:   "subpath1",
		})
		addSubpathVolumeContainer(&input.pod.Spec.Containers[0], v1.VolumeMount{
			Name:      volumeName,
			MountPath: "/test-subpath2",
			SubPath:   "subpath2",
		})

		addMultipleWrites(&input.pod.Spec.Containers[0], filepath1, filepath2)
		testMultipleReads(input.f, input.pod, 0, filepath1, filepath2)
	})

	It("should support restarting containers using directory as subpath [Slow]", func() {
		// Create the directory
		setInitCommand(input.pod, fmt.Sprintf("mkdir -p %v; touch %v", input.subPathDir, probeFilePath))

		testPodContainerRestart(input.f, input.pod)
	})

	It("should support restarting containers using file as subpath [Slow]", func() {
		// Create the file
		setInitCommand(input.pod, fmt.Sprintf("touch %v; touch %v", input.subPathDir, probeFilePath))

		testPodContainerRestart(input.f, input.pod)
	})

	It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() {
		testSubpathReconstruction(input.f, input.pod, false)
	})

	It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() {
		// hostPath variants are excluded from forced-deletion reconstruction.
		if input.volType == "hostPath" || input.volType == "hostPathSymlink" {
			framework.Skipf("%s volume type does not support reconstruction, skipping", input.volType)
		}
		testSubpathReconstruction(input.f, input.pod, true)
	})

	It("should support readOnly directory specified in the volumeMount", func() {
		// Create the directory
		setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir))

		// Write the file in the volume from container 1
		setWriteCommand(input.filePathInVolume, &input.pod.Spec.Containers[1])

		// Read it from inside the subPath from container 0
		input.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
		testReadFile(input.f, input.filePathInSubpath, input.pod, 0)
	})

	It("should support readOnly file specified in the volumeMount", func() {
		// Create the file
		setInitCommand(input.pod, fmt.Sprintf("touch %s", input.subPathDir))

		// Write the file in the volume from container 1
		setWriteCommand(input.subPathDir, &input.pod.Spec.Containers[1])

		// Read it from inside the subPath from container 0
		input.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
		testReadFile(input.f, volumePath, input.pod, 0)
	})

	It("should support existing directories when readOnly specified in the volumeSource", func() {
		if input.roVol == nil {
			framework.Skipf("Volume type %v doesn't support readOnly source", input.volType)
		}

		// Initialize content in the volume while it's writable
		initVolumeContent(input.f, input.pod, input.filePathInVolume, input.filePathInSubpath)

		// Set volume source to read only
		input.pod.Spec.Volumes[0].VolumeSource = *input.roVol

		// Read it from inside the subPath from container 0
		testReadFile(input.f, input.filePathInSubpath, input.pod, 0)
	})

	It("should fail for new directories when readOnly specified in the volumeSource", func() {
		if input.roVol == nil {
			framework.Skipf("Volume type %v doesn't support readOnly source", input.volType)
		}

		// Format the volume while it's writable
		formatVolume(input.f, input.formatPod)

		// Set volume source to read only
		input.pod.Spec.Volumes[0].VolumeSource = *input.roVol
		// Pod should fail
		testPodFailSubpathError(input.f, input.pod, "")
	})

	// TODO: add a test case for the same disk with two partitions
}
|
||||
|
||||
// TestBasicSubpath runs basic subpath test
// It verifies `contents` can be read back at the volume root path.
func TestBasicSubpath(f *framework.Framework, contents string, pod *v1.Pod) {
	TestBasicSubpathFile(f, contents, pod, volumePath)
}
|
||||
|
||||
// TestBasicSubpathFile runs basic subpath file test
|
||||
func TestBasicSubpathFile(f *framework.Framework, contents string, pod *v1.Pod, filepath string) {
|
||||
setReadCommand(filepath, &pod.Spec.Containers[0])
|
||||
|
||||
By(fmt.Sprintf("Creating pod %s", pod.Name))
|
||||
f.TestContainerOutput("atomic-volume-subpath", pod, 0, []string{contents})
|
||||
|
||||
By(fmt.Sprintf("Deleting pod %s", pod.Name))
|
||||
err := framework.DeletePodWithWait(f, f.ClientSet, pod)
|
||||
Expect(err).NotTo(HaveOccurred(), "while deleting pod")
|
||||
}
|
||||
|
||||
// SubpathTestPod returns a pod spec for subpath tests
|
||||
func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *v1.VolumeSource, privilegedSecurityContext bool) *v1.Pod {
|
||||
var (
|
||||
suffix = strings.ToLower(fmt.Sprintf("%s-%s", volumeType, rand.String(4)))
|
||||
gracePeriod = int64(1)
|
||||
probeVolumeName = "liveness-probe-volume"
|
||||
)
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("pod-subpath-test-%s", suffix),
|
||||
Namespace: f.Namespace.Name,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
InitContainers: []v1.Container{
|
||||
{
|
||||
Name: fmt.Sprintf("init-volume-%s", suffix),
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: volumePath,
|
||||
},
|
||||
{
|
||||
Name: probeVolumeName,
|
||||
MountPath: probeVolumePath,
|
||||
},
|
||||
},
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
Privileged: &privilegedSecurityContext,
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: fmt.Sprintf("test-container-subpath-%s", suffix),
|
||||
Image: mountImage,
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: volumePath,
|
||||
SubPath: subpath,
|
||||
},
|
||||
{
|
||||
Name: probeVolumeName,
|
||||
MountPath: probeVolumePath,
|
||||
},
|
||||
},
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
Privileged: &privilegedSecurityContext,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: fmt.Sprintf("test-container-volume-%s", suffix),
|
||||
Image: mountImage,
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: volumePath,
|
||||
},
|
||||
{
|
||||
Name: probeVolumeName,
|
||||
MountPath: probeVolumePath,
|
||||
},
|
||||
},
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
Privileged: &privilegedSecurityContext,
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
TerminationGracePeriodSeconds: &gracePeriod,
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: volumeName,
|
||||
VolumeSource: *source,
|
||||
},
|
||||
{
|
||||
Name: probeVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
SecurityContext: &v1.PodSecurityContext{
|
||||
SELinuxOptions: &v1.SELinuxOptions{
|
||||
Level: "s0:c0,c1",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// volumeFormatPod returns a Pod that does nothing but will cause the plugin to format a filesystem
|
||||
// on first use
|
||||
func volumeFormatPod(f *framework.Framework, volumeSource *v1.VolumeSource) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("volume-prep-%s", f.Namespace.Name),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: fmt.Sprintf("init-volume-%s", f.Namespace.Name),
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/sh", "-ec", "echo nothing"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: "/vol",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: volumeName,
|
||||
VolumeSource: *volumeSource,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func clearSubpathPodCommands(pod *v1.Pod) {
|
||||
pod.Spec.InitContainers[0].Command = nil
|
||||
pod.Spec.Containers[0].Args = nil
|
||||
pod.Spec.Containers[1].Args = nil
|
||||
}
|
||||
|
||||
func setInitCommand(pod *v1.Pod, command string) {
|
||||
pod.Spec.InitContainers[0].Command = []string{"/bin/sh", "-ec", command}
|
||||
}
|
||||
|
||||
func setWriteCommand(file string, container *v1.Container) {
|
||||
container.Args = []string{
|
||||
fmt.Sprintf("--new_file_0644=%v", file),
|
||||
fmt.Sprintf("--file_mode=%v", file),
|
||||
}
|
||||
}
|
||||
|
||||
func addSubpathVolumeContainer(container *v1.Container, volumeMount v1.VolumeMount) {
|
||||
existingMounts := container.VolumeMounts
|
||||
container.VolumeMounts = append(existingMounts, volumeMount)
|
||||
}
|
||||
|
||||
func addMultipleWrites(container *v1.Container, file1 string, file2 string) {
|
||||
container.Args = []string{
|
||||
fmt.Sprintf("--new_file_0644=%v", file1),
|
||||
fmt.Sprintf("--new_file_0666=%v", file2),
|
||||
}
|
||||
}
|
||||
|
||||
func testMultipleReads(f *framework.Framework, pod *v1.Pod, containerIndex int, file1 string, file2 string) {
|
||||
By(fmt.Sprintf("Creating pod %s", pod.Name))
|
||||
f.TestContainerOutput("multi_subpath", pod, containerIndex, []string{
|
||||
"content of file \"" + file1 + "\": mount-tester new file",
|
||||
"content of file \"" + file2 + "\": mount-tester new file",
|
||||
})
|
||||
}
|
||||
|
||||
func setReadCommand(file string, container *v1.Container) {
|
||||
container.Args = []string{
|
||||
fmt.Sprintf("--file_content_in_loop=%v", file),
|
||||
fmt.Sprintf("--retry_time=%d", retryDuration),
|
||||
}
|
||||
}
|
||||
|
||||
func testReadFile(f *framework.Framework, file string, pod *v1.Pod, containerIndex int) {
|
||||
setReadCommand(file, &pod.Spec.Containers[containerIndex])
|
||||
|
||||
By(fmt.Sprintf("Creating pod %s", pod.Name))
|
||||
f.TestContainerOutput("subpath", pod, containerIndex, []string{
|
||||
"content of file \"" + file + "\": mount-tester new file",
|
||||
})
|
||||
|
||||
By(fmt.Sprintf("Deleting pod %s", pod.Name))
|
||||
err := framework.DeletePodWithWait(f, f.ClientSet, pod)
|
||||
Expect(err).NotTo(HaveOccurred(), "while deleting pod")
|
||||
}
|
||||
|
||||
// testPodFailSubpath asserts that the pod fails and that a Failed event
// mentioning "subPath" is recorded for it.
func testPodFailSubpath(f *framework.Framework, pod *v1.Pod) {
	testPodFailSubpathError(f, pod, "subPath")
}
|
||||
|
||||
func testPodFailSubpathError(f *framework.Framework, pod *v1.Pod, errorMsg string) {
|
||||
By(fmt.Sprintf("Creating pod %s", pod.Name))
|
||||
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
|
||||
Expect(err).ToNot(HaveOccurred(), "while creating pod")
|
||||
defer func() {
|
||||
framework.DeletePodWithWait(f, f.ClientSet, pod)
|
||||
}()
|
||||
err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
|
||||
Expect(err).To(HaveOccurred(), "while waiting for pod to be running")
|
||||
|
||||
By("Checking for subpath error event")
|
||||
selector := fields.Set{
|
||||
"involvedObject.kind": "Pod",
|
||||
"involvedObject.name": pod.Name,
|
||||
"involvedObject.namespace": f.Namespace.Name,
|
||||
"reason": "Failed",
|
||||
}.AsSelector().String()
|
||||
options := metav1.ListOptions{FieldSelector: selector}
|
||||
events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(options)
|
||||
Expect(err).NotTo(HaveOccurred(), "while getting pod events")
|
||||
Expect(len(events.Items)).NotTo(Equal(0), "no events found")
|
||||
Expect(events.Items[0].Message).To(ContainSubstring(errorMsg), fmt.Sprintf("%q error not found", errorMsg))
|
||||
}
|
||||
|
||||
// testPodContainerRestart verifies that an existing subpath mount is detected
// and reused when a container restarts. It forces a restart by deleting the
// file backing container 0's liveness probe (via container 1), waits for the
// restart to be observed, restores the probe file, and then waits for the
// restart count to stabilize.
func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
	pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure

	// Run busybox with a long sleep so both containers stay up until the
	// liveness probe fails.
	pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.BusyBox)
	pod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
	pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
	pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}

	// Add liveness probe to subpath container
	pod.Spec.Containers[0].LivenessProbe = &v1.Probe{
		Handler: v1.Handler{
			Exec: &v1.ExecAction{
				Command: []string{"cat", probeFilePath},
			},
		},
		InitialDelaySeconds: 1,
		FailureThreshold:    1,
		PeriodSeconds:       2,
	}

	// Start pod
	By(fmt.Sprintf("Creating pod %s", pod.Name))
	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
	Expect(err).ToNot(HaveOccurred(), "while creating pod")
	defer func() {
		// Best-effort cleanup; namespace deletion catches any leftovers.
		framework.DeletePodWithWait(f, f.ClientSet, pod)
	}()
	err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
	Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running")

	By("Failing liveness probe")
	out, err := podContainerExec(pod, 1, fmt.Sprintf("rm %v", probeFilePath))
	framework.Logf("Pod exec output: %v", out)
	Expect(err).ToNot(HaveOccurred(), "while failing liveness probe")

	// Check that container has restarted
	By("Waiting for container to restart")
	restarts := int32(0)
	err = wait.PollImmediate(10*time.Second, 2*time.Minute, func() (bool, error) {
		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, status := range pod.Status.ContainerStatuses {
			if status.Name == pod.Spec.Containers[0].Name {
				framework.Logf("Container %v, restarts: %v", status.Name, status.RestartCount)
				restarts = status.RestartCount
				if restarts > 0 {
					framework.Logf("Container has restart count: %v", restarts)
					return true, nil
				}
			}
		}
		return false, nil
	})
	Expect(err).ToNot(HaveOccurred(), "while waiting for container to restart")

	// Fix liveness probe
	By("Rewriting the file")
	writeCmd := fmt.Sprintf("echo test-after > %v", probeFilePath)
	out, err = podContainerExec(pod, 1, writeCmd)
	framework.Logf("Pod exec output: %v", out)
	Expect(err).ToNot(HaveOccurred(), "while rewriting the probe file")

	// Wait for container restarts to stabilize
	By("Waiting for container to stop restarting")
	stableCount := int(0)
	// The restart count must hold steady for a full minute of polling before
	// it is considered stabilized.
	stableThreshold := int(time.Minute / framework.Poll)
	err = wait.PollImmediate(framework.Poll, 2*time.Minute, func() (bool, error) {
		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, status := range pod.Status.ContainerStatuses {
			if status.Name == pod.Spec.Containers[0].Name {
				if status.RestartCount == restarts {
					stableCount++
					if stableCount > stableThreshold {
						framework.Logf("Container restart has stabilized")
						return true, nil
					}
				} else {
					// Count changed: restart the stability window.
					restarts = status.RestartCount
					stableCount = 0
					framework.Logf("Container has restart count: %v", restarts)
				}
				break
			}
		}
		return false, nil
	})
	Expect(err).ToNot(HaveOccurred(), "while waiting for container to stabilize")
}
|
||||
|
||||
// testSubpathReconstruction verifies that subpath volume mounts are properly
// cleaned up when the pod is deleted (optionally force-deleted) by delegating
// to the shared unmount-on-delete test.
func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete bool) {
	// This is mostly copied from TestVolumeUnmountsFromDeletedPodWithForceOption()

	// Change to busybox
	pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.BusyBox)
	pod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
	pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
	pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}

	// If grace period is too short, then there is not enough time for the volume
	// manager to cleanup the volumes
	gracePeriod := int64(30)
	pod.Spec.TerminationGracePeriodSeconds = &gracePeriod

	By(fmt.Sprintf("Creating pod %s", pod.Name))
	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
	Expect(err).ToNot(HaveOccurred(), "while creating pod")

	err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
	Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running")

	// Re-fetch the pod so the object carries its scheduled node and status.
	pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
	Expect(err).ToNot(HaveOccurred(), "while getting pod")

	utils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true)
}
|
||||
|
||||
func formatVolume(f *framework.Framework, pod *v1.Pod) {
|
||||
By(fmt.Sprintf("Creating pod to format volume %s", pod.Name))
|
||||
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
|
||||
Expect(err).ToNot(HaveOccurred(), "while creating volume init pod")
|
||||
|
||||
err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
|
||||
Expect(err).ToNot(HaveOccurred(), "while waiting for volume init pod to succeed")
|
||||
|
||||
err = framework.DeletePodWithWait(f, f.ClientSet, pod)
|
||||
Expect(err).ToNot(HaveOccurred(), "while deleting volume init pod")
|
||||
}
|
||||
|
||||
func initVolumeContent(f *framework.Framework, pod *v1.Pod, volumeFilepath, subpathFilepath string) {
|
||||
setWriteCommand(volumeFilepath, &pod.Spec.Containers[1])
|
||||
setReadCommand(subpathFilepath, &pod.Spec.Containers[0])
|
||||
|
||||
By(fmt.Sprintf("Creating pod to write volume content %s", pod.Name))
|
||||
f.TestContainerOutput("subpath", pod, 0, []string{
|
||||
"content of file \"" + subpathFilepath + "\": mount-tester new file",
|
||||
})
|
||||
|
||||
By(fmt.Sprintf("Deleting pod %s", pod.Name))
|
||||
err := framework.DeletePodWithWait(f, f.ClientSet, pod)
|
||||
Expect(err).NotTo(HaveOccurred(), "while deleting pod")
|
||||
|
||||
// This pod spec is going to be reused; reset all the commands
|
||||
clearSubpathPodCommands(pod)
|
||||
}
|
||||
|
||||
func podContainerExec(pod *v1.Pod, containerIndex int, bashExec string) (string, error) {
|
||||
return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", "/bin/sh", "-c", bashExec)
|
||||
}
|
||||
359
vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_io.go
generated
vendored
Normal file
359
vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_io.go
generated
vendored
Normal file
@@ -0,0 +1,359 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
* This test checks that the plugin VolumeSources are working when pseudo-streaming
|
||||
* various write sizes to mounted files.
|
||||
*/
|
||||
|
||||
package testsuites
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/drivers"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
// MD5 hashes of the test file corresponding to each file size.
// Test files are generated in testVolumeIO()
// If test file generation algorithm changes, these must be recomputed.
var md5hashes = map[int64]string{
	testpatterns.FileSizeSmall:  "5c34c2813223a7ca05a3c2f38c0d1710",
	testpatterns.FileSizeMedium: "f2fa202b1ffeedda5f3a58bd1ae81104",
	testpatterns.FileSizeLarge:  "8d763edc71bd16217664793b5a15e403",
}

// volumeIOTestSuite implements the TestSuite interface for volume I/O tests.
type volumeIOTestSuite struct {
	tsInfo TestSuiteInfo // suite name and supported test patterns
}

// Compile-time check that volumeIOTestSuite satisfies TestSuite.
var _ TestSuite = &volumeIOTestSuite{}
|
||||
|
||||
// InitVolumeIOTestSuite returns volumeIOTestSuite that implements TestSuite interface
func InitVolumeIOTestSuite() TestSuite {
	return &volumeIOTestSuite{
		tsInfo: TestSuiteInfo{
			name: "volumeIO",
			// Patterns exercised: inline volume, pre-provisioned PV, and
			// dynamically provisioned PV, all with the default filesystem.
			testPatterns: []testpatterns.TestPattern{
				testpatterns.DefaultFsInlineVolume,
				testpatterns.DefaultFsPreprovisionedPV,
				testpatterns.DefaultFsDynamicPV,
			},
		},
	}
}
|
||||
|
||||
// getTestSuiteInfo returns the static metadata (name, test patterns) of the
// volume I/O suite.
func (t *volumeIOTestSuite) getTestSuiteInfo() TestSuiteInfo {
	return t.tsInfo
}
|
||||
|
||||
// skipUnsupportedTest is intentionally empty: the volume I/O suite defines no
// suite-specific skip conditions beyond the generic driver/pattern checks
// (presumably applied by the shared skipUnsupportedTest helper — verify).
func (t *volumeIOTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) {
}
|
||||
|
||||
func createVolumeIOTestInput(pattern testpatterns.TestPattern, resource genericVolumeTestResource) volumeIOTestInput {
|
||||
var fsGroup *int64
|
||||
driver := resource.driver
|
||||
dInfo := driver.GetDriverInfo()
|
||||
f := dInfo.Framework
|
||||
fileSizes := createFileSizes(dInfo.MaxFileSize)
|
||||
volSource := resource.volSource
|
||||
|
||||
if volSource == nil {
|
||||
framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
|
||||
}
|
||||
|
||||
if dInfo.IsFsGroupSupported {
|
||||
fsGroupVal := int64(1234)
|
||||
fsGroup = &fsGroupVal
|
||||
}
|
||||
|
||||
return volumeIOTestInput{
|
||||
f: f,
|
||||
name: dInfo.Name,
|
||||
config: dInfo.Config,
|
||||
volSource: *volSource,
|
||||
testFile: fmt.Sprintf("%s_io_test_%s", dInfo.Name, f.Namespace.Name),
|
||||
podSec: v1.PodSecurityContext{
|
||||
FSGroup: fsGroup,
|
||||
},
|
||||
fileSizes: fileSizes,
|
||||
}
|
||||
}
|
||||
|
||||
// execTest registers a Ginkgo Context that sets up the generic volume
// resource for the given driver/pattern in BeforeEach, builds the test input,
// runs the volume I/O tests, and tears the resource down in AfterEach only
// when setup got far enough to require cleanup.
func (t *volumeIOTestSuite) execTest(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
	Context(getTestNameStr(t, pattern), func() {
		var (
			resource     genericVolumeTestResource
			input        volumeIOTestInput
			needsCleanup bool
		)

		BeforeEach(func() {
			needsCleanup = false
			// Skip unsupported tests to avoid unnecessary resource initialization
			skipUnsupportedTest(t, driver, pattern)
			needsCleanup = true

			// Setup test resource for driver and testpattern
			resource = genericVolumeTestResource{}
			resource.setupResource(driver, pattern)

			// Create test input
			input = createVolumeIOTestInput(pattern, resource)
		})

		AfterEach(func() {
			if needsCleanup {
				resource.cleanupResource(driver, pattern)
			}
		})

		execTestVolumeIO(&input)
	})
}
|
||||
|
||||
// volumeIOTestInput bundles everything execTestVolumeIO needs to run the
// volume I/O test for one driver/pattern combination.
type volumeIOTestInput struct {
	f         *framework.Framework       // test framework for the current namespace
	name      string                     // driver name
	config    framework.VolumeTestConfig // per-driver volume test configuration
	volSource v1.VolumeSource            // volume under test
	testFile  string                     // base name of the file written into the volume
	podSec    v1.PodSecurityContext      // pod security context (may carry FSGroup)
	fileSizes []int64                    // file sizes to write and verify
}
|
||||
|
||||
// execTestVolumeIO registers the Ginkgo It block that writes files of various
// sizes to the volume and verifies their size and content.
func execTestVolumeIO(input *volumeIOTestInput) {
	It("should write files of various sizes, verify size, validate content [Slow]", func() {
		f := input.f
		cs := f.ClientSet

		err := testVolumeIO(f, cs, input.config, input.volSource, &input.podSec, input.testFile, input.fileSizes)
		Expect(err).NotTo(HaveOccurred())
	})
}
|
||||
|
||||
func createFileSizes(maxFileSize int64) []int64 {
|
||||
allFileSizes := []int64{
|
||||
testpatterns.FileSizeSmall,
|
||||
testpatterns.FileSizeMedium,
|
||||
testpatterns.FileSizeLarge,
|
||||
}
|
||||
fileSizes := []int64{}
|
||||
|
||||
for _, size := range allFileSizes {
|
||||
if size <= maxFileSize {
|
||||
fileSizes = append(fileSizes, size)
|
||||
}
|
||||
}
|
||||
|
||||
return fileSizes
|
||||
}
|
||||
|
||||
// Return the plugin's client pod spec. Use an InitContainer to setup the file i/o test env.
|
||||
func makePodSpec(config framework.VolumeTestConfig, dir, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod {
|
||||
volName := fmt.Sprintf("%s-%s", config.Prefix, "io-volume")
|
||||
|
||||
var gracePeriod int64 = 1
|
||||
return &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: config.Prefix + "-io-client",
|
||||
Labels: map[string]string{
|
||||
"role": config.Prefix + "-io-client",
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
InitContainers: []v1.Container{
|
||||
{
|
||||
Name: config.Prefix + "-io-init",
|
||||
Image: framework.BusyBoxImage,
|
||||
Command: []string{
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
initCmd,
|
||||
},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volName,
|
||||
MountPath: dir,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: config.Prefix + "-io-client",
|
||||
Image: framework.BusyBoxImage,
|
||||
Command: []string{
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
"sleep 3600", // keep pod alive until explicitly deleted
|
||||
},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volName,
|
||||
MountPath: dir,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
TerminationGracePeriodSeconds: &gracePeriod,
|
||||
SecurityContext: podSecContext,
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: volName,
|
||||
VolumeSource: volsrc,
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever, // want pod to fail if init container fails
|
||||
NodeName: config.ClientNodeName,
|
||||
NodeSelector: config.NodeSelector,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// writeToFile writes `fsize` bytes to `fpath` in the pod, using dd and the
// `ddInput` file as source: it appends fsize/MinFileSize chunks of
// MinFileSize bytes each.
func writeToFile(pod *v1.Pod, fpath, ddInput string, fsize int64) error {
	By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath))
	loopCnt := fsize / testpatterns.MinFileSize
	writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, ddInput, testpatterns.MinFileSize, fpath)
	_, err := utils.PodExec(pod, writeCmd)

	return err
}
|
||||
|
||||
// verifyFile checks that `fpath` in the pod has size expectSize and that its
// MD5 hash matches the precomputed value in md5hashes for that size.
// The ddInput parameter is currently unused here (the hash table is the
// source of truth).
func verifyFile(pod *v1.Pod, fpath string, expectSize int64, ddInput string) error {
	By("verifying file size")
	rtnstr, err := utils.PodExec(pod, fmt.Sprintf("stat -c %%s %s", fpath))
	if err != nil || rtnstr == "" {
		return fmt.Errorf("unable to get file size via `stat %s`: %v", fpath, err)
	}
	size, err := strconv.Atoi(strings.TrimSuffix(rtnstr, "\n"))
	if err != nil {
		return fmt.Errorf("unable to convert string %q to int: %v", rtnstr, err)
	}
	if int64(size) != expectSize {
		return fmt.Errorf("size of file %s is %d, expected %d", fpath, size, expectSize)
	}

	By("verifying file hash")
	rtnstr, err = utils.PodExec(pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath))
	if err != nil {
		return fmt.Errorf("unable to test file hash via `md5sum %s`: %v", fpath, err)
	}
	actualHash := strings.TrimSuffix(rtnstr, "\n")
	expectedHash, ok := md5hashes[expectSize]
	if !ok {
		return fmt.Errorf("File hash is unknown for file size %d. Was a new file size added to the test suite?",
			expectSize)
	}
	if actualHash != expectedHash {
		return fmt.Errorf("MD5 hash is incorrect for file %s with size %d. Expected: `%s`; Actual: `%s`",
			fpath, expectSize, expectedHash, actualHash)
	}

	return nil
}
|
||||
|
||||
// deleteFile deletes `fpath` in the pod to save some disk space on the host.
// Delete errors are logged but ignored, since the test dir will be removed
// when the volume is unmounted anyway.
func deleteFile(pod *v1.Pod, fpath string) {
	By(fmt.Sprintf("deleting test file %s...", fpath))
	_, err := utils.PodExec(pod, fmt.Sprintf("rm -f %s", fpath))
	if err != nil {
		// keep going, the test dir will be deleted when the volume is unmounted
		framework.Logf("unable to delete test file %s: %v\nerror ignored, continuing test", fpath, err)
	}
}
|
||||
|
||||
// Create the client pod and create files of the sizes passed in by the `fsizes` parameter. Delete the
|
||||
// client pod and the new files when done.
|
||||
// Note: the file name is appended to "/opt/<Prefix>/<namespace>", eg. "/opt/nfs/e2e-.../<file>".
|
||||
// Note: nil can be passed for the podSecContext parm, in which case it is ignored.
|
||||
// Note: `fsizes` values are enforced to each be at least `MinFileSize` and a multiple of `MinFileSize`
|
||||
// bytes.
|
||||
func testVolumeIO(f *framework.Framework, cs clientset.Interface, config framework.VolumeTestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) {
|
||||
dir := path.Join("/opt", config.Prefix, config.Namespace)
|
||||
ddInput := path.Join(dir, "dd_if")
|
||||
writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB value
|
||||
loopCnt := testpatterns.MinFileSize / int64(len(writeBlk))
|
||||
// initContainer cmd to create and fill dd's input file. The initContainer is used to create
|
||||
// the `dd` input file which is currently 1MiB. Rather than store a 1MiB go value, a loop is
|
||||
// used to create a 1MiB file in the target directory.
|
||||
initCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do echo -n %s >>%s; let i+=1; done", loopCnt, writeBlk, ddInput)
|
||||
|
||||
clientPod := makePodSpec(config, dir, initCmd, volsrc, podSecContext)
|
||||
|
||||
By(fmt.Sprintf("starting %s", clientPod.Name))
|
||||
podsNamespacer := cs.CoreV1().Pods(config.Namespace)
|
||||
clientPod, err = podsNamespacer.Create(clientPod)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err)
|
||||
}
|
||||
defer func() {
|
||||
// note the test dir will be removed when the kubelet unmounts it
|
||||
By(fmt.Sprintf("deleting client pod %q...", clientPod.Name))
|
||||
e := framework.DeletePodWithWait(f, cs, clientPod)
|
||||
if e != nil {
|
||||
framework.Logf("client pod failed to delete: %v", e)
|
||||
if err == nil { // delete err is returned if err is not set
|
||||
err = e
|
||||
}
|
||||
} else {
|
||||
framework.Logf("sleeping a bit so kubelet can unmount and detach the volume")
|
||||
time.Sleep(framework.PodCleanupTimeout)
|
||||
}
|
||||
}()
|
||||
|
||||
err = framework.WaitForPodRunningInNamespace(cs, clientPod)
|
||||
if err != nil {
|
||||
return fmt.Errorf("client pod %q not running: %v", clientPod.Name, err)
|
||||
}
|
||||
|
||||
// create files of the passed-in file sizes and verify test file size and content
|
||||
for _, fsize := range fsizes {
|
||||
// file sizes must be a multiple of `MinFileSize`
|
||||
if math.Mod(float64(fsize), float64(testpatterns.MinFileSize)) != 0 {
|
||||
fsize = fsize/testpatterns.MinFileSize + testpatterns.MinFileSize
|
||||
}
|
||||
fpath := path.Join(dir, fmt.Sprintf("%s-%d", file, fsize))
|
||||
if err = writeToFile(clientPod, fpath, ddInput, fsize); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = verifyFile(clientPod, fpath, fsize, ddInput); err != nil {
|
||||
return err
|
||||
}
|
||||
deleteFile(clientPod, fpath)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
445
vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumemode.go
generated
vendored
Normal file
445
vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumemode.go
generated
vendored
Normal file
@@ -0,0 +1,445 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package testsuites
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
storagev1 "k8s.io/api/storage/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/drivers"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
const (
	// noProvisioner is the provisioner name used for StorageClasses backed by
	// pre-provisioned (static) PVs.
	noProvisioner = "kubernetes.io/no-provisioner"
	// pvNamePrefix is the name prefix for PVs created by this suite.
	pvNamePrefix = "pv"
)

// volumeModeTestSuite implements the TestSuite interface for volume mode
// (filesystem vs. block) tests.
type volumeModeTestSuite struct {
	tsInfo TestSuiteInfo // suite name, feature tag, and supported test patterns
}

// Compile-time check that volumeModeTestSuite satisfies TestSuite.
var _ TestSuite = &volumeModeTestSuite{}
|
||||
|
||||
// InitVolumeModeTestSuite returns volumeModeTestSuite that implements TestSuite interface
func InitVolumeModeTestSuite() TestSuite {
	return &volumeModeTestSuite{
		tsInfo: TestSuiteInfo{
			name:       "volumeMode",
			featureTag: "[Feature:BlockVolume]",
			// Patterns exercised: filesystem and block volume modes, each for
			// pre-provisioned and dynamically provisioned PVs.
			testPatterns: []testpatterns.TestPattern{
				testpatterns.FsVolModePreprovisionedPV,
				testpatterns.FsVolModeDynamicPV,
				testpatterns.BlockVolModePreprovisionedPV,
				testpatterns.BlockVolModeDynamicPV,
			},
		},
	}
}
|
||||
|
||||
// getTestSuiteInfo returns the static metadata (name, feature tag, test
// patterns) of the volume mode suite.
func (t *volumeModeTestSuite) getTestSuiteInfo() TestSuiteInfo {
	return t.tsInfo
}
|
||||
|
||||
// skipUnsupportedTest is intentionally empty: the volume mode suite defines
// no suite-specific skip conditions beyond the generic driver/pattern checks
// (presumably applied by the shared skipUnsupportedTest helper — verify).
func (t *volumeModeTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) {
}
|
||||
|
||||
func createVolumeModeTestInput(pattern testpatterns.TestPattern, resource volumeModeTestResource) volumeModeTestInput {
|
||||
driver := resource.driver
|
||||
dInfo := driver.GetDriverInfo()
|
||||
f := dInfo.Framework
|
||||
|
||||
return volumeModeTestInput{
|
||||
f: f,
|
||||
sc: resource.sc,
|
||||
pvc: resource.pvc,
|
||||
pv: resource.pv,
|
||||
testVolType: pattern.VolType,
|
||||
nodeName: dInfo.Config.ClientNodeName,
|
||||
volMode: pattern.VolMode,
|
||||
isBlockSupported: dInfo.IsBlockSupported,
|
||||
}
|
||||
}
|
||||
|
||||
func getVolumeModeTestFunc(pattern testpatterns.TestPattern, driver drivers.TestDriver) func(*volumeModeTestInput) {
|
||||
dInfo := driver.GetDriverInfo()
|
||||
isBlockSupported := dInfo.IsBlockSupported
|
||||
volMode := pattern.VolMode
|
||||
volType := pattern.VolType
|
||||
|
||||
switch volType {
|
||||
case testpatterns.PreprovisionedPV:
|
||||
if volMode == v1.PersistentVolumeBlock && !isBlockSupported {
|
||||
return testVolumeModeFailForPreprovisionedPV
|
||||
}
|
||||
return testVolumeModeSuccessForPreprovisionedPV
|
||||
case testpatterns.DynamicPV:
|
||||
if volMode == v1.PersistentVolumeBlock && !isBlockSupported {
|
||||
return testVolumeModeFailForDynamicPV
|
||||
}
|
||||
return testVolumeModeSuccessForDynamicPV
|
||||
default:
|
||||
framework.Failf("Volume mode test doesn't support volType: %v", volType)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// execTest registers a Ginkgo Context that selects the success/fail volume
// mode test function for the driver/pattern, sets up the test resource in
// BeforeEach, runs the tests, and tears the resource down in AfterEach only
// when setup got far enough to require cleanup.
func (t *volumeModeTestSuite) execTest(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
	Context(getTestNameStr(t, pattern), func() {
		var (
			resource     volumeModeTestResource
			input        volumeModeTestInput
			testFunc     func(*volumeModeTestInput)
			needsCleanup bool
		)

		// Select the test function outside BeforeEach so the It blocks are
		// registered at spec-construction time.
		testFunc = getVolumeModeTestFunc(pattern, driver)

		BeforeEach(func() {
			needsCleanup = false
			// Skip unsupported tests to avoid unnecessary resource initialization
			skipUnsupportedTest(t, driver, pattern)
			needsCleanup = true

			// Setup test resource for driver and testpattern
			resource = volumeModeTestResource{}
			resource.setupResource(driver, pattern)

			// Create test input
			input = createVolumeModeTestInput(pattern, resource)
		})

		AfterEach(func() {
			if needsCleanup {
				resource.cleanupResource(driver, pattern)
			}
		})

		testFunc(&input)
	})
}
|
||||
|
||||
// volumeModeTestResource holds the Kubernetes objects created for one volume
// mode test run, plus the driver-specific backing volume handle.
type volumeModeTestResource struct {
	driver drivers.TestDriver // driver under test

	sc  *storagev1.StorageClass
	pvc *v1.PersistentVolumeClaim
	pv  *v1.PersistentVolume // populated only for pre-provisioned PV tests (see setupResource)

	driverTestResource interface{} // opaque handle returned by drivers.CreateVolume
}

// Compile-time check that volumeModeTestResource implements TestResource.
var _ TestResource = &volumeModeTestResource{}
// setupResource creates the storage objects needed for the given pattern:
// for pre-provisioned PV tests a StorageClass plus PV/PVC templates wrapping
// a driver-provided volume source, for dynamic PV tests a StorageClass and a
// PVC referencing it. Drivers lacking the needed capability cause a test
// skip rather than a failure.
func (s *volumeModeTestResource) setupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
	s.driver = driver
	dInfo := driver.GetDriverInfo()
	f := dInfo.Framework
	ns := f.Namespace
	fsType := pattern.FsType
	// Immediate binding so the PVC binds without waiting for a consuming pod.
	volBindMode := storagev1.VolumeBindingImmediate
	volMode := pattern.VolMode
	volType := pattern.VolType

	var (
		scName   string
		pvSource *v1.PersistentVolumeSource
	)

	// Create volume for pre-provisioned volume tests
	s.driverTestResource = drivers.CreateVolume(driver, volType)

	switch volType {
	case testpatterns.PreprovisionedPV:
		// Encode namespace, driver and volume mode in the StorageClass name
		// so concurrent test runs don't collide on this cluster-scoped object.
		if volMode == v1.PersistentVolumeBlock {
			scName = fmt.Sprintf("%s-%s-sc-for-block", ns.Name, dInfo.Name)
		} else if volMode == v1.PersistentVolumeFilesystem {
			scName = fmt.Sprintf("%s-%s-sc-for-file", ns.Name, dInfo.Name)
		}
		if pDriver, ok := driver.(drivers.PreprovisionedPVTestDriver); ok {
			pvSource = pDriver.GetPersistentVolumeSource(false, fsType, s.driverTestResource)
			if pvSource == nil {
				framework.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name)
			}

			// Objects are built here but created against the API server by
			// the individual test functions.
			sc, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, volMode, *pvSource)
			s.sc = sc
			s.pv = framework.MakePersistentVolume(pvConfig)
			s.pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns.Name)
		}
	case testpatterns.DynamicPV:
		if dDriver, ok := driver.(drivers.DynamicPVTestDriver); ok {
			s.sc = dDriver.GetDynamicProvisionStorageClass(fsType)
			if s.sc == nil {
				framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
			}
			s.sc.VolumeBindingMode = &volBindMode

			claimSize := "2Gi"
			s.pvc = getClaim(claimSize, ns.Name)
			s.pvc.Spec.StorageClassName = &s.sc.Name
			s.pvc.Spec.VolumeMode = &volMode
		}
	default:
		framework.Failf("Volume mode test doesn't support: %s", volType)
	}
}
// cleanupResource deletes the PV/PVC pair, the StorageClass, and the
// driver-specific backing volume created by setupResource.
func (s *volumeModeTestResource) cleanupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
	dInfo := driver.GetDriverInfo()
	f := dInfo.Framework
	cs := f.ClientSet
	ns := f.Namespace
	volType := pattern.VolType

	By("Deleting pv and pvc")
	errs := framework.PVPVCCleanup(cs, ns.Name, s.pv, s.pvc)
	if len(errs) > 0 {
		// Aggregate so every deletion failure is reported at once.
		framework.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
	}
	By("Deleting sc")
	// sc is nil when setupResource skipped or the driver didn't provide one.
	if s.sc != nil {
		deleteStorageClass(cs, s.sc.Name)
	}

	// Cleanup volume for pre-provisioned volume tests
	drivers.DeleteVolume(driver, volType, s.driverTestResource)
}
// volumeModeTestInput bundles everything a volume mode test case needs,
// captured once per test in BeforeEach (see createVolumeModeTestInput).
type volumeModeTestInput struct {
	f                *framework.Framework
	sc               *storagev1.StorageClass
	pvc              *v1.PersistentVolumeClaim
	pv               *v1.PersistentVolume
	testVolType      testpatterns.TestVolType
	nodeName         string // node the test pod is pinned to
	volMode          v1.PersistentVolumeMode
	isBlockSupported bool // whether the driver supports block volumes
}
// testVolumeModeFailForPreprovisionedPV registers the negative case for
// pre-provisioned PVs: with an unsupported volume mode the pod must fail to
// mount the volume, so pod creation is expected to error out.
func testVolumeModeFailForPreprovisionedPV(input *volumeModeTestInput) {
	It("should fail to create pod by failing to mount volume", func() {
		f := input.f
		cs := f.ClientSet
		ns := f.Namespace
		var err error

		By("Creating sc")
		input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
		Expect(err).NotTo(HaveOccurred())

		By("Creating pv and pvc")
		input.pv, err = cs.CoreV1().PersistentVolumes().Create(input.pv)
		Expect(err).NotTo(HaveOccurred())

		// Prebind pv
		input.pvc.Spec.VolumeName = input.pv.Name
		input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
		Expect(err).NotTo(HaveOccurred())

		// Binding itself should succeed; only the mount is expected to fail.
		framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, input.pv, input.pvc))

		By("Creating pod")
		pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc},
			false, "", false, false, framework.SELinuxLabel,
			nil, input.nodeName, framework.PodStartTimeout)
		// Delete the pod even though creation is expected to fail: the pod
		// object may still exist in an unstartable state. NOTE(review):
		// presumably DeletePodWithWait tolerates a nil pod — confirm.
		defer func() {
			framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
		}()
		Expect(err).To(HaveOccurred())
	})
}
// testVolumeModeSuccessForPreprovisionedPV registers the happy path for
// pre-provisioned PVs: create SC/PV/PVC, bind them, start a pod, and verify
// the volume appears with the expected mode and is readable/writable.
func testVolumeModeSuccessForPreprovisionedPV(input *volumeModeTestInput) {
	It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
		f := input.f
		cs := f.ClientSet
		ns := f.Namespace
		var err error

		By("Creating sc")
		input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
		Expect(err).NotTo(HaveOccurred())

		By("Creating pv and pvc")
		input.pv, err = cs.CoreV1().PersistentVolumes().Create(input.pv)
		Expect(err).NotTo(HaveOccurred())

		// Prebind pv
		input.pvc.Spec.VolumeName = input.pv.Name
		input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
		Expect(err).NotTo(HaveOccurred())

		framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, input.pv, input.pvc))

		By("Creating pod")
		pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc},
			false, "", false, false, framework.SELinuxLabel,
			nil, input.nodeName, framework.PodStartTimeout)
		// Deferred before the error check so the pod is cleaned up even if
		// creation partially succeeded.
		defer func() {
			framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
		}()
		Expect(err).NotTo(HaveOccurred())

		By("Checking if persistent volume exists as expected volume mode")
		checkVolumeModeOfPath(pod, input.volMode, "/mnt/volume1")

		By("Checking if read/write to persistent volume works properly")
		checkReadWriteToPath(pod, input.volMode, "/mnt/volume1")
	})
	// TODO(mkimuram): Add more tests
}
// testVolumeModeFailForDynamicPV registers the negative dynamic-provisioning
// case: with an unsupported volume mode the PVC must fail to reach Bound
// within the provisioning timeout.
func testVolumeModeFailForDynamicPV(input *volumeModeTestInput) {
	It("should fail in binding dynamic provisioned PV to PVC", func() {
		f := input.f
		cs := f.ClientSet
		ns := f.Namespace
		var err error

		By("Creating sc")
		input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
		Expect(err).NotTo(HaveOccurred())

		By("Creating pv and pvc")
		input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
		Expect(err).NotTo(HaveOccurred())

		// The wait is expected to time out: an error here is the success
		// condition for this test.
		err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, input.pvc.Namespace, input.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
		Expect(err).To(HaveOccurred())
	})
}
// testVolumeModeSuccessForDynamicPV registers the happy path for dynamic
// provisioning: create SC and PVC, wait for the claim to bind, start a pod,
// and verify the volume appears with the expected mode and is
// readable/writable.
func testVolumeModeSuccessForDynamicPV(input *volumeModeTestInput) {
	It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
		f := input.f
		cs := f.ClientSet
		ns := f.Namespace
		var err error

		By("Creating sc")
		input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
		Expect(err).NotTo(HaveOccurred())

		By("Creating pv and pvc")
		input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
		Expect(err).NotTo(HaveOccurred())

		err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, input.pvc.Namespace, input.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
		Expect(err).NotTo(HaveOccurred())

		// Re-read the bound claim to learn the provisioned PV's name.
		input.pvc, err = cs.CoreV1().PersistentVolumeClaims(input.pvc.Namespace).Get(input.pvc.Name, metav1.GetOptions{})
		Expect(err).NotTo(HaveOccurred())

		input.pv, err = cs.CoreV1().PersistentVolumes().Get(input.pvc.Spec.VolumeName, metav1.GetOptions{})
		Expect(err).NotTo(HaveOccurred())

		By("Creating pod")
		pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc},
			false, "", false, false, framework.SELinuxLabel,
			nil, input.nodeName, framework.PodStartTimeout)
		// Deferred before the error check so the pod is cleaned up even if
		// creation partially succeeded.
		defer func() {
			framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
		}()
		Expect(err).NotTo(HaveOccurred())

		By("Checking if persistent volume exists as expected volume mode")
		checkVolumeModeOfPath(pod, input.volMode, "/mnt/volume1")

		By("Checking if read/write to persistent volume works properly")
		checkReadWriteToPath(pod, input.volMode, "/mnt/volume1")
	})
	// TODO(mkimuram): Add more tests
}
func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1.VolumeBindingMode,
|
||||
volMode v1.PersistentVolumeMode, pvSource v1.PersistentVolumeSource) (*storagev1.StorageClass,
|
||||
framework.PersistentVolumeConfig, framework.PersistentVolumeClaimConfig) {
|
||||
// StorageClass
|
||||
scConfig := &storagev1.StorageClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: scName,
|
||||
},
|
||||
Provisioner: noProvisioner,
|
||||
VolumeBindingMode: &volBindMode,
|
||||
}
|
||||
// PV
|
||||
pvConfig := framework.PersistentVolumeConfig{
|
||||
PVSource: pvSource,
|
||||
NamePrefix: pvNamePrefix,
|
||||
StorageClassName: scName,
|
||||
VolumeMode: &volMode,
|
||||
}
|
||||
// PVC
|
||||
pvcConfig := framework.PersistentVolumeClaimConfig{
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
|
||||
StorageClassName: &scName,
|
||||
VolumeMode: &volMode,
|
||||
}
|
||||
|
||||
return scConfig, pvConfig, pvcConfig
|
||||
}
|
||||
|
||||
func checkVolumeModeOfPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
|
||||
if volMode == v1.PersistentVolumeBlock {
|
||||
// Check if block exists
|
||||
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("test -b %s", path))
|
||||
|
||||
// Double check that it's not directory
|
||||
utils.VerifyExecInPodFail(pod, fmt.Sprintf("test -d %s", path), 1)
|
||||
} else {
|
||||
// Check if directory exists
|
||||
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("test -d %s", path))
|
||||
|
||||
// Double check that it's not block
|
||||
utils.VerifyExecInPodFail(pod, fmt.Sprintf("test -b %s", path), 1)
|
||||
}
|
||||
}
|
||||
|
||||
func checkReadWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
|
||||
if volMode == v1.PersistentVolumeBlock {
|
||||
// random -> file1
|
||||
utils.VerifyExecInPodSucceed(pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
|
||||
// file1 -> dev (write to dev)
|
||||
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
|
||||
// dev -> file2 (read from dev)
|
||||
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
|
||||
// file1 == file2 (check contents)
|
||||
utils.VerifyExecInPodSucceed(pod, "diff /tmp/file1 /tmp/file2")
|
||||
// Clean up temp files
|
||||
utils.VerifyExecInPodSucceed(pod, "rm -f /tmp/file1 /tmp/file2")
|
||||
|
||||
// Check that writing file to block volume fails
|
||||
utils.VerifyExecInPodFail(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
|
||||
} else {
|
||||
// text -> file1 (write to file)
|
||||
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
|
||||
// grep file1 (read from file and check contents)
|
||||
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("grep 'Hello world.' %s/file1.txt", path))
|
||||
|
||||
// Check that writing to directory as block volume fails
|
||||
utils.VerifyExecInPodFail(pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
|
||||
}
|
||||
}
|
||||
160
vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumes.go
generated
vendored
Normal file
160
vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumes.go
generated
vendored
Normal file
@@ -0,0 +1,160 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This test checks that various VolumeSources are working.
|
||||
|
||||
// test/e2e/common/volumes.go duplicates the GlusterFS test from this file. Any changes made to this
|
||||
// test should be made there as well.
|
||||
|
||||
package testsuites
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/drivers"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
)
|
||||
|
||||
// volumesTestSuite implements the generic "volumes" mountability test suite.
type volumesTestSuite struct {
	tsInfo TestSuiteInfo // suite name and supported test patterns
}

// Compile-time check that volumesTestSuite implements TestSuite.
var _ TestSuite = &volumesTestSuite{}
func InitVolumesTestSuite() TestSuite {
|
||||
return &volumesTestSuite{
|
||||
tsInfo: TestSuiteInfo{
|
||||
name: "volumes",
|
||||
testPatterns: []testpatterns.TestPattern{
|
||||
// Default fsType
|
||||
testpatterns.DefaultFsInlineVolume,
|
||||
testpatterns.DefaultFsPreprovisionedPV,
|
||||
testpatterns.DefaultFsDynamicPV,
|
||||
// ext3
|
||||
testpatterns.Ext3InlineVolume,
|
||||
testpatterns.Ext3PreprovisionedPV,
|
||||
testpatterns.Ext3DynamicPV,
|
||||
// ext4
|
||||
testpatterns.Ext4InlineVolume,
|
||||
testpatterns.Ext4PreprovisionedPV,
|
||||
testpatterns.Ext4DynamicPV,
|
||||
// xfs
|
||||
testpatterns.XfsInlineVolume,
|
||||
testpatterns.XfsPreprovisionedPV,
|
||||
testpatterns.XfsDynamicPV,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// getTestSuiteInfo returns this suite's name and test patterns.
func (t *volumesTestSuite) getTestSuiteInfo() TestSuiteInfo {
	return t.tsInfo
}
func (t *volumesTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) {
|
||||
dInfo := driver.GetDriverInfo()
|
||||
if !dInfo.IsPersistent {
|
||||
framework.Skipf("Driver %q does not provide persistency - skipping", dInfo.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func createVolumesTestInput(pattern testpatterns.TestPattern, resource genericVolumeTestResource) volumesTestInput {
|
||||
var fsGroup *int64
|
||||
driver := resource.driver
|
||||
dInfo := driver.GetDriverInfo()
|
||||
f := dInfo.Framework
|
||||
volSource := resource.volSource
|
||||
|
||||
if volSource == nil {
|
||||
framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
|
||||
}
|
||||
|
||||
if dInfo.IsFsGroupSupported {
|
||||
fsGroupVal := int64(1234)
|
||||
fsGroup = &fsGroupVal
|
||||
}
|
||||
|
||||
return volumesTestInput{
|
||||
f: f,
|
||||
name: dInfo.Name,
|
||||
config: dInfo.Config,
|
||||
fsGroup: fsGroup,
|
||||
tests: []framework.VolumeTest{
|
||||
{
|
||||
Volume: *volSource,
|
||||
File: "index.html",
|
||||
// Must match content
|
||||
ExpectedContent: fmt.Sprintf("Hello from %s from namespace %s",
|
||||
dInfo.Name, f.Namespace.Name),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// execTest registers the volumes test cases for the given driver and pattern
// inside a Ginkgo Context. Resources are created in BeforeEach so
// unsupported combinations can be skipped before anything is provisioned.
func (t *volumesTestSuite) execTest(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
	Context(getTestNameStr(t, pattern), func() {
		var (
			resource     genericVolumeTestResource
			input        volumesTestInput
			needsCleanup bool
		)

		BeforeEach(func() {
			needsCleanup = false
			// Skip unsupported tests to avoid unnecessary resource initialization
			skipUnsupportedTest(t, driver, pattern)
			needsCleanup = true

			// Setup test resource for driver and testpattern
			resource = genericVolumeTestResource{}
			resource.setupResource(driver, pattern)

			// Create test input
			input = createVolumesTestInput(pattern, resource)
		})

		AfterEach(func() {
			// Only tear down when BeforeEach got past the skip check.
			if needsCleanup {
				resource.cleanupResource(driver, pattern)
			}
		})

		// Registers the It() block; it reads the shared input populated by
		// BeforeEach at run time.
		testVolumes(&input)
	})
}
// volumesTestInput bundles everything the mountability test needs, captured
// once per test in BeforeEach (see createVolumesTestInput).
type volumesTestInput struct {
	f       *framework.Framework
	name    string // driver name
	config  framework.VolumeTestConfig
	fsGroup *int64 // non-nil only when the driver supports fsGroup
	tests   []framework.VolumeTest
}
func testVolumes(input *volumesTestInput) {
|
||||
It("should be mountable", func() {
|
||||
f := input.f
|
||||
cs := f.ClientSet
|
||||
defer framework.VolumeTestCleanup(f, input.config)
|
||||
|
||||
volumeTest := input.tests
|
||||
framework.InjectHtml(cs, input.config, volumeTest[0].Volume, volumeTest[0].ExpectedContent)
|
||||
framework.TestVolumeClient(cs, input.config, input.fsGroup, input.tests)
|
||||
})
|
||||
}
|
||||
Reference in New Issue
Block a user