Add generated file
This PR adds generated files under pkg/client and the vendor folder.
vendor/k8s.io/kubernetes/test/e2e/storage/volumes.go (new file, generated, vendored, 572 lines)
@@ -0,0 +1,572 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
 * This test checks that various VolumeSources are working.
 *
 * There are two ways to test the volumes:
 * 1) With a containerized server (NFS, Ceph, Gluster, iSCSI, ...)
 * The test creates a server pod, exporting a simple 'index.html' file.
 * Then it uses the appropriate VolumeSource to import this file into a client pod
 * and checks that the pod can see the file. It does so by importing the file
 * into the web server root and loading the index.html from it.
 *
 * These tests work only when privileged containers are allowed; exporting
 * various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
 * other privileged magic in the server pod.
 *
 * Note that the server containers are for testing purposes only and should not
 * be used in production.
 *
 * 2) With a server outside of Kubernetes (Cinder, ...)
 * An appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
 * the tested Kubernetes cluster. The test itself creates a new volume,
 * and checks that Kubernetes can use it as a volume.
 */

// test/e2e/common/volumes.go duplicates the GlusterFS test from this file. Any changes made to this
// test should be made there as well.

package storage

import (
	"os/exec"
	"strings"
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	clientset "k8s.io/client-go/kubernetes"
	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere"
)

func DeleteCinderVolume(name string) error {
	// Try to delete the volume for several seconds - it takes
	// a while for the plugin to detach it.
	var output []byte
	var err error
	timeout := time.Second * 120

	framework.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
		output, err = exec.Command("cinder", "delete", name).CombinedOutput()
		if err == nil {
			framework.Logf("Cinder volume %s deleted", name)
			return nil
		}
		framework.Logf("Failed to delete volume %s: %v", name, err)
	}
	framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:]))
	return err
}

// These tests need privileged containers, which are disabled by default.
var _ = utils.SIGDescribe("Volumes", func() {
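	// NewDefaultFramework gives each It block below its own fresh namespace,
	// created in a framework-level BeforeEach that runs before ours.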
	f := framework.NewDefaultFramework("volume")

	// note that namespace deletion is handled by delete-namespace flag
	// filled inside BeforeEach
	var cs clientset.Interface
	var namespace *v1.Namespace

	BeforeEach(func() {
		cs = f.ClientSet
		namespace = f.Namespace
	})

	////////////////////////////////////////////////////////////////////////
	// NFS
	////////////////////////////////////////////////////////////////////////

	Describe("NFS", func() {
		It("should be mountable", func() {
			config, _, serverIP := framework.NewNFSServer(cs, namespace.Name, []string{})
			defer framework.VolumeTestCleanup(f, config)

			tests := []framework.VolumeTest{
				{
					Volume: v1.VolumeSource{
						NFS: &v1.NFSVolumeSource{
							Server:   serverIP,
							Path:     "/",
							ReadOnly: true,
						},
					},
					File: "index.html",
					// Must match content of test/images/volumes-tester/nfs/index.html
					ExpectedContent: "Hello from NFS!",
				},
			}
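			// TestVolumeClient starts a client pod mounting each VolumeSource above
			// and checks that File contains ExpectedContent; nil means no fsGroup is set.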
			framework.TestVolumeClient(cs, config, nil, tests)
		})
	})

	////////////////////////////////////////////////////////////////////////
	// Gluster
	////////////////////////////////////////////////////////////////////////

	Describe("GlusterFS", func() {
		It("should be mountable", func() {
			//TODO (copejon) GFS is not supported on debian image.
			framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu")

			// create gluster server and endpoints
			config, _, _ := framework.NewGlusterfsServer(cs, namespace.Name)
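			// NewGlusterfsServer also creates an Endpoints object named
			// config.Prefix+"-server"; VolumeTestCleanup does not remove it,
			// so the defer below deletes it explicitly.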
			name := config.Prefix + "-server"
			defer func() {
				framework.VolumeTestCleanup(f, config)
				err := cs.CoreV1().Endpoints(namespace.Name).Delete(name, nil)
				Expect(err).NotTo(HaveOccurred(), "defer: Gluster delete endpoints failed")
			}()

			tests := []framework.VolumeTest{
				{
					Volume: v1.VolumeSource{
						Glusterfs: &v1.GlusterfsVolumeSource{
							EndpointsName: name,
							// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
							Path:     "test_vol",
							ReadOnly: true,
						},
					},
					File: "index.html",
					// Must match content of test/images/volumes-tester/gluster/index.html
					ExpectedContent: "Hello from GlusterFS!",
				},
			}
			framework.TestVolumeClient(cs, config, nil, tests)
		})
	})

	////////////////////////////////////////////////////////////////////////
	// iSCSI
	////////////////////////////////////////////////////////////////////////

	// The test needs privileged containers, which are disabled by default.
	// Also, make sure that the iscsiadm utility and iscsi target kernel modules
	// are installed on all nodes!
	// Run the test with "go run hack/e2e.go ... --ginkgo.focus=iSCSI"

	Describe("iSCSI [Feature:Volumes]", func() {
		It("should be mountable", func() {
			config, _, serverIP := framework.NewISCSIServer(cs, namespace.Name)
			defer framework.VolumeTestCleanup(f, config)

			tests := []framework.VolumeTest{
				{
					Volume: v1.VolumeSource{
						ISCSI: &v1.ISCSIVolumeSource{
							TargetPortal: serverIP + ":3260",
							// from test/images/volumes-tester/iscsi/initiatorname.iscsi
							IQN:    "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c",
							Lun:    0,
							FSType: "ext2",
						},
					},
					File: "index.html",
					// Must match content of test/images/volumes-tester/iscsi/block.tar.gz
					ExpectedContent: "Hello from iSCSI",
				},
			}
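			// 1234 is an arbitrary non-root GID; passing a non-nil fsGroup makes the
			// client pod run with it, so the test also checks that the plugin applies
			// group ownership to the mounted filesystem.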
			fsGroup := int64(1234)
			framework.TestVolumeClient(cs, config, &fsGroup, tests)
		})
	})

	////////////////////////////////////////////////////////////////////////
	// Ceph RBD
	////////////////////////////////////////////////////////////////////////

	Describe("Ceph RBD [Feature:Volumes]", func() {
		It("should be mountable", func() {
			config, _, secret, serverIP := framework.NewRBDServer(cs, namespace.Name)
			defer framework.VolumeTestCleanup(f, config)
			defer cs.CoreV1().Secrets(config.Namespace).Delete(secret.Name, nil)

			tests := []framework.VolumeTest{
				{
					Volume: v1.VolumeSource{
						RBD: &v1.RBDVolumeSource{
							CephMonitors: []string{serverIP},
							RBDPool:      "rbd",
							RBDImage:     "foo",
							RadosUser:    "admin",
							SecretRef: &v1.LocalObjectReference{
								Name: secret.Name,
							},
							FSType: "ext2",
						},
					},
					File: "index.html",
					// Must match content of test/images/volumes-tester/rbd/create_block.sh
					ExpectedContent: "Hello from RBD",
				},
			}
			fsGroup := int64(1234)
			framework.TestVolumeClient(cs, config, &fsGroup, tests)
		})
	})

	////////////////////////////////////////////////////////////////////////
	// Ceph
	////////////////////////////////////////////////////////////////////////
	Describe("CephFS [Feature:Volumes]", func() {
		It("should be mountable", func() {
			config, _, secret, serverIP := framework.NewRBDServer(cs, namespace.Name)
			defer framework.VolumeTestCleanup(f, config)
			defer cs.CoreV1().Secrets(config.Namespace).Delete(secret.Name, nil)

			tests := []framework.VolumeTest{
				{
					Volume: v1.VolumeSource{
						CephFS: &v1.CephFSVolumeSource{
							Monitors:  []string{serverIP + ":6789"},
							User:      "kube",
							SecretRef: &v1.LocalObjectReference{Name: secret.Name},
							ReadOnly:  true,
						},
					},
					File: "index.html",
					// Must match content of test/images/volumes-tester/ceph/index.html
					ExpectedContent: "Hello Ceph!",
				},
			}
			framework.TestVolumeClient(cs, config, nil, tests)
		})
	})

	////////////////////////////////////////////////////////////////////////
	// OpenStack Cinder
	////////////////////////////////////////////////////////////////////////

	// This test assumes that OpenStack client tools are installed
	// (/usr/bin/nova, /usr/bin/cinder and /usr/bin/keystone)
	// and that the usual OpenStack authentication env. variables are set
	// (OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME at least).
	Describe("Cinder [Feature:Volumes]", func() {
		It("should be mountable", func() {
			framework.SkipUnlessProviderIs("openstack")
			config := framework.VolumeTestConfig{
				Namespace: namespace.Name,
				Prefix:    "cinder",
			}

			// We assume that namespace.Name is a random string
			volumeName := namespace.Name
			By("creating a test Cinder volume")
			output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput()
			outputString := string(output[:])
			framework.Logf("cinder output:\n%s", outputString)
			Expect(err).NotTo(HaveOccurred())

			defer DeleteCinderVolume(volumeName)

			// Parse 'id' from stdout. Expected format:
			// | attachments | [] |
			// | availability_zone | nova |
			// ...
			// | id | 1d6ff08f-5d1c-41a4-ad72-4ef872cae685 |
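			// strings.Fields splits each table row into exactly five tokens, e.g.
			// "|", "id", "|", "<uuid>", "|", so fields[1] is the key and fields[3]
			// is its value.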
volumeID := ""
|
||||
for _, line := range strings.Split(outputString, "\n") {
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) != 5 {
|
||||
continue
|
||||
}
|
||||
if fields[1] != "id" {
|
||||
continue
|
||||
}
|
||||
volumeID = fields[3]
|
||||
break
|
||||
}
|
||||
framework.Logf("Volume ID: %s", volumeID)
|
||||
Expect(volumeID).NotTo(Equal(""))

			defer func() {
				framework.Logf("Running volumeTestCleanup")
				framework.VolumeTestCleanup(f, config)
			}()

			tests := []framework.VolumeTest{
				{
					Volume: v1.VolumeSource{
						Cinder: &v1.CinderVolumeSource{
							VolumeID: volumeID,
							FSType:   "ext3",
							ReadOnly: false,
						},
					},
					File: "index.html",
					// Randomize index.html to make sure we don't see the
					// content from previous test runs.
					ExpectedContent: "Hello from Cinder from namespace " + volumeName,
				},
			}
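			// InjectHtml runs a short-lived injector pod that writes ExpectedContent
			// into the volume, so the client pod below reads back freshly written data.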

			framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)

			fsGroup := int64(1234)
			framework.TestVolumeClient(cs, config, &fsGroup, tests)
		})
	})

	////////////////////////////////////////////////////////////////////////
	// GCE PD
	////////////////////////////////////////////////////////////////////////
	Describe("PD", func() {
		var config framework.VolumeTestConfig

		BeforeEach(func() {
			framework.SkipUnlessProviderIs("gce", "gke")
			config = framework.VolumeTestConfig{
				Namespace: namespace.Name,
				Prefix:    "pd",
				// PD will be created in the framework.TestContext.CloudConfig.Zone zone,
				// so pods should also be scheduled there.
				NodeSelector: map[string]string{
					kubeletapis.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone,
				},
			}
		})

		It("should be mountable with ext3", func() {
			testGCEPD(f, config, cs, "ext3")
		})
		It("should be mountable with ext4", func() {
			testGCEPD(f, config, cs, "ext4")
		})
		It("should be mountable with xfs", func() {
			// xfs is not supported on gci
			// and not installed by default on debian
			framework.SkipUnlessNodeOSDistroIs("ubuntu")
			testGCEPD(f, config, cs, "xfs")
		})
	})

	////////////////////////////////////////////////////////////////////////
	// ConfigMap
	////////////////////////////////////////////////////////////////////////
	Describe("ConfigMap", func() {
		It("should be mountable", func() {
			config := framework.VolumeTestConfig{
				Namespace: namespace.Name,
				Prefix:    "configmap",
			}

			defer framework.VolumeTestCleanup(f, config)
			configMap := &v1.ConfigMap{
				TypeMeta: metav1.TypeMeta{
					Kind:       "ConfigMap",
					APIVersion: "v1",
				},
				ObjectMeta: metav1.ObjectMeta{
					Name: config.Prefix + "-map",
				},
				Data: map[string]string{
					"first":  "this is the first file",
					"second": "this is the second file",
					"third":  "this is the third file",
				},
			}
			if _, err := cs.CoreV1().ConfigMaps(namespace.Name).Create(configMap); err != nil {
				framework.Failf("unable to create test configmap: %v", err)
			}
			defer func() {
				_ = cs.CoreV1().ConfigMaps(namespace.Name).Delete(configMap.Name, nil)
			}()

			// Test one ConfigMap mounted several times to test #28502
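			// (Each VolumeTest below references the same ConfigMap with a different
			// Items projection, so both mounts resolve to a single API object.)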
			tests := []framework.VolumeTest{
				{
					Volume: v1.VolumeSource{
						ConfigMap: &v1.ConfigMapVolumeSource{
							LocalObjectReference: v1.LocalObjectReference{
								Name: config.Prefix + "-map",
							},
							Items: []v1.KeyToPath{
								{
									Key:  "first",
									Path: "firstfile",
								},
							},
						},
					},
					File:            "firstfile",
					ExpectedContent: "this is the first file",
				},
				{
					Volume: v1.VolumeSource{
						ConfigMap: &v1.ConfigMapVolumeSource{
							LocalObjectReference: v1.LocalObjectReference{
								Name: config.Prefix + "-map",
							},
							Items: []v1.KeyToPath{
								{
									Key:  "second",
									Path: "secondfile",
								},
							},
						},
					},
					File:            "secondfile",
					ExpectedContent: "this is the second file",
				},
			}
			framework.TestVolumeClient(cs, config, nil, tests)
		})
	})

	////////////////////////////////////////////////////////////////////////
	// vSphere
	////////////////////////////////////////////////////////////////////////
	Describe("vsphere [Feature:Volumes]", func() {
		It("should be mountable", func() {
			framework.SkipUnlessProviderIs("vsphere")
			vspheretest.Bootstrap(f)
			nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo()
			var volumePath string
			config := framework.VolumeTestConfig{
				Namespace: namespace.Name,
				Prefix:    "vsphere",
			}
			volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef)
			Expect(err).NotTo(HaveOccurred())

			defer func() {
				nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
			}()

			defer func() {
				framework.Logf("Running volumeTestCleanup")
				framework.VolumeTestCleanup(f, config)
			}()

			tests := []framework.VolumeTest{
				{
					Volume: v1.VolumeSource{
						VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
							VolumePath: volumePath,
							FSType:     "ext4",
						},
					},
					File: "index.html",
					// Randomize index.html to make sure we don't see the
					// content from previous test runs.
					ExpectedContent: "Hello from vSphere from namespace " + namespace.Name,
				},
			}

			framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)

			fsGroup := int64(1234)
			framework.TestVolumeClient(cs, config, &fsGroup, tests)
		})
	})

	////////////////////////////////////////////////////////////////////////
	// Azure Disk
	////////////////////////////////////////////////////////////////////////
	Describe("Azure Disk [Feature:Volumes]", func() {
		It("should be mountable [Slow]", func() {
			framework.SkipUnlessProviderIs("azure")
			config := framework.VolumeTestConfig{
				Namespace: namespace.Name,
				Prefix:    "azure",
			}

			By("creating a test azure disk volume")
			volumeName, err := framework.CreatePDWithRetry()
			Expect(err).NotTo(HaveOccurred())
			defer func() {
				framework.DeletePDWithRetry(volumeName)
			}()

			defer func() {
				framework.Logf("Running volumeTestCleanup")
				framework.VolumeTestCleanup(f, config)
			}()
			fsType := "ext4"
			readOnly := false
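			// On Azure, CreatePDWithRetry returns the full disk URI;
			// AzureDiskVolumeSource wants the bare disk name, i.e. the URI's
			// last path segment.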
			diskName := volumeName[(strings.LastIndex(volumeName, "/") + 1):]
			tests := []framework.VolumeTest{
				{
					Volume: v1.VolumeSource{
						AzureDisk: &v1.AzureDiskVolumeSource{
							DiskName:    diskName,
							DataDiskURI: volumeName,
							FSType:      &fsType,
							ReadOnly:    &readOnly,
						},
					},
					File: "index.html",
					// Randomize index.html to make sure we don't see the
					// content from previous test runs.
					ExpectedContent: "Hello from Azure from namespace " + volumeName,
				},
			}

			framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)

			fsGroup := int64(1234)
			framework.TestVolumeClient(cs, config, &fsGroup, tests)
		})
	})
})

func testGCEPD(f *framework.Framework, config framework.VolumeTestConfig, cs clientset.Interface, fs string) {
	By("creating a test gce pd volume")
	volumeName, err := framework.CreatePDWithRetry()
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		// - Get NodeName from the pod spec to which the volume is mounted.
		// - Force detach and delete.
		pod, err := f.PodClient().Get(config.Prefix+"-client", metav1.GetOptions{})
		Expect(err).NotTo(HaveOccurred(), "Failed getting pod %q.", config.Prefix+"-client")
		detachAndDeletePDs(volumeName, []types.NodeName{types.NodeName(pod.Spec.NodeName)})
	}()

	defer func() {
		framework.Logf("Running volumeTestCleanup")
		framework.VolumeTestCleanup(f, config)
	}()

	tests := []framework.VolumeTest{
		{
			Volume: v1.VolumeSource{
				GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
					PDName:   volumeName,
					FSType:   fs,
					ReadOnly: false,
				},
			},
			File: "index.html",
			// Randomize index.html to make sure we don't see the
			// content from previous test runs.
			ExpectedContent: "Hello from GCE from namespace " + volumeName,
		},
	}

	framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)

	fsGroup := int64(1234)
	framework.TestVolumeClient(cs, config, &fsGroup, tests)
}