Add generated file
This PR adds generated files under the pkg/client and vendor folders.
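As a quick orientation for reviewers, the sketch below shows how a spec typically consumes the vendored helpers added in this diff (Bootstrap, TestContext, NodeMapper, VSphere). It is illustrative only, mirroring the pattern in the vendored files, and is not itself part of this change.

package vsphere

import (
	. "github.com/onsi/ginkgo"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

// Illustrative spec: provision and clean up a vmdk on the datacenter that hosts a ready node.
var _ = utils.SIGDescribe("vsphere-example", func() {
	f := framework.NewDefaultFramework("vsphere-example")
	It("provisions a vmdk on the node's datacenter", func() {
		framework.SkipUnlessProviderIs("vsphere")
		// Parses vsphere.conf (VSPHERE_CONF_FILE) and populates TestContext; see bootstrap.go / context.go.
		Bootstrap(f)
		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
		volumePath, err := nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
		framework.ExpectNoError(err)
		defer nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
	})
})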
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/BUILD (generated, vendored, normal file, 80 lines)
@@ -0,0 +1,80 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "bootstrap.go",
        "config.go",
        "connection.go",
        "context.go",
        "nodemapper.go",
        "persistent_volumes-vsphere.go",
        "pv_reclaimpolicy.go",
        "pvc_label_selector.go",
        "vsphere.go",
        "vsphere_common.go",
        "vsphere_scale.go",
        "vsphere_statefulsets.go",
        "vsphere_stress.go",
        "vsphere_utils.go",
        "vsphere_volume_cluster_ds.go",
        "vsphere_volume_datastore.go",
        "vsphere_volume_diskformat.go",
        "vsphere_volume_disksize.go",
        "vsphere_volume_fstype.go",
        "vsphere_volume_master_restart.go",
        "vsphere_volume_node_delete.go",
        "vsphere_volume_node_poweroff.go",
        "vsphere_volume_ops_storm.go",
        "vsphere_volume_perf.go",
        "vsphere_volume_placement.go",
        "vsphere_volume_vpxd_restart.go",
        "vsphere_volume_vsan_policy.go",
    ],
    importpath = "k8s.io/kubernetes/test/e2e/storage/vsphere",
    deps = [
        "//pkg/volume/util:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/e2e/storage/utils:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//vendor/github.com/onsi/gomega:go_default_library",
        "//vendor/github.com/vmware/govmomi:go_default_library",
        "//vendor/github.com/vmware/govmomi/find:go_default_library",
        "//vendor/github.com/vmware/govmomi/object:go_default_library",
        "//vendor/github.com/vmware/govmomi/session:go_default_library",
        "//vendor/github.com/vmware/govmomi/vim25:go_default_library",
        "//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library",
        "//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library",
        "//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
        "//vendor/gopkg.in/gcfg.v1:go_default_library",
        "//vendor/k8s.io/api/apps/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/storage/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/bootstrap.go (generated, vendored, normal file, 59 lines)
@@ -0,0 +1,59 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vsphere

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	"sync"
)

var once sync.Once
var waiting = make(chan bool)
var f *framework.Framework

// Bootstrap takes care of initializing necessary test context for vSphere tests
func Bootstrap(fw *framework.Framework) {
	done := make(chan bool)
	f = fw
	go func() {
		once.Do(bootstrapOnce)
		<-waiting
		done <- true
	}()
	<-done
}

func bootstrapOnce() {
	// 1. Read vSphere conf and get VSphere instances
	vsphereInstances, err := GetVSphereInstances()
	if err != nil {
		framework.Failf("Failed to bootstrap vSphere with error: %v", err)
	}
	// 2. Get all nodes
	nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		framework.Failf("Failed to get nodes: %v", err)
	}
	TestContext = VSphereContext{NodeMapper: &NodeMapper{}, VSphereInstances: vsphereInstances}
	// 3. Get Node to VSphere mapping
	err = TestContext.NodeMapper.GenerateNodeMap(vsphereInstances, *nodeList)
	if err != nil {
		framework.Failf("Failed to bootstrap vSphere with error: %v", err)
	}
	close(waiting)
}
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/config.go (generated, vendored, normal file, 180 lines)
@@ -0,0 +1,180 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vsphere

import (
	"errors"
	"fmt"
	"gopkg.in/gcfg.v1"
	"io"
	"k8s.io/kubernetes/test/e2e/framework"
	"os"
)

const (
	vSphereConfFileEnvVar = "VSPHERE_CONF_FILE"
)

var (
	confFileLocation = os.Getenv(vSphereConfFileEnvVar)
)

// Config represents vSphere configuration
type Config struct {
	Username          string
	Password          string
	Hostname          string
	Port              string
	Datacenters       string
	RoundTripperCount uint
	DefaultDatastore  string
	Folder            string
}

// ConfigFile represents the content of vsphere.conf file.
// Users specify the configuration of one or more vSphere instances in vsphere.conf where
// the Kubernetes master and worker nodes are running.
type ConfigFile struct {
	Global struct {
		// vCenter username.
		User string `gcfg:"user"`
		// vCenter password in clear text.
		Password string `gcfg:"password"`
		// vCenter port.
		VCenterPort string `gcfg:"port"`
		// True if vCenter uses self-signed cert.
		InsecureFlag bool `gcfg:"insecure-flag"`
		// Datacenter in which VMs are located.
		Datacenters string `gcfg:"datacenters"`
		// Soap round tripper count (retries = RoundTripper - 1)
		RoundTripperCount uint `gcfg:"soap-roundtrip-count"`
	}

	VirtualCenter map[string]*Config

	Network struct {
		// PublicNetwork is name of the network the VMs are joined to.
		PublicNetwork string `gcfg:"public-network"`
	}

	Disk struct {
		// SCSIControllerType defines SCSI controller to be used.
		SCSIControllerType string `dcfg:"scsicontrollertype"`
	}

	// Endpoint used to create volumes
	Workspace struct {
		VCenterIP        string `gcfg:"server"`
		Datacenter       string `gcfg:"datacenter"`
		Folder           string `gcfg:"folder"`
		DefaultDatastore string `gcfg:"default-datastore"`
		ResourcePoolPath string `gcfg:"resourcepool-path"`
	}
}

// GetVSphereInstances parses vsphere.conf and returns VSphere instances
func GetVSphereInstances() (map[string]*VSphere, error) {
	cfg, err := getConfig()
	if err != nil {
		return nil, err
	}
	return populateInstanceMap(cfg)
}

func getConfig() (*ConfigFile, error) {
	if confFileLocation == "" {
		return nil, fmt.Errorf("Env variable 'VSPHERE_CONF_FILE' is not set.")
	}
	confFile, err := os.Open(confFileLocation)
	if err != nil {
		return nil, err
	}
	defer confFile.Close()
	cfg, err := readConfig(confFile)
	if err != nil {
		return nil, err
	}
	return &cfg, nil
}

// readConfig parses vSphere cloud config file into ConfigFile.
func readConfig(config io.Reader) (ConfigFile, error) {
	if config == nil {
		err := fmt.Errorf("no vSphere cloud provider config file given")
		return ConfigFile{}, err
	}

	var cfg ConfigFile
	err := gcfg.ReadInto(&cfg, config)
	return cfg, err
}

func populateInstanceMap(cfg *ConfigFile) (map[string]*VSphere, error) {
	vsphereInstances := make(map[string]*VSphere)

	if cfg.Workspace.VCenterIP == "" || cfg.Workspace.DefaultDatastore == "" || cfg.Workspace.Folder == "" || cfg.Workspace.Datacenter == "" {
		msg := fmt.Sprintf("All fields in workspace are mandatory."+
			" vsphere.conf does not have the workspace specified correctly. cfg.Workspace: %+v", cfg.Workspace)
		framework.Logf(msg)
		return nil, errors.New(msg)
	}
	for vcServer, vcConfig := range cfg.VirtualCenter {
		framework.Logf("Initializing vc server %s", vcServer)
		if vcServer == "" {
			framework.Logf("vsphere.conf does not have the VirtualCenter IP address specified")
			return nil, errors.New("vsphere.conf does not have the VirtualCenter IP address specified")
		}
		vcConfig.Hostname = vcServer

		if vcConfig.Username == "" {
			vcConfig.Username = cfg.Global.User
		}
		if vcConfig.Password == "" {
			vcConfig.Password = cfg.Global.Password
		}
		if vcConfig.Username == "" {
			msg := fmt.Sprintf("vcConfig.User is empty for vc %s!", vcServer)
			framework.Logf(msg)
			return nil, errors.New(msg)
		}
		if vcConfig.Password == "" {
			msg := fmt.Sprintf("vcConfig.Password is empty for vc %s!", vcServer)
			framework.Logf(msg)
			return nil, errors.New(msg)
		}
		if vcConfig.Port == "" {
			vcConfig.Port = cfg.Global.VCenterPort
		}
		if vcConfig.Datacenters == "" && cfg.Global.Datacenters != "" {
			vcConfig.Datacenters = cfg.Global.Datacenters
		}
		if vcConfig.RoundTripperCount == 0 {
			vcConfig.RoundTripperCount = cfg.Global.RoundTripperCount
		}

		vcConfig.DefaultDatastore = cfg.Workspace.DefaultDatastore
		vcConfig.Folder = cfg.Workspace.Folder

		vsphereIns := VSphere{
			Config: vcConfig,
		}
		vsphereInstances[vcServer] = &vsphereIns
	}

	framework.Logf("ConfigFile %v \n vSphere instances %v", cfg, vsphereInstances)
	return vsphereInstances, nil
}
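For context, the gcfg tags above correspond to a vsphere.conf roughly like the following; every value here is a placeholder, and the actual file is supplied out of band through the VSPHERE_CONF_FILE environment variable rather than being part of this diff.

[Global]
user = "administrator@vsphere.local"
password = "<password>"
port = "443"
insecure-flag = true
datacenters = "Datacenter-1"
soap-roundtrip-count = 3

[VirtualCenter "10.160.0.1"]

[Workspace]
server = "10.160.0.1"
datacenter = "Datacenter-1"
folder = "kubernetes"
default-datastore = "sharedVmfs-0"
resourcepool-path = "cluster/Resources"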
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/connection.go (generated, vendored, normal file, 91 lines)
@@ -0,0 +1,91 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vsphere

import (
	"context"
	"fmt"
	neturl "net/url"
	"sync"

	"github.com/golang/glog"
	"github.com/vmware/govmomi"
	"github.com/vmware/govmomi/session"
	"github.com/vmware/govmomi/vim25"
)

const (
	roundTripperDefaultCount = 3
)

var (
	clientLock sync.Mutex
)

// Connect makes connection to vSphere
// No actions are taken if a connection exists and alive. Otherwise, a new client will be created.
func Connect(ctx context.Context, vs *VSphere) error {
	var err error
	clientLock.Lock()
	defer clientLock.Unlock()

	if vs.Client == nil {
		vs.Client, err = NewClient(ctx, vs)
		if err != nil {
			glog.Errorf("Failed to create govmomi client. err: %+v", err)
			return err
		}
		return nil
	}
	manager := session.NewManager(vs.Client.Client)
	userSession, err := manager.UserSession(ctx)
	if err != nil {
		glog.Errorf("Error while obtaining user session. err: %+v", err)
		return err
	}
	if userSession != nil {
		return nil
	}
	glog.Warningf("Creating new client session since the existing session is not valid or not authenticated")
	vs.Client.Logout(ctx)
	vs.Client, err = NewClient(ctx, vs)
	if err != nil {
		glog.Errorf("Failed to create govmomi client. err: %+v", err)
		return err
	}
	return nil
}

// NewClient creates a new client for vSphere connection
func NewClient(ctx context.Context, vs *VSphere) (*govmomi.Client, error) {
	url, err := neturl.Parse(fmt.Sprintf("https://%s:%s/sdk", vs.Config.Hostname, vs.Config.Port))
	if err != nil {
		glog.Errorf("Failed to parse URL: %s. err: %+v", url, err)
		return nil, err
	}
	url.User = neturl.UserPassword(vs.Config.Username, vs.Config.Password)
	client, err := govmomi.NewClient(ctx, url, true)
	if err != nil {
		glog.Errorf("Failed to create new client. err: %+v", err)
		return nil, err
	}
	if vs.Config.RoundTripperCount == 0 {
		vs.Config.RoundTripperCount = roundTripperDefaultCount
	}
	client.RoundTripper = vim25.Retry(client.RoundTripper, vim25.TemporaryNetworkError(int(vs.Config.RoundTripperCount)))
	return client, nil
}
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/context.go (generated, vendored, normal file, 26 lines)
@@ -0,0 +1,26 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vsphere

// Context holds common information for vSphere tests
type VSphereContext struct {
	NodeMapper       *NodeMapper
	VSphereInstances map[string]*VSphere
}

// TestContext should be used by all tests to access common context data. It should be initialized only once, during bootstrapping the tests.
var TestContext VSphereContext
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/nodemapper.go (generated, vendored, normal file, 134 lines)
@@ -0,0 +1,134 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vsphere

import (
	"context"
	"errors"
	"strings"
	"sync"

	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/vim25/types"
	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

type NodeMapper struct {
}

type NodeInfo struct {
	Name              string
	DataCenterRef     types.ManagedObjectReference
	VirtualMachineRef types.ManagedObjectReference
	VSphere           *VSphere
}

var (
	nameToNodeInfo = make(map[string]*NodeInfo)
)

// GenerateNodeMap populates node name to node info map
func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, nodeList v1.NodeList) error {
	type VmSearch struct {
		vs         *VSphere
		datacenter *object.Datacenter
	}

	var wg sync.WaitGroup
	var queueChannel []*VmSearch

	var datacenters []*object.Datacenter
	var err error
	for _, vs := range vSphereInstances {

		// Create context
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		if vs.Config.Datacenters == "" {
			datacenters, err = vs.GetAllDatacenter(ctx)
			if err != nil {
				framework.Logf("NodeMapper error: %v", err)
				continue
			}
		} else {
			dcName := strings.Split(vs.Config.Datacenters, ",")
			for _, dc := range dcName {
				dc = strings.TrimSpace(dc)
				if dc == "" {
					continue
				}
				datacenter, err := vs.GetDatacenter(ctx, dc)
				if err != nil {
					framework.Logf("NodeMapper error dc: %s \n err: %v", dc, err)

					continue
				}
				datacenters = append(datacenters, datacenter)
			}
		}

		for _, dc := range datacenters {
			framework.Logf("Search candidates vc=%s and datacenter=%s", vs.Config.Hostname, dc.Name())
			queueChannel = append(queueChannel, &VmSearch{vs: vs, datacenter: dc})
		}
	}

	for _, node := range nodeList.Items {
		n := node
		go func() {
			nodeUUID := getUUIDFromProviderID(n.Spec.ProviderID)
			framework.Logf("Searching for node with UUID: %s", nodeUUID)
			for _, res := range queueChannel {
				ctx, cancel := context.WithCancel(context.Background())
				defer cancel()
				vm, err := res.vs.GetVMByUUID(ctx, nodeUUID, res.datacenter)
				if err != nil {
					framework.Logf("Error %v while looking for node=%s in vc=%s and datacenter=%s",
						err, n.Name, res.vs.Config.Hostname, res.datacenter.Name())
					continue
				}
				if vm != nil {
					framework.Logf("Found node %s as vm=%+v in vc=%s and datacenter=%s",
						n.Name, vm, res.vs.Config.Hostname, res.datacenter.Name())
					nodeInfo := &NodeInfo{Name: n.Name, DataCenterRef: res.datacenter.Reference(), VirtualMachineRef: vm.Reference(), VSphere: res.vs}
					nm.SetNodeInfo(n.Name, nodeInfo)
					break
				}
			}
			wg.Done()
		}()
		wg.Add(1)
	}
	wg.Wait()

	if len(nameToNodeInfo) != len(nodeList.Items) {
		return errors.New("all nodes not mapped to respective vSphere")
	}
	return nil
}

// GetNodeInfo return NodeInfo for given nodeName
func (nm *NodeMapper) GetNodeInfo(nodeName string) *NodeInfo {
	return nameToNodeInfo[nodeName]
}

// SetNodeInfo sets NodeInfo for given nodeName. This function is not thread safe. Users need to handle concurrency.
func (nm *NodeMapper) SetNodeInfo(nodeName string, nodeInfo *NodeInfo) {
	nameToNodeInfo[nodeName] = nodeInfo
}
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/persistent_volumes-vsphere.go (generated, vendored, normal file, 217 lines)
@@ -0,0 +1,217 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vsphere

import (
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

// Testing configurations of single a PV/PVC pair attached to a vSphere Disk
var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
	var (
		c          clientset.Interface
		ns         string
		volumePath string
		pv         *v1.PersistentVolume
		pvc        *v1.PersistentVolumeClaim
		clientPod  *v1.Pod
		pvConfig   framework.PersistentVolumeConfig
		pvcConfig  framework.PersistentVolumeClaimConfig
		err        error
		node       string
		volLabel   labels.Set
		selector   *metav1.LabelSelector
		nodeInfo   *NodeInfo
	)

	f := framework.NewDefaultFramework("pv")
	/*
		Test Setup

		1. Create volume (vmdk)
		2. Create PV with volume path for the vmdk.
		3. Create PVC to bind with PV.
		4. Create a POD using the PVC.
		5. Verify Disk and Attached to the node.
	*/
	BeforeEach(func() {
		framework.SkipUnlessProviderIs("vsphere")
		c = f.ClientSet
		ns = f.Namespace.Name
		clientPod = nil
		pvc = nil
		pv = nil
		nodes := framework.GetReadySchedulableNodesOrDie(c)
		if len(nodes.Items) < 1 {
			framework.Skipf("Requires at least %d node", 1)
		}
		nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)

		volLabel = labels.Set{framework.VolumeSelectorKey: ns}
		selector = metav1.SetAsLabelSelector(volLabel)

		if volumePath == "" {
			volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
			Expect(err).NotTo(HaveOccurred())
			pvConfig = framework.PersistentVolumeConfig{
				NamePrefix: "vspherepv-",
				Labels:     volLabel,
				PVSource: v1.PersistentVolumeSource{
					VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
						VolumePath: volumePath,
						FSType:     "ext4",
					},
				},
				Prebind: nil,
			}
			emptyStorageClass := ""
			pvcConfig = framework.PersistentVolumeClaimConfig{
				Selector:         selector,
				StorageClassName: &emptyStorageClass,
			}
		}
		By("Creating the PV and PVC")
		pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
		Expect(err).NotTo(HaveOccurred())
		framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))

		By("Creating the Client Pod")
		clientPod, err = framework.CreateClientPod(c, ns, pvc)
		Expect(err).NotTo(HaveOccurred())
		node = clientPod.Spec.NodeName

		By("Verify disk should be attached to the node")
		isAttached, err := diskIsAttached(volumePath, node)
		Expect(err).NotTo(HaveOccurred())
		Expect(isAttached).To(BeTrue(), "disk is not attached with the node")
	})

	AfterEach(func() {
		framework.Logf("AfterEach: Cleaning up test resources")
		if c != nil {
			framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "AfterEach: failed to delete pod ", clientPod.Name)

			if pv != nil {
				framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "AfterEach: failed to delete PV ", pv.Name)
			}
			if pvc != nil {
				framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "AfterEach: failed to delete PVC ", pvc.Name)
			}
		}
	})
	/*
		Clean up

		1. Wait and verify volume is detached from the node
		2. Delete PV
		3. Delete Volume (vmdk)
	*/
	framework.AddCleanupAction(func() {
		// Cleanup actions will be called even when the tests are skipped and leaves namespace unset.
		if len(ns) > 0 && len(volumePath) > 0 {
			framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node))
			nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
		}
	})

	/*
		Delete the PVC and then the pod. Expect the pod to succeed in unmounting and detaching PD on delete.

		Test Steps:
		1. Delete PVC.
		2. Delete POD, POD deletion should succeed.
	*/

	It("should test that deleting a PVC before the pod does not cause pod deletion to fail on vsphere volume detach", func() {
		By("Deleting the Claim")
		framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
		pvc = nil

		By("Deleting the Pod")
		framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name)
	})

	/*
		Delete the PV and then the pod. Expect the pod to succeed in unmounting and detaching PD on delete.

		Test Steps:
		1. Delete PV.
		2. Delete POD, POD deletion should succeed.
	*/
	It("should test that deleting the PV before the pod does not cause pod deletion to fail on vspehre volume detach", func() {
		By("Deleting the Persistent Volume")
		framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
		pv = nil

		By("Deleting the pod")
		framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name)
	})
	/*
		This test verifies that a volume mounted to a pod remains mounted after a kubelet restarts.
		Steps:
		1. Write to the volume
		2. Restart kubelet
		3. Verify that written file is accessible after kubelet restart
	*/
	It("should test that a file written to the vspehre volume mount before kubelet restart can be read after restart [Disruptive]", func() {
		utils.TestKubeletRestartsAndRestoresMount(c, f, clientPod)
	})

	/*
		This test verifies that a volume mounted to a pod that is deleted while the kubelet is down
		unmounts volume when the kubelet returns.

		Steps:
		1. Verify volume is mounted on the node.
		2. Stop kubelet.
		3. Delete pod.
		4. Start kubelet.
		5. Verify that volume mount not to be found.
	*/
	It("should test that a vspehre volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns [Disruptive]", func() {
		utils.TestVolumeUnmountsFromDeletedPod(c, f, clientPod)
	})

	/*
		This test verifies that deleting the Namespace of a PVC and Pod causes the successful detach of Persistent Disk

		Steps:
		1. Delete Namespace.
		2. Wait for namespace to get deleted. (Namespace deletion should trigger deletion of belonging pods)
		3. Verify volume should be detached from the node.
	*/
	It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of vsphere volume", func() {
		By("Deleting the Namespace")
		err := c.CoreV1().Namespaces().Delete(ns, nil)
		Expect(err).NotTo(HaveOccurred())

		err = framework.WaitForNamespacesDeleted(c, []string{ns}, 3*time.Minute)
		Expect(err).NotTo(HaveOccurred())

		By("Verifying Persistent Disk detaches")
		waitForVSphereDiskToDetach(volumePath, node)
	})
})
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/pv_reclaimpolicy.go (generated, vendored, normal file, 249 lines)
@@ -0,0 +1,249 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vsphere

import (
	"strconv"
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"k8s.io/api/core/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
	f := framework.NewDefaultFramework("persistentvolumereclaim")
	var (
		c          clientset.Interface
		ns         string
		volumePath string
		pv         *v1.PersistentVolume
		pvc        *v1.PersistentVolumeClaim
		nodeInfo   *NodeInfo
	)

	BeforeEach(func() {
		c = f.ClientSet
		ns = f.Namespace.Name
		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
	})

	utils.SIGDescribe("persistentvolumereclaim:vsphere", func() {
		BeforeEach(func() {
			framework.SkipUnlessProviderIs("vsphere")
			Bootstrap(f)
			nodeInfo = GetReadySchedulableRandomNodeInfo()
			pv = nil
			pvc = nil
			volumePath = ""
		})

		AfterEach(func() {
			testCleanupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, volumePath, pv, pvc)
		})

		/*
			This test verifies persistent volume should be deleted when reclaimPolicy on the PV is set to delete and
			associated claim is deleted

			Test Steps:
			1. Create vmdk
			2. Create PV Spec with volume path set to VMDK file created in Step-1, and PersistentVolumeReclaimPolicy is set to Delete
			3. Create PVC with the storage request set to PV's storage capacity.
			4. Wait for PV and PVC to bound.
			5. Delete PVC
			6. Verify PV is deleted automatically.
		*/
		It("should delete persistent volume when reclaimPolicy set to delete and associated claim is deleted", func() {
			var err error
			volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete)
			Expect(err).NotTo(HaveOccurred())

			deletePVCAfterBind(c, ns, pvc, pv)
			pvc = nil

			By("verify pv is deleted")
			err = framework.WaitForPersistentVolumeDeleted(c, pv.Name, 3*time.Second, 300*time.Second)
			Expect(err).NotTo(HaveOccurred())

			pv = nil
			volumePath = ""
		})

		/*
			Test Steps:
			1. Create vmdk
			2. Create PV Spec with volume path set to VMDK file created in Step-1, and PersistentVolumeReclaimPolicy is set to Delete
			3. Create PVC with the storage request set to PV's storage capacity.
			4. Wait for PV and PVC to bound.
			5. Delete PVC.
			6. Verify volume is attached to the node and volume is accessible in the pod.
			7. Verify PV status should be failed.
			8. Delete the pod.
			9. Verify PV should be detached from the node and automatically deleted.
		*/
		It("should not detach and unmount PV when associated pvc with delete as reclaimPolicy is deleted when it is in use by the pod", func() {
			var err error

			volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete)
			Expect(err).NotTo(HaveOccurred())
			// Wait for PV and PVC to Bind
			framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))

			By("Creating the Pod")
			pod, err := framework.CreateClientPod(c, ns, pvc)
			Expect(err).NotTo(HaveOccurred())

			By("Deleting the Claim")
			framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
			pvc = nil

			// Verify PV is Present, after PVC is deleted and PV status should be Failed.
			pv, err := c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
			Expect(err).NotTo(HaveOccurred())
			Expect(framework.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second)).NotTo(HaveOccurred())

			By("Verify the volume is attached to the node")
			isVolumeAttached, verifyDiskAttachedError := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
			Expect(verifyDiskAttachedError).NotTo(HaveOccurred())
			Expect(isVolumeAttached).To(BeTrue())

			By("Verify the volume is accessible and available in the pod")
			verifyVSphereVolumesAccessible(c, pod, []*v1.PersistentVolume{pv})
			framework.Logf("Verified that Volume is accessible in the POD after deleting PV claim")

			By("Deleting the Pod")
			framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name)

			By("Verify PV is detached from the node after Pod is deleted")
			Expect(waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)).NotTo(HaveOccurred())

			By("Verify PV should be deleted automatically")
			framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second))
			pv = nil
			volumePath = ""
		})

		/*
			This test Verify persistent volume should be retained when reclaimPolicy on the PV is set to retain
			and associated claim is deleted

			Test Steps:
			1. Create vmdk
			2. Create PV Spec with volume path set to VMDK file created in Step-1, and PersistentVolumeReclaimPolicy is set to Retain
			3. Create PVC with the storage request set to PV's storage capacity.
			4. Wait for PV and PVC to bound.
			5. Write some content in the volume.
			6. Delete PVC
			7. Verify PV is retained.
			8. Delete retained PV.
			9. Create PV Spec with the same volume path used in step 2.
			10. Create PVC with the storage request set to PV's storage capacity.
			11. Created POD using PVC created in Step 10 and verify volume content is matching.
		*/

		It("should retain persistent volume when reclaimPolicy set to retain when associated claim is deleted", func() {
			var err error
			var volumeFileContent = "hello from vsphere cloud provider, Random Content is :" + strconv.FormatInt(time.Now().UnixNano(), 10)

			volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimRetain)
			Expect(err).NotTo(HaveOccurred())

			writeContentToVSpherePV(c, pvc, volumeFileContent)

			By("Delete PVC")
			framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
			pvc = nil

			By("Verify PV is retained")
			framework.Logf("Waiting for PV %v to become Released", pv.Name)
			err = framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 3*time.Second, 300*time.Second)
			Expect(err).NotTo(HaveOccurred())
			framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)

			By("Creating the PV for same volume path")
			pv = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimRetain, nil)
			pv, err = c.CoreV1().PersistentVolumes().Create(pv)
			Expect(err).NotTo(HaveOccurred())

			By("creating the pvc")
			pvc = getVSpherePersistentVolumeClaimSpec(ns, nil)
			pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
			Expect(err).NotTo(HaveOccurred())

			By("wait for the pv and pvc to bind")
			framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
			verifyContentOfVSpherePV(c, pvc, volumeFileContent)

		})
	})
})

// Test Setup for persistentvolumereclaim tests for vSphere Provider
func testSetupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *NodeInfo, ns string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy) (volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim, err error) {
	By("running testSetupVSpherePersistentVolumeReclaim")
	By("creating vmdk")
	volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
	if err != nil {
		return
	}
	By("creating the pv")
	pv = getVSpherePersistentVolumeSpec(volumePath, persistentVolumeReclaimPolicy, nil)
	pv, err = c.CoreV1().PersistentVolumes().Create(pv)
	if err != nil {
		return
	}
	By("creating the pvc")
	pvc = getVSpherePersistentVolumeClaimSpec(ns, nil)
	pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
	return
}

// Test Cleanup for persistentvolumereclaim tests for vSphere Provider
func testCleanupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *NodeInfo, ns string, volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
	By("running testCleanupVSpherePersistentVolumeReclaim")
	if len(volumePath) > 0 {
		err := nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
		Expect(err).NotTo(HaveOccurred())
	}
	if pv != nil {
		framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
	}
	if pvc != nil {
		framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
	}
}

// func to wait until PV and PVC bind and once bind completes, delete the PVC
func deletePVCAfterBind(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
	var err error

	By("wait for the pv and pvc to bind")
	framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))

	By("delete pvc")
	framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
	pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
	if !apierrs.IsNotFound(err) {
		Expect(err).NotTo(HaveOccurred())
	}
}
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/pvc_label_selector.go (generated, vendored, normal file, 150 lines)
@@ -0,0 +1,150 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vsphere

import (
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

/*
	This is a function test for Selector-Label Volume Binding Feature
	Test verifies volume with the matching label is bounded with the PVC.

	Test Steps
	----------
	1. Create VMDK.
	2. Create pv with lable volume-type:ssd, volume path set to vmdk created in previous step, and PersistentVolumeReclaimPolicy is set to Delete.
	3. Create PVC (pvc_vvol) with label selector to match with volume-type:vvol
	4. Create PVC (pvc_ssd) with label selector to match with volume-type:ssd
	5. Wait and verify pvc_ssd is bound with PV.
	6. Verify Status of pvc_vvol is still pending.
	7. Delete pvc_ssd.
	8. verify associated pv is also deleted.
	9. delete pvc_vvol
*/
var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
	f := framework.NewDefaultFramework("pvclabelselector")
	var (
		c          clientset.Interface
		ns         string
		pv_ssd     *v1.PersistentVolume
		pvc_ssd    *v1.PersistentVolumeClaim
		pvc_vvol   *v1.PersistentVolumeClaim
		volumePath string
		ssdlabels  map[string]string
		vvollabels map[string]string
		err        error
		nodeInfo   *NodeInfo
	)
	BeforeEach(func() {
		framework.SkipUnlessProviderIs("vsphere")
		c = f.ClientSet
		ns = f.Namespace.Name
		Bootstrap(f)
		nodeInfo = GetReadySchedulableRandomNodeInfo()
		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
		ssdlabels = make(map[string]string)
		ssdlabels["volume-type"] = "ssd"
		vvollabels = make(map[string]string)
		vvollabels["volume-type"] = "vvol"

	})

	utils.SIGDescribe("Selector-Label Volume Binding:vsphere", func() {
		AfterEach(func() {
			By("Running clean up actions")
			if framework.ProviderIs("vsphere") {
				testCleanupVSpherePVClabelselector(c, ns, nodeInfo, volumePath, pv_ssd, pvc_ssd, pvc_vvol)
			}
		})
		It("should bind volume with claim for given label", func() {
			volumePath, pv_ssd, pvc_ssd, pvc_vvol, err = testSetupVSpherePVClabelselector(c, nodeInfo, ns, ssdlabels, vvollabels)
			Expect(err).NotTo(HaveOccurred())

			By("wait for the pvc_ssd to bind with pv_ssd")
			framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv_ssd, pvc_ssd))

			By("Verify status of pvc_vvol is pending")
			err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvc_vvol.Name, 3*time.Second, 300*time.Second)
			Expect(err).NotTo(HaveOccurred())

			By("delete pvc_ssd")
			framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name)

			By("verify pv_ssd is deleted")
			err = framework.WaitForPersistentVolumeDeleted(c, pv_ssd.Name, 3*time.Second, 300*time.Second)
			Expect(err).NotTo(HaveOccurred())
			volumePath = ""

			By("delete pvc_vvol")
			framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns), "Failed to delete PVC ", pvc_vvol.Name)
		})
	})
})

func testSetupVSpherePVClabelselector(c clientset.Interface, nodeInfo *NodeInfo, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim, err error) {
	volumePath = ""
	By("creating vmdk")
	Expect(err).NotTo(HaveOccurred())
	volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
	if err != nil {
		return
	}

	By("creating the pv with lable volume-type:ssd")
	pv_ssd = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimDelete, ssdlabels)
	pv_ssd, err = c.CoreV1().PersistentVolumes().Create(pv_ssd)
	if err != nil {
		return
	}

	By("creating pvc with label selector to match with volume-type:vvol")
	pvc_vvol = getVSpherePersistentVolumeClaimSpec(ns, vvollabels)
	pvc_vvol, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc_vvol)
	if err != nil {
		return
	}

	By("creating pvc with label selector to match with volume-type:ssd")
	pvc_ssd = getVSpherePersistentVolumeClaimSpec(ns, ssdlabels)
	pvc_ssd, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc_ssd)
	return
}

func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, nodeInfo *NodeInfo, volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim) {
	By("running testCleanupVSpherePVClabelselector")
	if len(volumePath) > 0 {
		nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
	}
	if pvc_ssd != nil {
		framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name)
	}
	if pvc_vvol != nil {
		framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns), "Failed to delete PVC ", pvc_vvol.Name)
	}
	if pv_ssd != nil {
		framework.ExpectNoError(framework.DeletePersistentVolume(c, pv_ssd.Name), "Faled to delete PV ", pv_ssd.Name)
	}
}
241
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/vsphere.go
generated
vendored
Normal file
241
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/vsphere.go
generated
vendored
Normal file
@@ -0,0 +1,241 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package vsphere
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/vmware/govmomi"
|
||||
"github.com/vmware/govmomi/find"
|
||||
"github.com/vmware/govmomi/object"
|
||||
"github.com/vmware/govmomi/vim25/soap"
|
||||
"github.com/vmware/govmomi/vim25/types"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
const (
|
||||
VolDir = "kubevols"
|
||||
DefaultDiskCapacityKB = 2097152
|
||||
DefaultDiskFormat = "thin"
|
||||
DefaultSCSIControllerType = "lsiLogic"
|
||||
VirtualMachineType = "VirtualMachine"
|
||||
)
|
||||
|
||||
// Represents a vSphere instance where one or more kubernetes nodes are running.
|
||||
type VSphere struct {
|
||||
Config *Config
|
||||
Client *govmomi.Client
|
||||
}
|
||||
|
||||
// VolumeOptions specifies various options for a volume.
|
||||
type VolumeOptions struct {
|
||||
Name string
|
||||
CapacityKB int
|
||||
DiskFormat string
|
||||
SCSIControllerType string
|
||||
Datastore string
|
||||
}
|
||||
|
||||
// GetDatacenter returns the DataCenter Object for the given datacenterPath
|
||||
func (vs *VSphere) GetDatacenter(ctx context.Context, datacenterPath string) (*object.Datacenter, error) {
|
||||
Connect(ctx, vs)
|
||||
finder := find.NewFinder(vs.Client.Client, false)
|
||||
return finder.Datacenter(ctx, datacenterPath)
|
||||
}
|
||||
|
||||
// GetDatacenter returns the DataCenter Object for the given datacenterPath
|
||||
func (vs *VSphere) GetDatacenterFromObjectReference(ctx context.Context, dc object.Reference) *object.Datacenter {
|
||||
Connect(ctx, vs)
|
||||
return object.NewDatacenter(vs.Client.Client, dc.Reference())
|
||||
}
|
||||
|
||||
// GetAllDatacenter returns all the DataCenter Objects
|
||||
func (vs *VSphere) GetAllDatacenter(ctx context.Context) ([]*object.Datacenter, error) {
|
||||
Connect(ctx, vs)
|
||||
finder := find.NewFinder(vs.Client.Client, false)
|
||||
return finder.DatacenterList(ctx, "*")
|
||||
}
|
||||
|
||||
// GetVMByUUID gets the VM object Reference from the given vmUUID
|
||||
func (vs *VSphere) GetVMByUUID(ctx context.Context, vmUUID string, dc object.Reference) (object.Reference, error) {
|
||||
Connect(ctx, vs)
|
||||
datacenter := vs.GetDatacenterFromObjectReference(ctx, dc)
|
||||
s := object.NewSearchIndex(vs.Client.Client)
|
||||
vmUUID = strings.ToLower(strings.TrimSpace(vmUUID))
|
||||
return s.FindByUuid(ctx, datacenter, vmUUID, true, nil)
|
||||
}
|
||||
|
||||
// GetFolderByPath gets the Folder Object Reference from the given folder path
|
||||
// folderPath should be the full path to folder
|
||||
func (vs *VSphere) GetFolderByPath(ctx context.Context, dc object.Reference, folderPath string) (vmFolderMor types.ManagedObjectReference, err error) {
|
||||
Connect(ctx, vs)
|
||||
datacenter := object.NewDatacenter(vs.Client.Client, dc.Reference())
|
||||
finder := find.NewFinder(datacenter.Client(), false)
|
||||
finder.SetDatacenter(datacenter)
|
||||
vmFolder, err := finder.Folder(ctx, folderPath)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to get the folder reference for %s. err: %+v", folderPath, err)
|
||||
return vmFolderMor, err
|
||||
}
|
||||
return vmFolder.Reference(), nil
|
||||
}
|
||||
|
||||
// CreateVolume creates a vsphere volume using given volume paramemters specified in VolumeOptions.
|
||||
// If volume is created successfully the canonical disk path is returned else error is returned.
|
||||
func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions, dataCenterRef types.ManagedObjectReference) (string, error) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
Connect(ctx, vs)
|
||||
datacenter := object.NewDatacenter(vs.Client.Client, dataCenterRef)
|
||||
var (
|
||||
err error
|
||||
directoryAlreadyPresent = false
|
||||
)
|
||||
if datacenter == nil {
|
||||
return "", fmt.Errorf("datacenter is nil")
|
||||
}
|
||||
vs.initVolumeOptions(volumeOptions)
|
||||
finder := find.NewFinder(datacenter.Client(), false)
|
||||
finder.SetDatacenter(datacenter)
|
||||
ds, err := finder.Datastore(ctx, volumeOptions.Datastore)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Failed while searching for datastore: %s. err: %+v", volumeOptions.Datastore, err)
|
||||
}
|
||||
directoryPath := filepath.Clean(ds.Path(VolDir)) + "/"
|
||||
fileManager := object.NewFileManager(ds.Client())
|
||||
err = fileManager.MakeDirectory(ctx, directoryPath, datacenter, false)
|
||||
if err != nil {
|
||||
if soap.IsSoapFault(err) {
|
||||
soapFault := soap.ToSoapFault(err)
|
||||
if _, ok := soapFault.VimFault().(types.FileAlreadyExists); ok {
|
||||
directoryAlreadyPresent = true
|
||||
framework.Logf("Directory with the path %+q is already present", directoryPath)
|
||||
}
|
||||
}
|
||||
if !directoryAlreadyPresent {
|
||||
framework.Logf("Cannot create dir %#v. err %s", directoryPath, err)
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
framework.Logf("Created dir with path as %+q", directoryPath)
|
||||
vmdkPath := directoryPath + volumeOptions.Name + ".vmdk"
|
||||
|
||||
// Create a virtual disk manager
|
||||
vdm := object.NewVirtualDiskManager(ds.Client())
|
||||
// Create specification for new virtual disk
|
||||
vmDiskSpec := &types.FileBackedVirtualDiskSpec{
|
||||
VirtualDiskSpec: types.VirtualDiskSpec{
|
||||
AdapterType: volumeOptions.SCSIControllerType,
|
||||
DiskType: volumeOptions.DiskFormat,
|
||||
},
|
||||
CapacityKb: int64(volumeOptions.CapacityKB),
|
||||
}
|
||||
// Create virtual disk
|
||||
task, err := vdm.CreateVirtualDisk(ctx, vmdkPath, datacenter, vmDiskSpec)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to create virtual disk: %s. err: %+v", vmdkPath, err)
|
||||
return "", err
|
||||
}
|
||||
taskInfo, err := task.WaitForResult(ctx, nil)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to complete virtual disk creation: %s. err: %+v", vmdkPath, err)
|
||||
return "", err
|
||||
}
|
||||
volumePath := taskInfo.Result.(string)
|
||||
canonicalDiskPath, err := getCanonicalVolumePath(ctx, datacenter, volumePath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return canonicalDiskPath, nil
|
||||
}
|
||||
|
||||
// DeleteVolume deletes the vmdk file specified in the volumePath.
|
||||
// if an error is encountered while deleting volume, error is returned.
|
||||
func (vs *VSphere) DeleteVolume(volumePath string, dataCenterRef types.ManagedObjectReference) error {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
Connect(ctx, vs)
|
||||
|
||||
datacenter := object.NewDatacenter(vs.Client.Client, dataCenterRef)
|
||||
virtualDiskManager := object.NewVirtualDiskManager(datacenter.Client())
|
||||
diskPath := removeStorageClusterORFolderNameFromVDiskPath(volumePath)
|
||||
// Delete virtual disk
|
||||
task, err := virtualDiskManager.DeleteVirtualDisk(ctx, diskPath, datacenter)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to delete virtual disk. err: %v", err)
|
||||
return err
|
||||
}
|
||||
err = task.Wait(ctx)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to delete virtual disk. err: %v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsVMPresent checks if VM with the name specified in the vmName argument, is present in the vCenter inventory.
|
||||
// if VM is present, function returns true else false.
|
||||
func (vs *VSphere) IsVMPresent(vmName string, dataCenterRef types.ManagedObjectReference) (isVMPresent bool, err error) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
Connect(ctx, vs)
|
||||
folderMor, err := vs.GetFolderByPath(ctx, dataCenterRef, vs.Config.Folder)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
vmFolder := object.NewFolder(vs.Client.Client, folderMor)
|
||||
vmFoldersChildren, err := vmFolder.Children(ctx)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to get children from Folder: %s. err: %+v", vmFolder.InventoryPath, err)
|
||||
return
|
||||
}
|
||||
for _, vmFoldersChild := range vmFoldersChildren {
|
||||
if vmFoldersChild.Reference().Type == VirtualMachineType {
|
||||
if object.NewVirtualMachine(vs.Client.Client, vmFoldersChild.Reference()).Name() == vmName {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// initVolumeOptions function sets default values for volumeOptions parameters if not set
|
||||
func (vs *VSphere) initVolumeOptions(volumeOptions *VolumeOptions) {
|
||||
if volumeOptions == nil {
|
||||
volumeOptions = &VolumeOptions{}
|
||||
}
|
||||
if volumeOptions.Datastore == "" {
|
||||
volumeOptions.Datastore = vs.Config.DefaultDatastore
|
||||
}
|
||||
if volumeOptions.CapacityKB == 0 {
|
||||
volumeOptions.CapacityKB = DefaultDiskCapacityKB
|
||||
}
|
||||
if volumeOptions.Name == "" {
|
||||
volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
|
||||
}
|
||||
if volumeOptions.DiskFormat == "" {
|
||||
volumeOptions.DiskFormat = DefaultDiskFormat
|
||||
}
|
||||
if volumeOptions.SCSIControllerType == "" {
|
||||
volumeOptions.SCSIControllerType = DefaultSCSIControllerType
|
||||
}
|
||||
}
|
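For orientation, here is a minimal sketch of how the DeleteVolume and IsVMPresent helpers above could be combined in a test teardown. It assumes the surrounding package's imports, a bootstrapped vs (*VSphere) and a dcRef (types.ManagedObjectReference) obtained from the node mapper; the function and VM names are illustrative, not part of the generated file.

// cleanupTestDisk is a hypothetical helper, not part of this file.
func cleanupTestDisk(vs *VSphere, dcRef types.ManagedObjectReference, volumePath string) error {
	// IsVMPresent reconnects to vCenter internally via Connect(ctx, vs).
	present, err := vs.IsVMPresent("e2e-node-vm", dcRef) // hypothetical VM name
	if err != nil {
		return err
	}
	framework.Logf("VM still present in the working folder: %t", present)
	// Remove the test vmdk once nothing references it anymore.
	return vs.DeleteVolume(volumePath, dcRef)
}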
66
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/vsphere_common.go
generated
vendored
Normal file
@@ -0,0 +1,66 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package vsphere
|
||||
|
||||
import (
|
||||
. "github.com/onsi/gomega"
|
||||
"os"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
const (
|
||||
SPBMPolicyName = "VSPHERE_SPBM_POLICY_NAME"
|
||||
StorageClassDatastoreName = "VSPHERE_DATASTORE"
|
||||
SecondSharedDatastore = "VSPHERE_SECOND_SHARED_DATASTORE"
|
||||
KubernetesClusterName = "VSPHERE_KUBERNETES_CLUSTER"
|
||||
SPBMTagPolicy = "VSPHERE_SPBM_TAG_POLICY"
|
||||
)
|
||||
|
||||
const (
|
||||
VCPClusterDatastore = "CLUSTER_DATASTORE"
|
||||
SPBMPolicyDataStoreCluster = "VSPHERE_SPBM_POLICY_DS_CLUSTER"
|
||||
)
|
||||
|
||||
const (
|
||||
VCPScaleVolumeCount = "VCP_SCALE_VOLUME_COUNT"
|
||||
VCPScaleVolumesPerPod = "VCP_SCALE_VOLUME_PER_POD"
|
||||
VCPScaleInstances = "VCP_SCALE_INSTANCES"
|
||||
)
|
||||
|
||||
const (
|
||||
VCPStressInstances = "VCP_STRESS_INSTANCES"
|
||||
VCPStressIterations = "VCP_STRESS_ITERATIONS"
|
||||
)
|
||||
|
||||
const (
|
||||
VCPPerfVolumeCount = "VCP_PERF_VOLUME_COUNT"
|
||||
VCPPerfVolumesPerPod = "VCP_PERF_VOLUME_PER_POD"
|
||||
VCPPerfIterations = "VCP_PERF_ITERATIONS"
|
||||
)
|
||||
|
||||
func GetAndExpectStringEnvVar(varName string) string {
|
||||
varValue := os.Getenv(varName)
|
||||
Expect(varValue).NotTo(BeEmpty(), "ENV "+varName+" is not set")
|
||||
return varValue
|
||||
}
|
||||
|
||||
func GetAndExpectIntEnvVar(varName string) int {
|
||||
varValue := GetAndExpectStringEnvVar(varName)
|
||||
varIntValue, err := strconv.Atoi(varValue)
|
||||
Expect(err).NotTo(HaveOccurred(), "Error Parsing "+varName)
|
||||
return varIntValue
|
||||
}
|
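The two helpers above are how every spec in this package reads its tunables; a minimal usage sketch, assuming it lives in the same package so the constants defined earlier are in scope (the function name is illustrative):

// readScaleConfig is a hypothetical example of consuming the env-var helpers.
func readScaleConfig() (volumeCount int, policyName string) {
	// Each call fails the spec with a clear message if the variable is unset or malformed.
	volumeCount = GetAndExpectIntEnvVar(VCPScaleVolumeCount) // VCP_SCALE_VOLUME_COUNT
	policyName = GetAndExpectStringEnvVar(SPBMPolicyName)    // VSPHERE_SPBM_POLICY_NAME
	return volumeCount, policyName
}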
233
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/vsphere_scale.go
generated
vendored
Normal file
@@ -0,0 +1,233 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package vsphere
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
storageV1 "k8s.io/api/storage/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
/*
|
||||
Perform vsphere volume life cycle management at scale based on user configurable value for number of volumes.
|
||||
The following actions will be performed as part of this test.
|
||||
|
||||
1. Create Storage Classes of 4 Categories (Default, SC with Non Default Datastore, SC with SPBM Policy, SC with VSAN Storage Capabilities.)
|
||||
2. Read VCP_SCALE_VOLUME_COUNT, VCP_SCALE_INSTANCES, VCP_SCALE_VOLUMES_PER_POD, VSPHERE_SPBM_POLICY_NAME, VSPHERE_DATASTORE from System Environment.
|
||||
3. Launch VCP_SCALE_INSTANCES goroutines for creating VCP_SCALE_VOLUME_COUNT volumes. Each goroutine is responsible for creating/attaching VCP_SCALE_VOLUME_COUNT/VCP_SCALE_INSTANCES volumes.
|
||||
4. Read VCP_SCALE_VOLUMES_PER_POD from the system environment. Each pod will have VCP_SCALE_VOLUMES_PER_POD volumes attached to it.
|
||||
5. Once all the goroutines are completed, we delete all the pods and volumes.
|
||||
*/
|
||||
const (
|
||||
NodeLabelKey = "vsphere_e2e_label"
|
||||
)
|
||||
|
||||
// NodeSelector holds the label key/value pair used to schedule test pods onto a specific node
|
||||
type NodeSelector struct {
|
||||
labelKey string
|
||||
labelValue string
|
||||
}
|
||||
|
||||
var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
|
||||
f := framework.NewDefaultFramework("vcp-at-scale")
|
||||
|
||||
var (
|
||||
client clientset.Interface
|
||||
namespace string
|
||||
nodeSelectorList []*NodeSelector
|
||||
volumeCount int
|
||||
numberOfInstances int
|
||||
volumesPerPod int
|
||||
policyName string
|
||||
datastoreName string
|
||||
nodeVolumeMapChan chan map[string][]string
|
||||
nodes *v1.NodeList
|
||||
scNames = []string{storageclass1, storageclass2, storageclass3, storageclass4}
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("vsphere")
|
||||
Bootstrap(f)
|
||||
client = f.ClientSet
|
||||
namespace = f.Namespace.Name
|
||||
nodeVolumeMapChan = make(chan map[string][]string)
|
||||
|
||||
// Read the environment variables
|
||||
volumeCount = GetAndExpectIntEnvVar(VCPScaleVolumeCount)
|
||||
volumesPerPod = GetAndExpectIntEnvVar(VCPScaleVolumesPerPod)
|
||||
|
||||
numberOfInstances = GetAndExpectIntEnvVar(VCPScaleInstances)
|
||||
Expect(numberOfInstances > 5).NotTo(BeTrue(), "Maximum allowed instances are 5")
|
||||
Expect(numberOfInstances > volumeCount).NotTo(BeTrue(), "Number of instances should be less than the total volume count")
|
||||
|
||||
policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
|
||||
datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
|
||||
|
||||
nodes = framework.GetReadySchedulableNodesOrDie(client)
|
||||
if len(nodes.Items) < 2 {
|
||||
framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
|
||||
}
|
||||
// Verify volume count specified by the user can be satisfied
|
||||
if volumeCount > volumesPerNode*len(nodes.Items) {
|
||||
framework.Skipf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), volumesPerNode*len(nodes.Items))
|
||||
}
|
||||
nodeSelectorList = createNodeLabels(client, namespace, nodes)
|
||||
})
|
||||
|
||||
/*
|
||||
Remove labels from all the nodes
|
||||
*/
|
||||
framework.AddCleanupAction(func() {
|
||||
// Cleanup actions will be called even when the tests are skipped and leaves namespace unset.
|
||||
if len(namespace) > 0 {
|
||||
for _, node := range nodes.Items {
|
||||
framework.RemoveLabelOffNode(client, node.Name, NodeLabelKey)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
It("vsphere scale tests", func() {
|
||||
var pvcClaimList []string
|
||||
nodeVolumeMap := make(map[string][]string)
|
||||
// Volumes will be provisioned with each different types of Storage Class
|
||||
scArrays := make([]*storageV1.StorageClass, len(scNames))
|
||||
for index, scname := range scNames {
|
||||
// Create vSphere Storage Class
|
||||
By(fmt.Sprintf("Creating Storage Class : %q", scname))
|
||||
var sc *storageV1.StorageClass
|
||||
scParams := make(map[string]string)
|
||||
var err error
|
||||
switch scname {
|
||||
case storageclass1:
|
||||
scParams = nil
|
||||
case storageclass2:
|
||||
scParams[Policy_HostFailuresToTolerate] = "1"
|
||||
case storageclass3:
|
||||
scParams[SpbmStoragePolicy] = policyName
|
||||
case storageclass4:
|
||||
scParams[Datastore] = datastoreName
|
||||
}
|
||||
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(scname, scParams))
|
||||
Expect(sc).NotTo(BeNil(), "Storage class is empty")
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to create storage class")
|
||||
defer client.StorageV1().StorageClasses().Delete(scname, nil)
|
||||
scArrays[index] = sc
|
||||
}
|
||||
|
||||
volumeCountPerInstance := volumeCount / numberOfInstances
|
||||
for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ {
|
||||
if instanceCount == numberOfInstances-1 {
|
||||
volumeCountPerInstance = volumeCount
|
||||
}
|
||||
volumeCount = volumeCount - volumeCountPerInstance
|
||||
go VolumeCreateAndAttach(client, namespace, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan)
|
||||
}
|
||||
|
||||
// Get the list of all volumes attached to each node from the go routines by reading the data from the channel
|
||||
for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ {
|
||||
for node, volumeList := range <-nodeVolumeMapChan {
|
||||
nodeVolumeMap[node] = append(nodeVolumeMap[node], volumeList...)
|
||||
}
|
||||
}
|
||||
podList, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{})
|
||||
for _, pod := range podList.Items {
|
||||
pvcClaimList = append(pvcClaimList, getClaimsForPod(&pod, volumesPerPod)...)
|
||||
By("Deleting pod")
|
||||
err = framework.DeletePodWithWait(f, client, &pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
By("Waiting for volumes to be detached from the node")
|
||||
err = waitForVSphereDisksToDetach(nodeVolumeMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
for _, pvcClaim := range pvcClaimList {
|
||||
err = framework.DeletePersistentVolumeClaim(client, pvcClaim, namespace)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
// Get PVC claims for the pod
|
||||
func getClaimsForPod(pod *v1.Pod, volumesPerPod int) []string {
|
||||
pvcClaimList := make([]string, volumesPerPod)
|
||||
for i, volumespec := range pod.Spec.Volumes {
|
||||
if volumespec.PersistentVolumeClaim != nil {
|
||||
pvcClaimList[i] = volumespec.PersistentVolumeClaim.ClaimName
|
||||
}
|
||||
}
|
||||
return pvcClaimList
|
||||
}
|
||||
|
||||
// VolumeCreateAndAttach performs create and attach operations of vSphere persistent volumes at scale
|
||||
func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*storageV1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) {
|
||||
defer GinkgoRecover()
|
||||
nodeVolumeMap := make(map[string][]string)
|
||||
nodeSelectorIndex := 0
|
||||
for index := 0; index < volumeCountPerInstance; index = index + volumesPerPod {
|
||||
if (volumeCountPerInstance - index) < volumesPerPod {
|
||||
volumesPerPod = volumeCountPerInstance - index
|
||||
}
|
||||
pvclaims := make([]*v1.PersistentVolumeClaim, volumesPerPod)
|
||||
for i := 0; i < volumesPerPod; i++ {
|
||||
By("Creating PVC using the Storage Class")
|
||||
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", sc[index%len(sc)]))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
pvclaims[i] = pvclaim
|
||||
}
|
||||
|
||||
By("Waiting for claim to be in bound phase")
|
||||
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Creating pod to attach PV to the node")
|
||||
nodeSelector := nodeSelectorList[nodeSelectorIndex%len(nodeSelectorList)]
|
||||
// Create pod to attach Volume to Node
|
||||
pod, err := framework.CreatePod(client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
for _, pv := range persistentvolumes {
|
||||
nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath)
|
||||
}
|
||||
By("Verify the volume is accessible and available in the pod")
|
||||
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
|
||||
nodeSelectorIndex++
|
||||
}
|
||||
nodeVolumeMapChan <- nodeVolumeMap
|
||||
close(nodeVolumeMapChan)
|
||||
}
|
||||
|
||||
func createNodeLabels(client clientset.Interface, namespace string, nodes *v1.NodeList) []*NodeSelector {
|
||||
var nodeSelectorList []*NodeSelector
|
||||
for i, node := range nodes.Items {
|
||||
labelVal := "vsphere_e2e_" + strconv.Itoa(i)
|
||||
nodeSelector := &NodeSelector{
|
||||
labelKey: NodeLabelKey,
|
||||
labelValue: labelVal,
|
||||
}
|
||||
nodeSelectorList = append(nodeSelectorList, nodeSelector)
|
||||
framework.AddOrUpdateLabelOnNode(client, node.Name, NodeLabelKey, labelVal)
|
||||
}
|
||||
return nodeSelectorList
|
||||
}
|
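A condensed sketch of the fan-out/aggregate shape the scale spec above relies on: N workers each send exactly one node-to-volumes map and the caller receives exactly N results, so the per-node lists can be merged without extra synchronization. The helper name and the work callback are illustrative, not part of the generated file.

// fanOutVolumeWork is a hypothetical illustration of the pattern.
func fanOutVolumeWork(workers int, work func(id int) map[string][]string) map[string][]string {
	results := make(chan map[string][]string, workers)
	for i := 0; i < workers; i++ {
		go func(id int) {
			defer GinkgoRecover()
			results <- work(id) // each worker reports exactly once
		}(i)
	}
	merged := make(map[string][]string)
	for i := 0; i < workers; i++ {
		for node, vols := range <-results {
			merged[node] = append(merged[node], vols...)
		}
	}
	return merged
}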
153
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/vsphere_statefulsets.go
generated
vendored
Normal file
@@ -0,0 +1,153 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package vsphere
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
/*
|
||||
Test performs following operations
|
||||
|
||||
Steps
|
||||
1. Create a storage class with thin diskformat.
|
||||
2. Create nginx service.
|
||||
3. Create nginx statefulsets with 3 replicas.
|
||||
4. Wait until all Pods are ready and PVCs are bound to PVs.
|
||||
5. Verify volumes are accessible in all statefulset pods by creating an empty file.
|
||||
6. Scale down statefulsets to 2 replicas.
|
||||
7. Scale up statefulsets to 4 replicas.
|
||||
8. Scale down statefulsets to 0 replicas and delete all pods.
|
||||
9. Delete all PVCs from the test namespace.
|
||||
10. Delete the storage class.
|
||||
*/
|
||||
|
||||
const (
|
||||
manifestPath = "test/e2e/testing-manifests/statefulset/nginx"
|
||||
mountPath = "/usr/share/nginx/html"
|
||||
storageclassname = "nginx-sc"
|
||||
)
|
||||
|
||||
var _ = utils.SIGDescribe("vsphere statefulset", func() {
|
||||
f := framework.NewDefaultFramework("vsphere-statefulset")
|
||||
var (
|
||||
namespace string
|
||||
client clientset.Interface
|
||||
)
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("vsphere")
|
||||
namespace = f.Namespace.Name
|
||||
client = f.ClientSet
|
||||
Bootstrap(f)
|
||||
})
|
||||
AfterEach(func() {
|
||||
framework.Logf("Deleting all statefulset in namespace: %v", namespace)
|
||||
framework.DeleteAllStatefulSets(client, namespace)
|
||||
})
|
||||
|
||||
It("vsphere statefulset testing", func() {
|
||||
By("Creating StorageClass for Statefulset")
|
||||
scParameters := make(map[string]string)
|
||||
scParameters["diskformat"] = "thin"
|
||||
scSpec := getVSphereStorageClassSpec(storageclassname, scParameters)
|
||||
sc, err := client.StorageV1().StorageClasses().Create(scSpec)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer client.StorageV1().StorageClasses().Delete(sc.Name, nil)
|
||||
|
||||
By("Creating statefulset")
|
||||
statefulsetTester := framework.NewStatefulSetTester(client)
|
||||
statefulset := statefulsetTester.CreateStatefulSet(manifestPath, namespace)
|
||||
replicas := *(statefulset.Spec.Replicas)
|
||||
// Waiting for pods status to be Ready
|
||||
statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas)
|
||||
Expect(statefulsetTester.CheckMount(statefulset, mountPath)).NotTo(HaveOccurred())
|
||||
ssPodsBeforeScaleDown := statefulsetTester.GetPodList(statefulset)
|
||||
Expect(ssPodsBeforeScaleDown.Items).NotTo(BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
|
||||
Expect(len(ssPodsBeforeScaleDown.Items) == int(replicas)).To(BeTrue(), "Number of Pods in the statefulset should match with number of replicas")
|
||||
|
||||
// Get the list of Volumes attached to Pods before scale down
|
||||
volumesBeforeScaleDown := make(map[string]string)
|
||||
for _, sspod := range ssPodsBeforeScaleDown.Items {
|
||||
_, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, volumespec := range sspod.Spec.Volumes {
|
||||
if volumespec.PersistentVolumeClaim != nil {
|
||||
volumePath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
|
||||
volumesBeforeScaleDown[volumePath] = volumespec.PersistentVolumeClaim.ClaimName
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1))
|
||||
_, scaledownErr := statefulsetTester.Scale(statefulset, replicas-1)
|
||||
Expect(scaledownErr).NotTo(HaveOccurred())
|
||||
statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas-1)
|
||||
|
||||
// After scale down, verify vsphere volumes are detached from deleted pods
|
||||
By("Verify Volumes are detached from Nodes after Statefulsets is scaled down")
|
||||
for _, sspod := range ssPodsBeforeScaleDown.Items {
|
||||
_, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
Expect(apierrs.IsNotFound(err)).To(BeTrue())
|
||||
for _, volumespec := range sspod.Spec.Volumes {
|
||||
if volumespec.PersistentVolumeClaim != nil {
|
||||
vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
|
||||
framework.Logf("Waiting for Volume: %q to detach from Node: %q", vSpherediskPath, sspod.Spec.NodeName)
|
||||
Expect(waitForVSphereDiskToDetach(vSpherediskPath, sspod.Spec.NodeName)).NotTo(HaveOccurred())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas))
|
||||
_, scaleupErr := statefulsetTester.Scale(statefulset, replicas)
|
||||
Expect(scaleupErr).NotTo(HaveOccurred())
|
||||
statefulsetTester.WaitForStatusReplicas(statefulset, replicas)
|
||||
statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas)
|
||||
|
||||
ssPodsAfterScaleUp := statefulsetTester.GetPodList(statefulset)
|
||||
Expect(ssPodsAfterScaleUp.Items).NotTo(BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
|
||||
Expect(len(ssPodsAfterScaleUp.Items) == int(replicas)).To(BeTrue(), "Number of Pods in the statefulset should match with number of replicas")
|
||||
|
||||
// After scale up, verify all vsphere volumes are attached to node VMs.
|
||||
By("Verify all volumes are attached to Nodes after Statefulsets is scaled up")
|
||||
for _, sspod := range ssPodsAfterScaleUp.Items {
|
||||
err := framework.WaitForPodsReady(client, statefulset.Namespace, sspod.Name, 0)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
pod, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, volumespec := range pod.Spec.Volumes {
|
||||
if volumespec.PersistentVolumeClaim != nil {
|
||||
vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
|
||||
framework.Logf("Verify Volume: %q is attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName)
|
||||
// Verify scale up has re-attached the same volumes and not introduced new volume
|
||||
Expect(volumesBeforeScaleDown[vSpherediskPath] == "").To(BeFalse())
|
||||
isVolumeAttached, verifyDiskAttachedError := diskIsAttached(vSpherediskPath, sspod.Spec.NodeName)
|
||||
Expect(isVolumeAttached).To(BeTrue())
|
||||
Expect(verifyDiskAttachedError).NotTo(HaveOccurred())
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
172
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/vsphere_stress.go
generated
vendored
Normal file
@@ -0,0 +1,172 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package vsphere
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
storageV1 "k8s.io/api/storage/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
/*
|
||||
Induce stress to create volumes in parallel with multiple threads based on user configurable values for number of threads and iterations per thread.
|
||||
The following actions will be performed as part of this test.
|
||||
|
||||
1. Create Storage Classes of 4 Categories (Default, SC with Non Default Datastore, SC with SPBM Policy, SC with VSAN Storage Capabilities.)
|
||||
2. Read VCP_STRESS_INSTANCES, VCP_STRESS_ITERATIONS, VSPHERE_SPBM_POLICY_NAME and VSPHERE_DATASTORE from the system environment.
|
||||
3. Launch goroutine for volume lifecycle operations.
|
||||
4. Each routine instance iterates n times, where n is read from the system env VCP_STRESS_ITERATIONS.
|
||||
5. Each iteration creates 1 PVC and 1 pod using the provisioned PV, verifies the disk is attached to the node and that the pod can access the volume, deletes the pod, and finally deletes the PVC.
|
||||
*/
|
||||
var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", func() {
|
||||
f := framework.NewDefaultFramework("vcp-stress")
|
||||
var (
|
||||
client clientset.Interface
|
||||
namespace string
|
||||
instances int
|
||||
iterations int
|
||||
policyName string
|
||||
datastoreName string
|
||||
err error
|
||||
scNames = []string{storageclass1, storageclass2, storageclass3, storageclass4}
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("vsphere")
|
||||
client = f.ClientSet
|
||||
namespace = f.Namespace.Name
|
||||
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
|
||||
|
||||
// If VCP_STRESS_INSTANCES = 12 and VCP_STRESS_ITERATIONS = 10, 12 threads will run in parallel for 10 iterations each,
|
||||
// resulting in 120 volume and pod creations. Volumes will be provisioned with each of the different types of Storage Class.
|
||||
// Each iteration creates a PVC, verifies the PV is provisioned, creates a pod, verifies the volume is attached to the node, and then deletes the pod and the PVC.
|
||||
instances = GetAndExpectIntEnvVar(VCPStressInstances)
|
||||
Expect(instances <= volumesPerNode*len(nodeList.Items)).To(BeTrue(), fmt.Sprintf("Number of Instances should be less or equal: %v", volumesPerNode*len(nodeList.Items)))
|
||||
Expect(instances > len(scNames)).To(BeTrue(), "VCP_STRESS_INSTANCES should be greater than 3 to utilize all 4 types of storage classes")
|
||||
|
||||
iterations = GetAndExpectIntEnvVar(VCPStressIterations)
|
||||
Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_STRESS_ITERATIONS")
|
||||
Expect(iterations > 0).To(BeTrue(), "VCP_STRESS_ITERATIONS should be greater than 0")
|
||||
|
||||
policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
|
||||
datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
|
||||
})
|
||||
|
||||
It("vsphere stress tests", func() {
|
||||
scArrays := make([]*storageV1.StorageClass, len(scNames))
|
||||
for index, scname := range scNames {
|
||||
// Create vSphere Storage Class
|
||||
By(fmt.Sprintf("Creating Storage Class : %v", scname))
|
||||
var sc *storageV1.StorageClass
|
||||
var err error
|
||||
switch scname {
|
||||
case storageclass1:
|
||||
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass1, nil))
|
||||
case storageclass2:
|
||||
var scVSanParameters map[string]string
|
||||
scVSanParameters = make(map[string]string)
|
||||
scVSanParameters[Policy_HostFailuresToTolerate] = "1"
|
||||
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters))
|
||||
case storageclass3:
|
||||
var scSPBMPolicyParameters map[string]string
|
||||
scSPBMPolicyParameters = make(map[string]string)
|
||||
scSPBMPolicyParameters[SpbmStoragePolicy] = policyName
|
||||
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters))
|
||||
case storageclass4:
|
||||
var scWithDSParameters map[string]string
|
||||
scWithDSParameters = make(map[string]string)
|
||||
scWithDSParameters[Datastore] = datastoreName
|
||||
scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters)
|
||||
sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec)
|
||||
}
|
||||
Expect(sc).NotTo(BeNil())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer client.StorageV1().StorageClasses().Delete(scname, nil)
|
||||
scArrays[index] = sc
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(instances)
|
||||
for instanceCount := 0; instanceCount < instances; instanceCount++ {
|
||||
instanceId := fmt.Sprintf("Thread:%v", instanceCount+1)
|
||||
go PerformVolumeLifeCycleInParallel(f, client, namespace, instanceId, scArrays[instanceCount%len(scArrays)], iterations, &wg)
|
||||
}
|
||||
wg.Wait()
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
// goroutine to perform volume lifecycle operations in parallel
|
||||
func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.Interface, namespace string, instanceId string, sc *storageV1.StorageClass, iterations int, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
defer GinkgoRecover()
|
||||
|
||||
for iterationCount := 0; iterationCount < iterations; iterationCount++ {
|
||||
logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceId, iterationCount+1)
|
||||
By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name))
|
||||
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "1Gi", sc))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
|
||||
|
||||
var pvclaims []*v1.PersistentVolumeClaim
|
||||
pvclaims = append(pvclaims, pvclaim)
|
||||
By(fmt.Sprintf("%v Waiting for claim: %v to be in bound phase", logPrefix, pvclaim.Name))
|
||||
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name))
|
||||
// Create pod to attach Volume to Node
|
||||
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name))
|
||||
Expect(f.WaitForPodRunningSlow(pod.Name)).NotTo(HaveOccurred())
|
||||
|
||||
// Get the copy of the Pod to know the assigned node name.
|
||||
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("%v Verifing the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
|
||||
isVolumeAttached, verifyDiskAttachedError := diskIsAttached(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
|
||||
Expect(isVolumeAttached).To(BeTrue())
|
||||
Expect(verifyDiskAttachedError).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("%v Verifing the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name))
|
||||
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
|
||||
|
||||
By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name))
|
||||
err = framework.DeletePodWithWait(f, client, pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
|
||||
err = waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name))
|
||||
Expect(framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)).NotTo(HaveOccurred())
|
||||
}
|
||||
}
|
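For reference, a minimal sketch of driving PerformVolumeLifeCycleInParallel outside the spec body, assuming f, client, namespace and sc were prepared by the framework exactly as in the BeforeEach above; the wrapper name and worker count are illustrative.

// runStressWorkers is a hypothetical wrapper, not part of this file.
func runStressWorkers(f *framework.Framework, client clientset.Interface, namespace string, sc *storageV1.StorageClass) {
	const workers = 2
	var wg sync.WaitGroup
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		// Each worker runs 3 full create/attach/verify/delete iterations.
		go PerformVolumeLifeCycleInParallel(f, client, namespace, fmt.Sprintf("Thread:%d", i+1), sc, 3, &wg)
	}
	wg.Wait()
}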
822
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/vsphere_utils.go
generated
vendored
Normal file
@@ -0,0 +1,822 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package vsphere
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/vmware/govmomi/find"
|
||||
"github.com/vmware/govmomi/object"
|
||||
"github.com/vmware/govmomi/vim25/mo"
|
||||
vim25types "github.com/vmware/govmomi/vim25/types"
|
||||
vimtypes "github.com/vmware/govmomi/vim25/types"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
storage "k8s.io/api/storage/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
volumesPerNode = 55
|
||||
storageclass1 = "sc-default"
|
||||
storageclass2 = "sc-vsan"
|
||||
storageclass3 = "sc-spbm"
|
||||
storageclass4 = "sc-user-specified-ds"
|
||||
DummyDiskName = "kube-dummyDisk.vmdk"
|
||||
ProviderPrefix = "vsphere://"
|
||||
)
|
||||
|
||||
// volumeState represents the state of a volume.
|
||||
type volumeState int32
|
||||
|
||||
const (
|
||||
volumeStateDetached volumeState = 1
|
||||
volumeStateAttached volumeState = 2
|
||||
)
|
||||
|
||||
// Wait until vsphere volumes are detached from the list of nodes or time out after 5 minutes
|
||||
func waitForVSphereDisksToDetach(nodeVolumes map[string][]string) error {
|
||||
var (
|
||||
err error
|
||||
disksAttached = true
|
||||
detachTimeout = 5 * time.Minute
|
||||
detachPollTime = 10 * time.Second
|
||||
)
|
||||
err = wait.Poll(detachPollTime, detachTimeout, func() (bool, error) {
|
||||
attachedResult, err := disksAreAttached(nodeVolumes)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for nodeName, nodeVolumes := range attachedResult {
|
||||
for volumePath, attached := range nodeVolumes {
|
||||
if attached {
|
||||
framework.Logf("Waiting for volumes %q to detach from %q.", volumePath, string(nodeName))
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
disksAttached = false
|
||||
framework.Logf("Volume are successfully detached from all the nodes: %+v", nodeVolumes)
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if disksAttached {
|
||||
return fmt.Errorf("Gave up waiting for volumes to detach after %v", detachTimeout)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Wait until vsphere vmdk moves to expected state on the given node, or time out after 6 minutes
|
||||
func waitForVSphereDiskStatus(volumePath string, nodeName string, expectedState volumeState) error {
|
||||
var (
|
||||
err error
|
||||
diskAttached bool
|
||||
currentState volumeState
|
||||
timeout = 6 * time.Minute
|
||||
pollTime = 10 * time.Second
|
||||
)
|
||||
|
||||
var attachedState = map[bool]volumeState{
|
||||
true: volumeStateAttached,
|
||||
false: volumeStateDetached,
|
||||
}
|
||||
|
||||
var attachedStateMsg = map[volumeState]string{
|
||||
volumeStateAttached: "attached to",
|
||||
volumeStateDetached: "detached from",
|
||||
}
|
||||
|
||||
err = wait.Poll(pollTime, timeout, func() (bool, error) {
|
||||
diskAttached, err = diskIsAttached(volumePath, nodeName)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
|
||||
currentState = attachedState[diskAttached]
|
||||
if currentState == expectedState {
|
||||
framework.Logf("Volume %q has successfully %s %q", volumePath, attachedStateMsg[currentState], nodeName)
|
||||
return true, nil
|
||||
}
|
||||
framework.Logf("Waiting for Volume %q to be %s %q.", volumePath, attachedStateMsg[expectedState], nodeName)
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if currentState != expectedState {
|
||||
err = fmt.Errorf("Gave up waiting for Volume %q to be %s %q after %v", volumePath, attachedStateMsg[expectedState], nodeName, timeout)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait until vsphere vmdk is attached to the given node or time out after 6 minutes
|
||||
func waitForVSphereDiskToAttach(volumePath string, nodeName string) error {
|
||||
return waitForVSphereDiskStatus(volumePath, nodeName, volumeStateAttached)
|
||||
}
|
||||
|
||||
// Wait until vsphere vmdk is detached from the given node or time out after 6 minutes
|
||||
func waitForVSphereDiskToDetach(volumePath string, nodeName string) error {
|
||||
return waitForVSphereDiskStatus(volumePath, nodeName, volumeStateDetached)
|
||||
}
|
||||
|
||||
// function to create vsphere volume spec with given VMDK volume path, Reclaim Policy and labels
|
||||
func getVSpherePersistentVolumeSpec(volumePath string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy, labels map[string]string) *v1.PersistentVolume {
|
||||
var (
|
||||
pvConfig framework.PersistentVolumeConfig
|
||||
pv *v1.PersistentVolume
|
||||
claimRef *v1.ObjectReference
|
||||
)
|
||||
pvConfig = framework.PersistentVolumeConfig{
|
||||
NamePrefix: "vspherepv-",
|
||||
PVSource: v1.PersistentVolumeSource{
|
||||
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
|
||||
VolumePath: volumePath,
|
||||
FSType: "ext4",
|
||||
},
|
||||
},
|
||||
Prebind: nil,
|
||||
}
|
||||
|
||||
pv = &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: pvConfig.NamePrefix,
|
||||
Annotations: map[string]string{
|
||||
util.VolumeGidAnnotationKey: "777",
|
||||
},
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy,
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
|
||||
},
|
||||
PersistentVolumeSource: pvConfig.PVSource,
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadWriteOnce,
|
||||
},
|
||||
ClaimRef: claimRef,
|
||||
},
|
||||
}
|
||||
if labels != nil {
|
||||
pv.Labels = labels
|
||||
}
|
||||
return pv
|
||||
}
|
||||
|
||||
// function to get vsphere persistent volume claim spec with given selector labels.
|
||||
func getVSpherePersistentVolumeClaimSpec(namespace string, labels map[string]string) *v1.PersistentVolumeClaim {
|
||||
var (
|
||||
pvc *v1.PersistentVolumeClaim
|
||||
)
|
||||
pvc = &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "pvc-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadWriteOnce,
|
||||
},
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
if labels != nil {
|
||||
pvc.Spec.Selector = &metav1.LabelSelector{MatchLabels: labels}
|
||||
}
|
||||
|
||||
return pvc
|
||||
}
|
||||
|
||||
// function to write content to the volume backed by given PVC
|
||||
func writeContentToVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
|
||||
utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data")
|
||||
framework.Logf("Done with writing content to volume")
|
||||
}
|
||||
|
||||
// function to verify content is matching on the volume backed for given PVC
|
||||
func verifyContentOfVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
|
||||
utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "grep '"+expectedContent+"' /mnt/test/data")
|
||||
framework.Logf("Successfully verified content of the volume")
|
||||
}
|
||||
|
||||
func getVSphereStorageClassSpec(name string, scParameters map[string]string) *storage.StorageClass {
|
||||
var sc *storage.StorageClass
|
||||
|
||||
sc = &storage.StorageClass{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "StorageClass",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Provisioner: "kubernetes.io/vsphere-volume",
|
||||
}
|
||||
if scParameters != nil {
|
||||
sc.Parameters = scParameters
|
||||
}
|
||||
return sc
|
||||
}
|
||||
|
||||
func getVSphereClaimSpecWithStorageClass(ns string, diskSize string, storageclass *storage.StorageClass) *v1.PersistentVolumeClaim {
|
||||
claim := &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "pvc-",
|
||||
Namespace: ns,
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadWriteOnce,
|
||||
},
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse(diskSize),
|
||||
},
|
||||
},
|
||||
StorageClassName: &(storageclass.Name),
|
||||
},
|
||||
}
|
||||
return claim
|
||||
}
|
||||
|
||||
// func to get pod spec with given volume claim, node selector labels and command
|
||||
func getVSpherePodSpecWithClaim(claimName string, nodeSelectorKV map[string]string, command string) *v1.Pod {
|
||||
pod := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "pod-pvc-",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "volume-tester",
|
||||
Image: "busybox",
|
||||
Command: []string{"/bin/sh"},
|
||||
Args: []string{"-c", command},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "my-volume",
|
||||
MountPath: "/mnt/test",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "my-volume",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: claimName,
|
||||
ReadOnly: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
if nodeSelectorKV != nil {
|
||||
pod.Spec.NodeSelector = nodeSelectorKV
|
||||
}
|
||||
return pod
|
||||
}
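The spec builders above are meant to be composed; a minimal sketch of chaining them into a storage class, a claim and a pod spec, assuming the claim name passed in is the name the API server assigns once the PVC (which uses GenerateName) is created. The function name and parameters are illustrative only.

// buildThinProvisionedWorkload is a hypothetical composition example.
func buildThinProvisionedWorkload(ns, claimName string) (*storage.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
	sc := getVSphereStorageClassSpec("thin-sc", map[string]string{"diskformat": "thin"})
	pvc := getVSphereClaimSpecWithStorageClass(ns, "2Gi", sc)
	// The pod writes a marker file so the mount can be verified afterwards.
	pod := getVSpherePodSpecWithClaim(claimName, nil, "touch /mnt/test/marker && sleep 3600")
	return sc, pvc, pod
}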
|
||||
|
||||
// func to get pod spec with given volume paths, node selector labels and container commands
|
||||
func getVSpherePodSpecWithVolumePaths(volumePaths []string, keyValuelabel map[string]string, commands []string) *v1.Pod {
|
||||
var volumeMounts []v1.VolumeMount
|
||||
var volumes []v1.Volume
|
||||
|
||||
for index, volumePath := range volumePaths {
|
||||
name := fmt.Sprintf("volume%v", index+1)
|
||||
volumeMounts = append(volumeMounts, v1.VolumeMount{Name: name, MountPath: "/mnt/" + name})
|
||||
vsphereVolume := new(v1.VsphereVirtualDiskVolumeSource)
|
||||
vsphereVolume.VolumePath = volumePath
|
||||
vsphereVolume.FSType = "ext4"
|
||||
volumes = append(volumes, v1.Volume{Name: name})
|
||||
volumes[index].VolumeSource.VsphereVolume = vsphereVolume
|
||||
}
|
||||
|
||||
if commands == nil || len(commands) == 0 {
|
||||
commands = []string{
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
"while true; do sleep 2; done",
|
||||
}
|
||||
}
|
||||
pod := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "vsphere-e2e-",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "vsphere-e2e-container-" + string(uuid.NewUUID()),
|
||||
Image: "busybox",
|
||||
Command: commands,
|
||||
VolumeMounts: volumeMounts,
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Volumes: volumes,
|
||||
},
|
||||
}
|
||||
|
||||
if keyValuelabel != nil {
|
||||
pod.Spec.NodeSelector = keyValuelabel
|
||||
}
|
||||
return pod
|
||||
}
|
||||
|
||||
func verifyFilesExistOnVSphereVolume(namespace string, podName string, filePaths ...string) {
|
||||
for _, filePath := range filePaths {
|
||||
_, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/ls", filePath)
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed to verify file: %q on the pod: %q", filePath, podName))
|
||||
}
|
||||
}
|
||||
|
||||
func createEmptyFilesOnVSphereVolume(namespace string, podName string, filePaths []string) {
|
||||
for _, filePath := range filePaths {
|
||||
err := framework.CreateEmptyFileOnPod(namespace, podName, filePath)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
}
|
||||
|
||||
// verify volumes are attached to the node and are accessible in pod
|
||||
func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persistentvolumes []*v1.PersistentVolume) {
|
||||
nodeName := pod.Spec.NodeName
|
||||
namespace := pod.Namespace
|
||||
for index, pv := range persistentvolumes {
|
||||
// Verify disks are attached to the node
|
||||
isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath))
|
||||
// Verify Volumes are accessible
|
||||
filepath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt")
|
||||
_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
}
|
||||
|
||||
// Get vSphere Volume Path from PVC
|
||||
func getvSphereVolumePathFromClaim(client clientset.Interface, namespace string, claimName string) string {
|
||||
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return pv.Spec.VsphereVolume.VolumePath
|
||||
}
|
||||
|
||||
// Get canonical volume path for volume Path.
|
||||
// Example1: The canonical path for volume path - [vsanDatastore] kubevols/volume.vmdk will be [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk
|
||||
// Example2: The canonical path for volume path - [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk will be same as volume Path.
|
||||
func getCanonicalVolumePath(ctx context.Context, dc *object.Datacenter, volumePath string) (string, error) {
|
||||
var folderID string
|
||||
canonicalVolumePath := volumePath
|
||||
dsPathObj, err := getDatastorePathObjFromVMDiskPath(volumePath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
dsPath := strings.Split(strings.TrimSpace(dsPathObj.Path), "/")
|
||||
if len(dsPath) <= 1 {
|
||||
return canonicalVolumePath, nil
|
||||
}
|
||||
datastore := dsPathObj.Datastore
|
||||
dsFolder := dsPath[0]
|
||||
// Get the datastore folder ID if datastore or folder doesn't exist in datastoreFolderIDMap
|
||||
if !isValidUUID(dsFolder) {
|
||||
dummyDiskVolPath := "[" + datastore + "] " + dsFolder + "/" + DummyDiskName
|
||||
// Querying a non-existent dummy disk on the datastore folder.
|
||||
// It would fail and return a folder ID in the error message.
|
||||
_, err := getVirtualDiskPage83Data(ctx, dc, dummyDiskVolPath)
|
||||
if err != nil {
|
||||
re := regexp.MustCompile("File (.*?) was not found")
|
||||
match := re.FindStringSubmatch(err.Error())
|
||||
canonicalVolumePath = match[1]
|
||||
}
|
||||
}
|
||||
diskPath := getPathFromVMDiskPath(canonicalVolumePath)
|
||||
if diskPath == "" {
|
||||
return "", fmt.Errorf("Failed to parse canonicalVolumePath: %s in getcanonicalVolumePath method", canonicalVolumePath)
|
||||
}
|
||||
folderID = strings.Split(strings.TrimSpace(diskPath), "/")[0]
|
||||
canonicalVolumePath = strings.Replace(volumePath, dsFolder, folderID, 1)
|
||||
return canonicalVolumePath, nil
|
||||
}
|
||||
|
||||
// getPathFromVMDiskPath retrieves the path from VM Disk Path.
|
||||
// Example: For vmDiskPath - [vsanDatastore] kubevols/volume.vmdk, the path is kubevols/volume.vmdk
|
||||
func getPathFromVMDiskPath(vmDiskPath string) string {
|
||||
datastorePathObj := new(object.DatastorePath)
|
||||
isSuccess := datastorePathObj.FromString(vmDiskPath)
|
||||
if !isSuccess {
|
||||
framework.Logf("Failed to parse vmDiskPath: %s", vmDiskPath)
|
||||
return ""
|
||||
}
|
||||
return datastorePathObj.Path
|
||||
}
|
||||
|
||||
// getDatastorePathObjFromVMDiskPath gets the datastorePathObj from VM disk path.
|
||||
func getDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath, error) {
|
||||
datastorePathObj := new(object.DatastorePath)
|
||||
isSuccess := datastorePathObj.FromString(vmDiskPath)
|
||||
if !isSuccess {
|
||||
framework.Logf("Failed to parse volPath: %s", vmDiskPath)
|
||||
return nil, fmt.Errorf("Failed to parse volPath: %s", vmDiskPath)
|
||||
}
|
||||
return datastorePathObj, nil
|
||||
}
|
||||
|
||||
// getVirtualDiskPage83Data gets the virtual disk UUID by diskPath
|
||||
func getVirtualDiskPage83Data(ctx context.Context, dc *object.Datacenter, diskPath string) (string, error) {
|
||||
if len(diskPath) > 0 && filepath.Ext(diskPath) != ".vmdk" {
|
||||
diskPath += ".vmdk"
|
||||
}
|
||||
vdm := object.NewVirtualDiskManager(dc.Client())
|
||||
// Returns uuid of vmdk virtual disk
|
||||
diskUUID, err := vdm.QueryVirtualDiskUuid(ctx, diskPath, dc)
|
||||
|
||||
if err != nil {
|
||||
glog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err)
|
||||
return "", err
|
||||
}
|
||||
diskUUID = formatVirtualDiskUUID(diskUUID)
|
||||
return diskUUID, nil
|
||||
}
|
||||
|
||||
// formatVirtualDiskUUID removes any spaces and hyphens in UUID
|
||||
// Example UUID input is 42375390-71f9-43a3-a770-56803bcd7baa and output after format is 4237539071f943a3a77056803bcd7baa
|
||||
func formatVirtualDiskUUID(uuid string) string {
|
||||
uuidwithNoSpace := strings.Replace(uuid, " ", "", -1)
|
||||
uuidWithNoHypens := strings.Replace(uuidwithNoSpace, "-", "", -1)
|
||||
return strings.ToLower(uuidWithNoHypens)
|
||||
}
|
||||
|
||||
// isValidUUID checks if the string is a valid UUID.
|
||||
func isValidUUID(uuid string) bool {
|
||||
r := regexp.MustCompile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$")
|
||||
return r.MatchString(uuid)
|
||||
}
|
||||
|
||||
// removeStorageClusterORFolderNameFromVDiskPath removes the cluster or folder path from the vDiskPath
|
||||
// for vDiskPath [DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value is [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk
|
||||
// for vDiskPath [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value remains same [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk
|
||||
func removeStorageClusterORFolderNameFromVDiskPath(vDiskPath string) string {
|
||||
datastore := regexp.MustCompile("\\[(.*?)\\]").FindStringSubmatch(vDiskPath)[1]
|
||||
if filepath.Base(datastore) != datastore {
|
||||
vDiskPath = strings.Replace(vDiskPath, datastore, filepath.Base(datastore), 1)
|
||||
}
|
||||
return vDiskPath
|
||||
}
|
||||
|
||||
// getVirtualDeviceByPath gets the virtual device by path
|
||||
func getVirtualDeviceByPath(ctx context.Context, vm *object.VirtualMachine, diskPath string) (vim25types.BaseVirtualDevice, error) {
|
||||
vmDevices, err := vm.Device(ctx)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// filter vm devices to retrieve device for the given vmdk file identified by disk path
|
||||
for _, device := range vmDevices {
|
||||
if vmDevices.TypeName(device) == "VirtualDisk" {
|
||||
virtualDevice := device.GetVirtualDevice()
|
||||
if backing, ok := virtualDevice.Backing.(*vim25types.VirtualDiskFlatVer2BackingInfo); ok {
|
||||
if matchVirtualDiskAndVolPath(backing.FileName, diskPath) {
|
||||
framework.Logf("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath)
|
||||
return device, nil
|
||||
} else {
|
||||
framework.Logf("VirtualDisk backing filename %q does not match with diskPath %q", backing.FileName, diskPath)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func matchVirtualDiskAndVolPath(diskPath, volPath string) bool {
|
||||
fileExt := ".vmdk"
|
||||
diskPath = strings.TrimSuffix(diskPath, fileExt)
|
||||
volPath = strings.TrimSuffix(volPath, fileExt)
|
||||
return diskPath == volPath
|
||||
}
|
||||
|
||||
// convertVolPathsToDevicePaths removes the cluster or folder path from volPaths and converts them to canonical paths
|
||||
func convertVolPathsToDevicePaths(ctx context.Context, nodeVolumes map[string][]string) (map[string][]string, error) {
|
||||
vmVolumes := make(map[string][]string)
|
||||
for nodeName, volPaths := range nodeVolumes {
|
||||
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
|
||||
datacenter := nodeInfo.VSphere.GetDatacenterFromObjectReference(ctx, nodeInfo.DataCenterRef)
|
||||
for i, volPath := range volPaths {
|
||||
deviceVolPath, err := convertVolPathToDevicePath(ctx, datacenter, volPath)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to convert vsphere volume path %s to device path for volume %s. err: %+v", volPath, deviceVolPath, err)
|
||||
return nil, err
|
||||
}
|
||||
volPaths[i] = deviceVolPath
|
||||
}
|
||||
vmVolumes[nodeName] = volPaths
|
||||
}
|
||||
return vmVolumes, nil
|
||||
}
|
||||
|
||||
// convertVolPathToDevicePath takes volPath and returns canonical volume path
|
||||
func convertVolPathToDevicePath(ctx context.Context, dc *object.Datacenter, volPath string) (string, error) {
|
||||
volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath)
|
||||
// Get the canonical volume path for volPath.
|
||||
canonicalVolumePath, err := getCanonicalVolumePath(ctx, dc, volPath)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err)
|
||||
return "", err
|
||||
}
|
||||
// Check if the volume path contains .vmdk extension. If not, add the extension and update the nodeVolumes Map
|
||||
if len(canonicalVolumePath) > 0 && filepath.Ext(canonicalVolumePath) != ".vmdk" {
|
||||
canonicalVolumePath += ".vmdk"
|
||||
}
|
||||
return canonicalVolumePath, nil
|
||||
}
|
||||
|
||||
// get .vmx file path for a virtual machine
|
||||
func getVMXFilePath(vmObject *object.VirtualMachine) (vmxPath string) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
var nodeVM mo.VirtualMachine
|
||||
err := vmObject.Properties(ctx, vmObject.Reference(), []string{"config.files"}, &nodeVM)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(nodeVM.Config).NotTo(BeNil())
|
||||
|
||||
vmxPath = nodeVM.Config.Files.VmPathName
|
||||
framework.Logf("vmx file path is %s", vmxPath)
|
||||
return vmxPath
|
||||
}
|
||||
|
||||
// verify ready node count. Try up to 3 minutes. Return true if the count matches the expected count
|
||||
func verifyReadyNodeCount(client clientset.Interface, expectedNodes int) bool {
|
||||
numNodes := 0
|
||||
for i := 0; i < 36; i++ {
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(client)
|
||||
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
|
||||
|
||||
numNodes = len(nodeList.Items)
|
||||
if numNodes == expectedNodes {
|
||||
break
|
||||
}
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
return (numNodes == expectedNodes)
|
||||
}
|
||||
|
||||
// poweroff nodeVM and confirm the poweroff state
|
||||
func poweroffNodeVM(nodeName string, vm *object.VirtualMachine) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
framework.Logf("Powering off node VM %s", nodeName)
|
||||
|
||||
_, err := vm.PowerOff(ctx)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOff)
|
||||
Expect(err).NotTo(HaveOccurred(), "Unable to power off the node")
|
||||
}
|
||||
|
||||
// poweron nodeVM and confirm the poweron state
|
||||
func poweronNodeVM(nodeName string, vm *object.VirtualMachine) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
framework.Logf("Powering on node VM %s", nodeName)
|
||||
|
||||
vm.PowerOn(ctx)
|
||||
err := vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOn)
|
||||
Expect(err).NotTo(HaveOccurred(), "Unable to power on the node")
|
||||
}
|
||||
|
||||
// unregister a nodeVM from VC
|
||||
func unregisterNodeVM(nodeName string, vm *object.VirtualMachine) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
poweroffNodeVM(nodeName, vm)
|
||||
|
||||
framework.Logf("Unregistering node VM %s", nodeName)
|
||||
err := vm.Unregister(ctx)
|
||||
Expect(err).NotTo(HaveOccurred(), "Unable to unregister the node")
|
||||
}
|
||||
|
||||
// register a nodeVM into a VC
|
||||
func registerNodeVM(nodeName, workingDir, vmxFilePath string, rpool *object.ResourcePool, host *object.HostSystem) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
framework.Logf("Registering node VM %s with vmx file path %s", nodeName, vmxFilePath)
|
||||
|
||||
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
|
||||
finder := find.NewFinder(nodeInfo.VSphere.Client.Client, false)
|
||||
|
||||
vmFolder, err := finder.FolderOrDefault(ctx, workingDir)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
registerTask, err := vmFolder.RegisterVM(ctx, vmxFilePath, nodeName, false, rpool, host)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = registerTask.Wait(ctx)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
vmPath := filepath.Join(workingDir, nodeName)
|
||||
vm, err := finder.VirtualMachine(ctx, vmPath)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
poweronNodeVM(nodeName, vm)
|
||||
}
|
||||
|
||||
// disksAreAttached takes a map of nodes to their volume paths and returns a map of node to volume path to attachment state
|
||||
func disksAreAttached(nodeVolumes map[string][]string) (nodeVolumesAttachMap map[string]map[string]bool, err error) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
disksAttached := make(map[string]map[string]bool)
|
||||
if len(nodeVolumes) == 0 {
|
||||
return disksAttached, nil
|
||||
}
|
||||
// Convert volPaths into canonical form so that they can be compared with the VM device paths.
|
||||
vmVolumes, err := convertVolPathsToDevicePaths(ctx, nodeVolumes)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to convert volPaths to devicePaths: %+v. err: %+v", nodeVolumes, err)
|
||||
return nil, err
|
||||
}
|
||||
for vm, volumes := range vmVolumes {
|
||||
volumeAttachedMap := make(map[string]bool)
|
||||
for _, volume := range volumes {
|
||||
attached, err := diskIsAttached(volume, vm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
volumeAttachedMap[volume] = attached
|
||||
}
|
||||
disksAttached[vm] = volumeAttachedMap
|
||||
}
|
||||
return disksAttached, nil
|
||||
}
|
||||
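// Usage sketch (illustrative; the node name and volume path below are made-up examples):
// the returned map is keyed first by node name and then by canonical volume path, with the
// bool describing whether that volume is currently attached to that node.
//
//	attachedMap, err := disksAreAttached(map[string][]string{
//		"kubernetes-node1": {"[vsanDatastore] kubevols/e2e-vmdk-ns1.vmdk"},
//	})
//	// attachedMap["kubernetes-node1"]["[vsanDatastore] kubevols/e2e-vmdk-ns1.vmdk"]
//	// reports the attachment state of that disk on kubernetes-node1.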
|
||||
// diskIsAttached reports whether the disk is attached to the VM using controllers supported by the plugin.
|
||||
func diskIsAttached(volPath string, nodeName string) (bool, error) {
|
||||
// Create context
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
|
||||
Connect(ctx, nodeInfo.VSphere)
|
||||
vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
|
||||
volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath)
|
||||
device, err := getVirtualDeviceByPath(ctx, vm, volPath)
|
||||
if err != nil {
|
||||
framework.Logf("diskIsAttached failed to determine whether disk %q is still attached on node %q",
|
||||
volPath,
|
||||
nodeName)
|
||||
return false, err
|
||||
}
|
||||
if device == nil {
|
||||
return false, nil
|
||||
}
|
||||
framework.Logf("diskIsAttached found the disk %q attached on node %q", volPath, nodeName)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// getUUIDFromProviderID strips ProviderPrefix - "vsphere://" from the providerID
|
||||
// this gives the VM UUID which can be used to find Node VM from vCenter
|
||||
func getUUIDFromProviderID(providerID string) string {
|
||||
return strings.TrimPrefix(providerID, ProviderPrefix)
|
||||
}
|
||||
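// Example (illustrative UUID): with ProviderPrefix == "vsphere://",
//
//	getUUIDFromProviderID("vsphere://4237f07d-12ab-34cd-56ef-1234567890ab")
//
// returns "4237f07d-12ab-34cd-56ef-1234567890ab", which can then be used to look up the
// node VM in vCenter.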
|
||||
// GetReadySchedulableNodeInfos returns NodeInfo objects for all nodes that are Ready and schedulable
|
||||
func GetReadySchedulableNodeInfos() []*NodeInfo {
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
|
||||
var nodesInfo []*NodeInfo
|
||||
for _, node := range nodeList.Items {
|
||||
nodeInfo := TestContext.NodeMapper.GetNodeInfo(node.Name)
|
||||
if nodeInfo != nil {
|
||||
nodesInfo = append(nodesInfo, nodeInfo)
|
||||
}
|
||||
}
|
||||
return nodesInfo
|
||||
}
|
||||
|
||||
// GetReadySchedulableRandomNodeInfo returns the NodeInfo object for one Ready and schedulable node.
|
||||
// If multiple nodes are Ready and schedulable, one of them is selected randomly
|
||||
// and its associated NodeInfo object is returned.
|
||||
func GetReadySchedulableRandomNodeInfo() *NodeInfo {
|
||||
nodesInfo := GetReadySchedulableNodeInfos()
|
||||
rand.Seed(time.Now().Unix())
|
||||
Expect(nodesInfo).NotTo(BeEmpty())
|
||||
return nodesInfo[rand.Int()%len(nodesInfo)]
|
||||
}
|
||||
|
||||
// invokeVCenterServiceControl invokes the given command for the given service
|
||||
// via service-control on the given vCenter host over SSH.
|
||||
func invokeVCenterServiceControl(command, service, host string) error {
|
||||
sshCmd := fmt.Sprintf("service-control --%s %s", command, service)
|
||||
framework.Logf("Invoking command %v on vCenter host %v", sshCmd, host)
|
||||
result, err := framework.SSH(sshCmd, host, framework.TestContext.Provider)
|
||||
if err != nil || result.Code != 0 {
|
||||
framework.LogSSHResult(result)
|
||||
return fmt.Errorf("couldn't execute command: %s on vCenter host: %v", sshCmd, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
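// Usage sketch (illustrative; vcAddress is assumed to be a "<vCenter host>:22"-style SSH
// address supplied by the test configuration). A disruptive test such as the vpxd restart
// test in this package can drive a vCenter service through this helper roughly as follows:
//
//	err := invokeVCenterServiceControl("stop", "vpxd", vcAddress)
//	Expect(err).NotTo(HaveOccurred())
//	// ... exercise volume operations while the service is down ...
//	err = invokeVCenterServiceControl("start", "vpxd", vcAddress)
//	Expect(err).NotTo(HaveOccurred())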
|
||||
// expectVolumeToBeAttached checks if the given Volume is attached to the given
|
||||
// Node, else fails.
|
||||
func expectVolumeToBeAttached(nodeName, volumePath string) {
|
||||
isAttached, err := diskIsAttached(volumePath, nodeName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath))
|
||||
}
|
||||
|
||||
// expectVolumesToBeAttached checks if the given Volumes are attached to the
|
||||
// corresponding set of Nodes, else fails.
|
||||
func expectVolumesToBeAttached(pods []*v1.Pod, volumePaths []string) {
|
||||
for i, pod := range pods {
|
||||
nodeName := pod.Spec.NodeName
|
||||
volumePath := volumePaths[i]
|
||||
By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName))
|
||||
expectVolumeToBeAttached(nodeName, volumePath)
|
||||
}
|
||||
}
|
||||
|
||||
// expectFilesToBeAccessible checks if the given files are accessible on the
|
||||
// corresponding set of Nodes, else fails.
|
||||
func expectFilesToBeAccessible(namespace string, pods []*v1.Pod, filePaths []string) {
|
||||
for i, pod := range pods {
|
||||
podName := pod.Name
|
||||
filePath := filePaths[i]
|
||||
By(fmt.Sprintf("Verifying that file %v is accessible on pod %v", filePath, podName))
|
||||
verifyFilesExistOnVSphereVolume(namespace, podName, filePath)
|
||||
}
|
||||
}
|
||||
|
||||
// writeContentToPodFile writes the given content to the specified file.
|
||||
func writeContentToPodFile(namespace, podName, filePath, content string) error {
|
||||
_, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName,
|
||||
"--", "/bin/sh", "-c", fmt.Sprintf("echo '%s' > %s", content, filePath))
|
||||
return err
|
||||
}
|
||||
|
||||
// expectFileContentToMatch checks if a given file contains the specified
|
||||
// content, else fails.
|
||||
func expectFileContentToMatch(namespace, podName, filePath, content string) {
|
||||
_, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName,
|
||||
"--", "/bin/sh", "-c", fmt.Sprintf("grep '%s' %s", content, filePath))
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed to match content of file: %q on the pod: %q", filePath, podName))
|
||||
}
|
||||
|
||||
// expectFileContentsToMatch checks if the given contents match the ones present
|
||||
// in corresponding files on respective Pods, else fails.
|
||||
func expectFileContentsToMatch(namespace string, pods []*v1.Pod, filePaths []string, contents []string) {
|
||||
for i, pod := range pods {
|
||||
podName := pod.Name
|
||||
filePath := filePaths[i]
|
||||
By(fmt.Sprintf("Matching file content for %v on pod %v", filePath, podName))
|
||||
expectFileContentToMatch(namespace, podName, filePath, contents[i])
|
||||
}
|
||||
}
|
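// Composition sketch (illustrative; pods, volumePaths, filePaths and contents are assumed
// to have been built by the calling test, one entry per pod, as the disruptive tests in
// this package do):
//
//	expectVolumesToBeAttached(pods, volumePaths)
//	expectFilesToBeAccessible(namespace, pods, filePaths)
//	framework.ExpectNoError(writeContentToPodFile(namespace, pods[0].Name, filePaths[0], "hello"))
//	expectFileContentsToMatch(namespace, pods, filePaths, contents)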
131
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go
generated
vendored
Normal file
@@ -0,0 +1,131 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package vsphere
|
||||
|
||||
import (
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
/*
|
||||
Tests to verify volume provisioning on a clustered datastore
|
||||
1. Static provisioning
|
||||
2. Dynamic provisioning
|
||||
3. Dynamic provisioning with spbm policy
|
||||
|
||||
This test reads the following environment variables:
|
||||
1. CLUSTER_DATASTORE, which should be set to a clustered datastore
|
||||
2. VSPHERE_SPBM_POLICY_DS_CLUSTER, which should be set to a tag-based SPBM policy associated with the clustered datastore
|
||||
*/
|
||||
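// Example values (illustrative only, not defaults):
//
//	CLUSTER_DATASTORE=DatastoreCluster/sharedVmfs-0
//	VSPHERE_SPBM_POLICY_DS_CLUSTER=tag-based-cluster-ds-policy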
var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:vsphere]", func() {
|
||||
f := framework.NewDefaultFramework("volume-provision")
|
||||
|
||||
var (
|
||||
client clientset.Interface
|
||||
namespace string
|
||||
scParameters map[string]string
|
||||
clusterDatastore string
|
||||
nodeInfo *NodeInfo
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("vsphere")
|
||||
Bootstrap(f)
|
||||
client = f.ClientSet
|
||||
namespace = f.Namespace.Name
|
||||
nodeInfo = GetReadySchedulableRandomNodeInfo()
|
||||
scParameters = make(map[string]string)
|
||||
clusterDatastore = GetAndExpectStringEnvVar(VCPClusterDatastore)
|
||||
})
|
||||
|
||||
/*
|
||||
Steps:
|
||||
1. Create volume options with datastore to be a clustered datastore
|
||||
2. Create a vsphere volume
|
||||
3. Create podspec with volume path. Create a corresponding pod
|
||||
4. Verify disk is attached
|
||||
5. Delete the pod and wait for the disk to be detached
|
||||
6. Delete the volume
|
||||
*/
|
||||
|
||||
It("verify static provisioning on clustered datastore", func() {
|
||||
var volumePath string
|
||||
|
||||
By("creating a test vsphere volume")
|
||||
volumeOptions := new(VolumeOptions)
|
||||
volumeOptions.CapacityKB = 2097152
|
||||
volumeOptions.Name = "e2e-vmdk-" + namespace
|
||||
volumeOptions.Datastore = clusterDatastore
|
||||
|
||||
volumePath, err := nodeInfo.VSphere.CreateVolume(volumeOptions, nodeInfo.DataCenterRef)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
defer func() {
|
||||
By("Deleting the vsphere volume")
|
||||
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
|
||||
}()
|
||||
|
||||
podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nil, nil)
|
||||
|
||||
By("Creating pod")
|
||||
pod, err := client.CoreV1().Pods(namespace).Create(podspec)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
By("Waiting for pod to be ready")
|
||||
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
|
||||
|
||||
// get fresh pod info
|
||||
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
nodeName := pod.Spec.NodeName
|
||||
|
||||
By("Verifying volume is attached")
|
||||
expectVolumeToBeAttached(nodeName, volumePath)
|
||||
|
||||
By("Deleting pod")
|
||||
err = framework.DeletePodWithWait(f, client, pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Waiting for volumes to be detached from the node")
|
||||
err = waitForVSphereDiskToDetach(volumePath, nodeName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
/*
|
||||
Steps:
|
||||
1. Create storage class parameter and specify datastore to be a clustered datastore name
|
||||
2. invokeValidPolicyTest - util to do e2e dynamic provision test
|
||||
*/
|
||||
It("verify dynamic provision with default parameter on clustered datastore", func() {
|
||||
scParameters[Datastore] = clusterDatastore
|
||||
invokeValidPolicyTest(f, client, namespace, scParameters)
|
||||
})
|
||||
|
||||
/*
|
||||
Steps:
|
||||
1. Create storage class parameter and specify storage policy to be a tag based spbm policy
|
||||
2. invokeValidPolicyTest - util to do e2e dynamic provision test
|
||||
*/
|
||||
It("verify dynamic provision with spbm policy on clustered datastore", func() {
|
||||
policyDatastoreCluster := GetAndExpectStringEnvVar(SPBMPolicyDataStoreCluster)
|
||||
scParameters[SpbmStoragePolicy] = policyDatastoreCluster
|
||||
invokeValidPolicyTest(f, client, namespace, scParameters)
|
||||
})
|
||||
})
|
97
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/vsphere_volume_datastore.go
generated
vendored
Normal file
@@ -0,0 +1,97 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package vsphere
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
InvalidDatastore = "invalidDatastore"
|
||||
DatastoreSCName = "datastoresc"
|
||||
)
|
||||
|
||||
/*
|
||||
Test to verify datastore specified in storage-class is being honored while volume creation.
|
||||
|
||||
Steps
|
||||
1. Create StorageClass with invalid datastore.
|
||||
2. Create PVC which uses the StorageClass created in step 1.
|
||||
3. Expect the PVC to fail.
|
||||
4. Verify that the error returned on PVC failure is the expected one.
|
||||
*/
|
||||
|
||||
var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", func() {
|
||||
f := framework.NewDefaultFramework("volume-datastore")
|
||||
var (
|
||||
client clientset.Interface
|
||||
namespace string
|
||||
scParameters map[string]string
|
||||
)
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("vsphere")
|
||||
Bootstrap(f)
|
||||
client = f.ClientSet
|
||||
namespace = f.Namespace.Name
|
||||
scParameters = make(map[string]string)
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
if len(nodeList.Items) == 0 {
|
||||
framework.Failf("Unable to find ready and schedulable Node")
|
||||
}
|
||||
})
|
||||
|
||||
It("verify dynamically provisioned pv using storageclass fails on an invalid datastore", func() {
|
||||
By("Invoking Test for invalid datastore")
|
||||
scParameters[Datastore] = InvalidDatastore
|
||||
scParameters[DiskFormat] = ThinDisk
|
||||
err := invokeInvalidDatastoreTestNeg(client, namespace, scParameters)
|
||||
Expect(err).To(HaveOccurred())
|
||||
errorMsg := `Failed to provision volume with StorageClass \"` + DatastoreSCName + `\": The specified datastore ` + InvalidDatastore + ` is not a shared datastore across node VMs`
|
||||
if !strings.Contains(err.Error(), errorMsg) {
|
||||
Expect(err).NotTo(HaveOccurred(), errorMsg)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error {
|
||||
By("Creating Storage Class With Invalid Datastore")
|
||||
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DatastoreSCName, scParameters))
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
|
||||
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
|
||||
|
||||
By("Creating PVC using the Storage Class")
|
||||
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
|
||||
|
||||
By("Expect claim to fail provisioning volume")
|
||||
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
|
||||
return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
|
||||
}
|
212
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/vsphere_volume_diskformat.go
generated
vendored
Normal file
@@ -0,0 +1,212 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package vsphere
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/vmware/govmomi/object"
|
||||
"github.com/vmware/govmomi/vim25/types"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
/*
|
||||
Test to verify diskformat specified in storage-class is being honored while volume creation.
|
||||
Valid and supported options are eagerzeroedthick, zeroedthick and thin
|
||||
|
||||
Steps
|
||||
1. Create StorageClass with diskformat set to valid type
|
||||
2. Create PVC which uses the StorageClass created in step 1.
|
||||
3. Wait for PV to be provisioned.
|
||||
4. Wait for PVC's status to become Bound
|
||||
5. Create pod using PVC on specific node.
|
||||
6. Wait for Disk to be attached to the node.
|
||||
7. Get node VM's devices and find PV's Volume Disk.
|
||||
8. Get Backing Info of the Volume Disk and obtain EagerlyScrub and ThinProvisioned
|
||||
9. Based on the value of EagerlyScrub and ThinProvisioned, verify diskformat is correct.
|
||||
10. Delete pod and Wait for Volume Disk to be detached from the Node.
|
||||
11. Delete PVC, PV and Storage Class
|
||||
*/
|
||||
|
||||
var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() {
|
||||
f := framework.NewDefaultFramework("volume-disk-format")
|
||||
const (
|
||||
NodeLabelKey = "vsphere_e2e_label_volume_diskformat"
|
||||
)
|
||||
var (
|
||||
client clientset.Interface
|
||||
namespace string
|
||||
nodeName string
|
||||
isNodeLabeled bool
|
||||
nodeKeyValueLabel map[string]string
|
||||
nodeLabelValue string
|
||||
)
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("vsphere")
|
||||
Bootstrap(f)
|
||||
client = f.ClientSet
|
||||
namespace = f.Namespace.Name
|
||||
if !isNodeLabeled {
|
||||
nodeName = GetReadySchedulableRandomNodeInfo().Name
|
||||
nodeLabelValue = "vsphere_e2e_" + string(uuid.NewUUID())
|
||||
nodeKeyValueLabel = make(map[string]string)
|
||||
nodeKeyValueLabel[NodeLabelKey] = nodeLabelValue
|
||||
framework.AddOrUpdateLabelOnNode(client, nodeName, NodeLabelKey, nodeLabelValue)
|
||||
isNodeLabeled = true
|
||||
}
|
||||
})
|
||||
framework.AddCleanupAction(func() {
|
||||
// Cleanup actions will be called even when the tests are skipped, which leaves the namespace unset.
|
||||
if len(namespace) > 0 && len(nodeLabelValue) > 0 {
|
||||
framework.RemoveLabelOffNode(client, nodeName, NodeLabelKey)
|
||||
}
|
||||
})
|
||||
|
||||
It("verify disk format type - eagerzeroedthick is honored for dynamically provisioned pv using storageclass", func() {
|
||||
By("Invoking Test for diskformat: eagerzeroedthick")
|
||||
invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "eagerzeroedthick")
|
||||
})
|
||||
It("verify disk format type - zeroedthick is honored for dynamically provisioned pv using storageclass", func() {
|
||||
By("Invoking Test for diskformat: zeroedthick")
|
||||
invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "zeroedthick")
|
||||
})
|
||||
It("verify disk format type - thin is honored for dynamically provisioned pv using storageclass", func() {
|
||||
By("Invoking Test for diskformat: thin")
|
||||
invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "thin")
|
||||
})
|
||||
})
|
||||
|
||||
func invokeTest(f *framework.Framework, client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, diskFormat string) {
|
||||
|
||||
framework.Logf("Invoking Test for DiskFomat: %s", diskFormat)
|
||||
scParameters := make(map[string]string)
|
||||
scParameters["diskformat"] = diskFormat
|
||||
|
||||
By("Creating Storage Class With DiskFormat")
|
||||
storageClassSpec := getVSphereStorageClassSpec("thinsc", scParameters)
|
||||
storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
|
||||
|
||||
By("Creating PVC using the Storage Class")
|
||||
pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)
|
||||
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(pvclaimSpec)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
defer func() {
|
||||
client.CoreV1().PersistentVolumeClaims(namespace).Delete(pvclaimSpec.Name, nil)
|
||||
}()
|
||||
|
||||
By("Waiting for claim to be in bound phase")
|
||||
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Get new copy of the claim
|
||||
pvclaim, err = client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(pvclaim.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Get the bound PV
|
||||
pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
/*
|
||||
PV is required to be attached to the Node. so that using govmomi API we can grab Disk's Backing Info
|
||||
to check EagerlyScrub and ThinProvisioned property
|
||||
*/
|
||||
By("Creating pod to attach PV to the node")
|
||||
// Create pod to attach Volume to Node
|
||||
podSpec := getVSpherePodSpecWithClaim(pvclaim.Name, nodeKeyValueLabel, "while true ; do sleep 2 ; done")
|
||||
pod, err := client.CoreV1().Pods(namespace).Create(podSpec)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Waiting for pod to be running")
|
||||
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
|
||||
|
||||
isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName)
|
||||
Expect(isAttached).To(BeTrue())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Verify Disk Format")
|
||||
Expect(verifyDiskFormat(client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat)).To(BeTrue(), "DiskFormat Verification Failed")
|
||||
|
||||
var volumePaths []string
|
||||
volumePaths = append(volumePaths, pv.Spec.VsphereVolume.VolumePath)
|
||||
|
||||
By("Delete pod and wait for volume to be detached from node")
|
||||
deletePodAndWaitForVolumeToDetach(f, client, pod, nodeName, volumePaths)
|
||||
|
||||
}
|
||||
|
||||
func verifyDiskFormat(client clientset.Interface, nodeName string, pvVolumePath string, diskFormat string) bool {
|
||||
By("Verifing disk format")
|
||||
eagerlyScrub := false
|
||||
thinProvisioned := false
|
||||
diskFound := false
|
||||
pvvmdkfileName := filepath.Base(pvVolumePath) + filepath.Ext(pvVolumePath)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
|
||||
vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
|
||||
vmDevices, err := vm.Device(ctx)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
disks := vmDevices.SelectByType((*types.VirtualDisk)(nil))
|
||||
|
||||
for _, disk := range disks {
|
||||
backing := disk.GetVirtualDevice().Backing.(*types.VirtualDiskFlatVer2BackingInfo)
|
||||
backingFileName := filepath.Base(backing.FileName) + filepath.Ext(backing.FileName)
|
||||
if backingFileName == pvvmdkfileName {
|
||||
diskFound = true
|
||||
if backing.EagerlyScrub != nil {
|
||||
eagerlyScrub = *backing.EagerlyScrub
|
||||
}
|
||||
if backing.ThinProvisioned != nil {
|
||||
thinProvisioned = *backing.ThinProvisioned
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
Expect(diskFound).To(BeTrue(), "Failed to find disk")
|
||||
isDiskFormatCorrect := false
|
||||
if diskFormat == "eagerzeroedthick" {
|
||||
if eagerlyScrub == true && thinProvisioned == false {
|
||||
isDiskFormatCorrect = true
|
||||
}
|
||||
} else if diskFormat == "zeroedthick" {
|
||||
if eagerlyScrub == false && thinProvisioned == false {
|
||||
isDiskFormatCorrect = true
|
||||
}
|
||||
} else if diskFormat == "thin" {
|
||||
if eagerlyScrub == false && thinProvisioned == true {
|
||||
isDiskFormatCorrect = true
|
||||
}
|
||||
}
|
||||
return isDiskFormatCorrect
|
||||
}
|
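// Reading aid (derived from the checks above, not part of the generated file): the
// expected backing flags per disk format are
//
//	diskFormat          EagerlyScrub    ThinProvisioned
//	eagerzeroedthick    true            false
//	zeroedthick         false           false
//	thin                false           true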
95
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/vsphere_volume_disksize.go
generated
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package vsphere
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
DiskSizeSCName = "disksizesc"
|
||||
)
|
||||
|
||||
/*
|
||||
Test to verify disk size specified in PVC is being honored while volume creation.
|
||||
|
||||
Steps
|
||||
1. Create StorageClass.
|
||||
2. Create PVC with invalid disk size which uses the StorageClass created in step 1.
|
||||
3. Expect the PVC to fail.
|
||||
4. Verify that the error returned on PVC failure is the expected one.
|
||||
*/
|
||||
|
||||
var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() {
|
||||
f := framework.NewDefaultFramework("volume-disksize")
|
||||
var (
|
||||
client clientset.Interface
|
||||
namespace string
|
||||
scParameters map[string]string
|
||||
datastore string
|
||||
)
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("vsphere")
|
||||
Bootstrap(f)
|
||||
client = f.ClientSet
|
||||
namespace = f.Namespace.Name
|
||||
scParameters = make(map[string]string)
|
||||
datastore = GetAndExpectStringEnvVar(StorageClassDatastoreName)
|
||||
})
|
||||
|
||||
It("verify dynamically provisioned pv using storageclass with an invalid disk size fails", func() {
|
||||
By("Invoking Test for invalid disk size")
|
||||
scParameters[Datastore] = datastore
|
||||
scParameters[DiskFormat] = ThinDisk
|
||||
diskSize := "1"
|
||||
err := invokeInvalidDiskSizeTestNeg(client, namespace, scParameters, diskSize)
|
||||
Expect(err).To(HaveOccurred())
|
||||
errorMsg := `Failed to provision volume with StorageClass \"` + DiskSizeSCName + `\": A specified parameter was not correct`
|
||||
if !strings.Contains(err.Error(), errorMsg) {
|
||||
Expect(err).NotTo(HaveOccurred(), errorMsg)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
func invokeInvalidDiskSizeTestNeg(client clientset.Interface, namespace string, scParameters map[string]string, diskSize string) error {
|
||||
By("Creating Storage Class With invalid disk size")
|
||||
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DiskSizeSCName, scParameters))
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
|
||||
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
|
||||
|
||||
By("Creating PVC using the Storage Class")
|
||||
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, diskSize, storageclass))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
|
||||
|
||||
By("Expect claim to fail provisioning volume")
|
||||
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
|
||||
return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
|
||||
}
|
189
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/vsphere_volume_fstype.go
generated
vendored
Normal file
@@ -0,0 +1,189 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package vsphere
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
Ext4FSType = "ext4"
|
||||
Ext3FSType = "ext3"
|
||||
InvalidFSType = "ext10"
|
||||
ExecCommand = "/bin/df -T /mnt/volume1 | /bin/awk 'FNR == 2 {print $2}' > /mnt/volume1/fstype && while true ; do sleep 2 ; done"
|
||||
)
|
||||
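// Note (reading aid, not part of the generated file): ExecCommand captures the filesystem
// type the volume was actually formatted with -- "df -T /mnt/volume1" prints the type in
// its second column, which is written to /mnt/volume1/fstype -- and then sleeps forever so
// the pod stays Running while invokeTestForFstype reads that file back with
// framework.LookForStringInPodExec.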
|
||||
/*
|
||||
Test to verify fstype specified in storage-class is being honored after volume creation.
|
||||
|
||||
Steps
|
||||
1. Create StorageClass with fstype set to valid type (default case included).
|
||||
2. Create PVC which uses the StorageClass created in step 1.
|
||||
3. Wait for PV to be provisioned.
|
||||
4. Wait for PVC's status to become Bound.
|
||||
5. Create pod using PVC on specific node.
|
||||
6. Wait for Disk to be attached to the node.
|
||||
7. Execute command in the pod to get fstype.
|
||||
8. Delete pod and Wait for Volume Disk to be detached from the Node.
|
||||
9. Delete PVC, PV and Storage Class.
|
||||
|
||||
Test to verify if an invalid fstype specified in storage class fails pod creation.
|
||||
|
||||
Steps
|
||||
1. Create StorageClass with an invalid fstype.
|
||||
2. Create PVC which uses the StorageClass created in step 1.
|
||||
3. Wait for PV to be provisioned.
|
||||
4. Wait for PVC's status to become Bound.
|
||||
5. Create pod using PVC.
|
||||
6. Verify if the pod creation fails.
|
||||
7. Verify that MountVolume.MountDevice fails because the filesystem executable for the given fstype cannot be found on the node.
|
||||
*/
|
||||
|
||||
var _ = utils.SIGDescribe("Volume FStype [Feature:vsphere]", func() {
|
||||
f := framework.NewDefaultFramework("volume-fstype")
|
||||
var (
|
||||
client clientset.Interface
|
||||
namespace string
|
||||
)
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("vsphere")
|
||||
Bootstrap(f)
|
||||
client = f.ClientSet
|
||||
namespace = f.Namespace.Name
|
||||
Expect(GetReadySchedulableNodeInfos()).NotTo(BeEmpty())
|
||||
})
|
||||
|
||||
It("verify fstype - ext3 formatted volume", func() {
|
||||
By("Invoking Test for fstype: ext3")
|
||||
invokeTestForFstype(f, client, namespace, Ext3FSType, Ext3FSType)
|
||||
})
|
||||
|
||||
It("verify fstype - default value should be ext4", func() {
|
||||
By("Invoking Test for fstype: Default Value - ext4")
|
||||
invokeTestForFstype(f, client, namespace, "", Ext4FSType)
|
||||
})
|
||||
|
||||
It("verify invalid fstype", func() {
|
||||
By("Invoking Test for fstype: invalid Value")
|
||||
invokeTestForInvalidFstype(f, client, namespace, InvalidFSType)
|
||||
})
|
||||
})
|
||||
|
||||
func invokeTestForFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string, expectedContent string) {
|
||||
framework.Logf("Invoking Test for fstype: %s", fstype)
|
||||
scParameters := make(map[string]string)
|
||||
scParameters["fstype"] = fstype
|
||||
|
||||
// Create Persistent Volume
|
||||
By("Creating Storage Class With Fstype")
|
||||
pvclaim, persistentvolumes := createVolume(client, namespace, scParameters)
|
||||
|
||||
// Create Pod and verify the persistent volume is accessible
|
||||
pod := createPodAndVerifyVolumeAccessible(client, namespace, pvclaim, persistentvolumes)
|
||||
_, err := framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Detach and delete volume
|
||||
detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
|
||||
err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
|
||||
Expect(err).To(BeNil())
|
||||
}
|
||||
|
||||
func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string) {
|
||||
scParameters := make(map[string]string)
|
||||
scParameters["fstype"] = fstype
|
||||
|
||||
// Create Persistent Volume
|
||||
By("Creating Storage Class With Invalid Fstype")
|
||||
pvclaim, persistentvolumes := createVolume(client, namespace, scParameters)
|
||||
|
||||
By("Creating pod to attach PV to the node")
|
||||
var pvclaims []*v1.PersistentVolumeClaim
|
||||
pvclaims = append(pvclaims, pvclaim)
|
||||
// Create pod to attach Volume to Node
|
||||
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand)
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
eventList, err := client.CoreV1().Events(namespace).List(metav1.ListOptions{})
|
||||
|
||||
// Detach and delete volume
|
||||
detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
|
||||
err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
Expect(eventList.Items).NotTo(BeEmpty())
|
||||
errorMsg := `MountVolume.MountDevice failed for volume "` + persistentvolumes[0].Name + `" : executable file not found`
|
||||
isFound := false
|
||||
for _, item := range eventList.Items {
|
||||
if strings.Contains(item.Message, errorMsg) {
|
||||
isFound = true
|
||||
}
|
||||
}
|
||||
Expect(isFound).To(BeTrue(), "Unable to verify MountVolume.MountDevice failure")
|
||||
}
|
||||
|
||||
func createVolume(client clientset.Interface, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) {
|
||||
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("fstype", scParameters))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
|
||||
|
||||
By("Creating PVC using the Storage Class")
|
||||
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
var pvclaims []*v1.PersistentVolumeClaim
|
||||
pvclaims = append(pvclaims, pvclaim)
|
||||
By("Waiting for claim to be in bound phase")
|
||||
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return pvclaim, persistentvolumes
|
||||
}
|
||||
|
||||
func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace string, pvclaim *v1.PersistentVolumeClaim, persistentvolumes []*v1.PersistentVolume) *v1.Pod {
|
||||
var pvclaims []*v1.PersistentVolumeClaim
|
||||
pvclaims = append(pvclaims, pvclaim)
|
||||
By("Creating pod to attach PV to the node")
|
||||
// Create pod to attach Volume to Node
|
||||
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Asserts: Right disk is attached to the pod
|
||||
By("Verify the volume is accessible and available in the pod")
|
||||
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
|
||||
return pod
|
||||
}
|
||||
|
||||
// detachVolume deletes the pod that uses the volume and waits until the volume is detached from the node.
|
||||
func detachVolume(f *framework.Framework, client clientset.Interface, pod *v1.Pod, volPath string) {
|
||||
pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
|
||||
Expect(err).To(BeNil())
|
||||
nodeName := pod.Spec.NodeName
|
||||
By("Deleting pod")
|
||||
framework.DeletePodWithWait(f, client, pod)
|
||||
|
||||
By("Waiting for volumes to be detached from the node")
|
||||
waitForVSphereDiskToDetach(volPath, nodeName)
|
||||
}
|
138
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/vsphere_volume_master_restart.go
generated
vendored
Normal file
@@ -0,0 +1,138 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package vsphere
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
/*
|
||||
Test to verify volume remains attached after kubelet restart on master node
|
||||
For the number of schedulable nodes,
|
||||
1. Create a volume with default volume options
|
||||
2. Create a Pod
|
||||
3. Verify the volume is attached
|
||||
4. Restart the kubelet on master node
|
||||
5. Verify again that the volume is attached
|
||||
6. Delete the pod and wait for the volume to be detached
|
||||
7. Delete the volume
|
||||
*/
|
||||
var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disruptive]", func() {
|
||||
f := framework.NewDefaultFramework("restart-master")
|
||||
|
||||
const labelKey = "vsphere_e2e_label"
|
||||
var (
|
||||
client clientset.Interface
|
||||
namespace string
|
||||
volumePaths []string
|
||||
pods []*v1.Pod
|
||||
numNodes int
|
||||
nodeKeyValueLabelList []map[string]string
|
||||
nodeNameList []string
|
||||
nodeInfo *NodeInfo
|
||||
)
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("vsphere")
|
||||
Bootstrap(f)
|
||||
client = f.ClientSet
|
||||
namespace = f.Namespace.Name
|
||||
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
|
||||
|
||||
nodes := framework.GetReadySchedulableNodesOrDie(client)
|
||||
numNodes = len(nodes.Items)
|
||||
if numNodes < 2 {
|
||||
framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
|
||||
}
|
||||
nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
|
||||
for i := 0; i < numNodes; i++ {
|
||||
nodeName := nodes.Items[i].Name
|
||||
nodeNameList = append(nodeNameList, nodeName)
|
||||
nodeLabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
|
||||
nodeKeyValueLabel := make(map[string]string)
|
||||
nodeKeyValueLabel[labelKey] = nodeLabelValue
|
||||
nodeKeyValueLabelList = append(nodeKeyValueLabelList, nodeKeyValueLabel)
|
||||
framework.AddOrUpdateLabelOnNode(client, nodeName, labelKey, nodeLabelValue)
|
||||
}
|
||||
})
|
||||
|
||||
It("verify volume remains attached after master kubelet restart", func() {
|
||||
// Create pod on each node
|
||||
for i := 0; i < numNodes; i++ {
|
||||
By(fmt.Sprintf("%d: Creating a test vsphere volume", i))
|
||||
volumePath, err := nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
volumePaths = append(volumePaths, volumePath)
|
||||
|
||||
By(fmt.Sprintf("Creating pod %d on node %v", i, nodeNameList[i]))
|
||||
podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nodeKeyValueLabelList[i], nil)
|
||||
pod, err := client.CoreV1().Pods(namespace).Create(podspec)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer framework.DeletePodWithWait(f, client, pod)
|
||||
|
||||
By("Waiting for pod to be ready")
|
||||
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
|
||||
|
||||
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
pods = append(pods, pod)
|
||||
|
||||
nodeName := pod.Spec.NodeName
|
||||
By(fmt.Sprintf("Verify volume %s is attached to the pod %s", volumePath, nodeName))
|
||||
expectVolumeToBeAttached(nodeName, volumePath)
|
||||
}
|
||||
|
||||
By("Restarting kubelet on master node")
|
||||
masterAddress := framework.GetMasterHost() + ":22"
|
||||
err := framework.RestartKubelet(masterAddress)
|
||||
Expect(err).NotTo(HaveOccurred(), "Unable to restart kubelet on master node")
|
||||
|
||||
By("Verifying the kubelet on master node is up")
|
||||
err = framework.WaitForKubeletUp(masterAddress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
for i, pod := range pods {
|
||||
volumePath := volumePaths[i]
|
||||
nodeName := pod.Spec.NodeName
|
||||
|
||||
By(fmt.Sprintf("After master restart, verify volume %v is attached to the pod %v", volumePath, nodeName))
|
||||
expectVolumeToBeAttached(nodeName, volumePath)
|
||||
|
||||
By(fmt.Sprintf("Deleting pod on node %s", nodeName))
|
||||
err = framework.DeletePodWithWait(f, client, pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Waiting for volume %s to be detached from the node %s", volumePath, nodeName))
|
||||
err = waitForVSphereDiskToDetach(volumePath, nodeName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Deleting volume %s", volumePath))
|
||||
err = nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
})
|
||||
})
|
119
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/vsphere_volume_node_delete.go
generated
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package vsphere
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/vmware/govmomi/object"
|
||||
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]", func() {
|
||||
f := framework.NewDefaultFramework("node-unregister")
|
||||
var (
|
||||
client clientset.Interface
|
||||
namespace string
|
||||
workingDir string
|
||||
err error
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("vsphere")
|
||||
Bootstrap(f)
|
||||
client = f.ClientSet
|
||||
namespace = f.Namespace.Name
|
||||
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
workingDir = os.Getenv("VSPHERE_WORKING_DIR")
|
||||
Expect(workingDir).NotTo(BeEmpty())
|
||||
|
||||
})
|
||||
|
||||
It("node unregister", func() {
|
||||
By("Get total Ready nodes")
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
Expect(len(nodeList.Items) > 1).To(BeTrue(), "At least 2 nodes are required for this test")
|
||||
|
||||
totalNodesCount := len(nodeList.Items)
|
||||
nodeVM := nodeList.Items[0]
|
||||
|
||||
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeVM.ObjectMeta.Name)
|
||||
vmObject := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
|
||||
|
||||
// Find VM .vmx file path, host, resource pool.
|
||||
// They are required to register a node VM to VC
|
||||
vmxFilePath := getVMXFilePath(vmObject)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
vmHost, err := vmObject.HostSystem(ctx)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
vmPool, err := vmObject.ResourcePool(ctx)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Unregister Node VM
|
||||
By("Unregister a node VM")
|
||||
unregisterNodeVM(nodeVM.ObjectMeta.Name, vmObject)
|
||||
|
||||
// Ready nodes should be 1 less
|
||||
By("Verifying the ready node counts")
|
||||
Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount-1)).To(BeTrue(), "Unable to verify expected ready node count")
|
||||
|
||||
nodeList = framework.GetReadySchedulableNodesOrDie(client)
|
||||
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
|
||||
|
||||
var nodeNameList []string
|
||||
for _, node := range nodeList.Items {
|
||||
nodeNameList = append(nodeNameList, node.ObjectMeta.Name)
|
||||
}
|
||||
Expect(nodeNameList).NotTo(ContainElement(nodeVM.ObjectMeta.Name))
|
||||
|
||||
// Register Node VM
|
||||
By("Register back the node VM")
|
||||
registerNodeVM(nodeVM.ObjectMeta.Name, workingDir, vmxFilePath, vmPool, vmHost)
|
||||
|
||||
// Ready nodes should be equal to earlier count
|
||||
By("Verifying the ready node counts")
|
||||
Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount)).To(BeTrue(), "Unable to verify expected ready node count")
|
||||
|
||||
nodeList = framework.GetReadySchedulableNodesOrDie(client)
|
||||
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
|
||||
|
||||
nodeNameList = nodeNameList[:0]
|
||||
for _, node := range nodeList.Items {
|
||||
nodeNameList = append(nodeNameList, node.ObjectMeta.Name)
|
||||
}
|
||||
Expect(nodeNameList).To(ContainElement(nodeVM.ObjectMeta.Name))
|
||||
|
||||
// Sanity test that pod provisioning works
|
||||
By("Sanity check for volume lifecycle")
|
||||
scParameters := make(map[string]string)
|
||||
storagePolicy := os.Getenv("VSPHERE_SPBM_GOLD_POLICY")
|
||||
Expect(storagePolicy).NotTo(BeEmpty(), "Please set VSPHERE_SPBM_GOLD_POLICY system environment")
|
||||
scParameters[SpbmStoragePolicy] = storagePolicy
|
||||
invokeValidPolicyTest(f, client, namespace, scParameters)
|
||||
})
|
||||
})
|
184
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go
generated
vendored
Normal file
@@ -0,0 +1,184 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package vsphere
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/vmware/govmomi/object"
|
||||
vimtypes "github.com/vmware/govmomi/vim25/types"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
/*
|
||||
Test to verify volume status after node power off:
|
||||
1. Verify the pod got provisioned on a different node with volume attached to it
|
||||
2. Verify the volume is detached from the powered off node
|
||||
*/
|
||||
var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", func() {
|
||||
f := framework.NewDefaultFramework("node-poweroff")
|
||||
var (
|
||||
client clientset.Interface
|
||||
namespace string
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("vsphere")
|
||||
Bootstrap(f)
|
||||
client = f.ClientSet
|
||||
namespace = f.Namespace.Name
|
||||
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
|
||||
Expect(len(nodeList.Items) > 1).To(BeTrue(), "At least 2 nodes are required for this test")
|
||||
})
|
||||
|
||||
/*
|
||||
Steps:
|
||||
1. Create a StorageClass
|
||||
2. Create a PVC with the StorageClass
|
||||
3. Create a Deployment with 1 replica, using the PVC
|
||||
4. Verify the pod got provisioned on a node
|
||||
5. Verify the volume is attached to the node
|
||||
6. Power off the node where pod got provisioned
|
||||
7. Verify the pod got provisioned on a different node
|
||||
8. Verify the volume is attached to the new node
|
||||
9. Verify the volume is detached from the old node
|
||||
10. Delete the Deployment and wait for the volume to be detached
|
||||
11. Delete the PVC
|
||||
12. Delete the StorageClass
|
||||
*/
|
||||
It("verify volume status after node power off", func() {
|
||||
By("Creating a Storage Class")
|
||||
storageClassSpec := getVSphereStorageClassSpec("test-sc", nil)
|
||||
storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec)
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
|
||||
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
|
||||
|
||||
By("Creating PVC using the Storage Class")
|
||||
pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "1Gi", storageclass)
|
||||
pvclaim, err := framework.CreatePVC(client, namespace, pvclaimSpec)
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create PVC with err: %v", err))
|
||||
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
|
||||
|
||||
By("Waiting for PVC to be in bound phase")
|
||||
pvclaims := []*v1.PersistentVolumeClaim{pvclaim}
|
||||
pvs, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to wait until PVC phase set to bound: %v", err))
|
||||
volumePath := pvs[0].Spec.VsphereVolume.VolumePath
|
||||
|
||||
By("Creating a Deployment")
|
||||
deployment, err := framework.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "")
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create Deployment with err: %v", err))
|
||||
defer client.ExtensionsV1beta1().Deployments(namespace).Delete(deployment.Name, &metav1.DeleteOptions{})
|
||||
|
||||
By("Get pod from the deployement")
|
||||
podList, err := framework.GetPodsForDeployment(client, deployment)
|
||||
Expect(podList.Items).NotTo(BeEmpty())
|
||||
pod := podList.Items[0]
|
||||
node1 := pod.Spec.NodeName
|
||||
|
||||
By(fmt.Sprintf("Verify disk is attached to the node: %v", node1))
|
||||
isAttached, err := diskIsAttached(volumePath, node1)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(isAttached).To(BeTrue(), "Disk is not attached to the node")
|
||||
|
||||
By(fmt.Sprintf("Power off the node: %v", node1))
|
||||
|
||||
nodeInfo := TestContext.NodeMapper.GetNodeInfo(node1)
|
||||
vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
_, err = vm.PowerOff(ctx)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer vm.PowerOn(ctx)
|
||||
|
||||
err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOff)
|
||||
Expect(err).NotTo(HaveOccurred(), "Unable to power off the node")
|
||||
|
||||
// Waiting for the pod to be failed over to a different node
|
||||
node2, err := waitForPodToFailover(client, deployment, node1)
|
||||
Expect(err).NotTo(HaveOccurred(), "Pod did not fail over to a different node")
|
||||
|
||||
By(fmt.Sprintf("Waiting for disk to be attached to the new node: %v", node2))
|
||||
err = waitForVSphereDiskToAttach(volumePath, node2)
|
||||
Expect(err).NotTo(HaveOccurred(), "Disk is not attached to the node")
|
||||
|
||||
By(fmt.Sprintf("Waiting for disk to be detached from the previous node: %v", node1))
|
||||
err = waitForVSphereDiskToDetach(volumePath, node1)
|
||||
Expect(err).NotTo(HaveOccurred(), "Disk is not detached from the node")
|
||||
|
||||
By(fmt.Sprintf("Power on the previous node: %v", node1))
|
||||
vm.PowerOn(ctx)
|
||||
err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOn)
|
||||
Expect(err).NotTo(HaveOccurred(), "Unable to power on the node")
|
||||
})
|
||||
})
|
||||
|
||||
// Wait until the pod fails over to a different node, or time out after 3 minutes
|
||||
func waitForPodToFailover(client clientset.Interface, deployment *apps.Deployment, oldNode string) (string, error) {
|
||||
var (
|
||||
err error
|
||||
newNode string
|
||||
timeout = 3 * time.Minute
|
||||
pollTime = 10 * time.Second
|
||||
)
|
||||
|
||||
err = wait.Poll(pollTime, timeout, func() (bool, error) {
|
||||
newNode, err = getNodeForDeployment(client, deployment)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
|
||||
if newNode != oldNode {
|
||||
framework.Logf("The pod has been failed over from %q to %q", oldNode, newNode)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
framework.Logf("Waiting for pod to be failed over from %q", oldNode)
|
||||
return false, nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
if err == wait.ErrWaitTimeout {
|
||||
framework.Logf("Time out after waiting for %v", timeout)
|
||||
}
|
||||
framework.Logf("Pod did not fail over from %q with error: %v", oldNode, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
return getNodeForDeployment(client, deployment)
|
||||
}
|
||||
|
||||
// getNodeForDeployment returns node name for the Deployment
|
||||
func getNodeForDeployment(client clientset.Interface, deployment *apps.Deployment) (string, error) {
|
||||
podList, err := framework.GetPodsForDeployment(client, deployment)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return podList.Items[0].Spec.NodeName, nil
|
||||
}
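// Illustrative sketch (hypothetical helper, not part of the upstream file):
// waitForPodFailoverSketch shows how the polling pattern above could return
// the node name captured inside the poll closure, so the deployment does not
// need to be queried again after the wait succeeds.
func waitForPodFailoverSketch(client clientset.Interface, deployment *apps.Deployment, oldNode string) (string, error) {
	var newNode string
	err := wait.Poll(10*time.Second, 3*time.Minute, func() (bool, error) {
		node, err := getNodeForDeployment(client, deployment)
		if err != nil {
			return true, err
		}
		if node != oldNode {
			newNode = node
			return true, nil
		}
		framework.Logf("Pod is still scheduled on %q, waiting for failover", oldNode)
		return false, nil
	})
	return newNode, err
}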
|
120
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go
generated
vendored
Normal file
@@ -0,0 +1,120 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package vsphere
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
storage "k8s.io/api/storage/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
/*
|
||||
Test to perform Disk Ops storm.
|
||||
|
||||
Steps
|
||||
1. Create a storage class for thin provisioning.
2. Create 30 PVCs using the above storage class (referenced via annotation), each requesting a 2Gi volume.
3. Wait until all disks are ready and all PVs and PVCs are bound. (CreateVolume storm)
4. Create a pod to mount the volumes using the PVCs created in step 2. (AttachDisk storm)
5. Wait for the pod status to be Running.
6. Verify all volumes are accessible and available in the pod.
7. Delete the pod.
8. Wait until the volumes get detached. (DetachDisk storm)
9. Delete all PVCs. This should delete all disks. (DeleteVolume storm)
10. Delete the storage class.
|
||||
*/
|
||||
|
||||
var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
|
||||
f := framework.NewDefaultFramework("volume-ops-storm")
|
||||
const DEFAULT_VOLUME_OPS_SCALE = 30
|
||||
var (
|
||||
client clientset.Interface
|
||||
namespace string
|
||||
storageclass *storage.StorageClass
|
||||
pvclaims []*v1.PersistentVolumeClaim
|
||||
persistentvolumes []*v1.PersistentVolume
|
||||
err error
|
||||
volume_ops_scale int
|
||||
)
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("vsphere")
|
||||
Bootstrap(f)
|
||||
client = f.ClientSet
|
||||
namespace = f.Namespace.Name
|
||||
Expect(GetReadySchedulableNodeInfos()).NotTo(BeEmpty())
|
||||
if os.Getenv("VOLUME_OPS_SCALE") != "" {
|
||||
volume_ops_scale, err = strconv.Atoi(os.Getenv("VOLUME_OPS_SCALE"))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
} else {
|
||||
volume_ops_scale = DEFAULT_VOLUME_OPS_SCALE
|
||||
}
|
||||
pvclaims = make([]*v1.PersistentVolumeClaim, volume_ops_scale)
|
||||
})
|
||||
AfterEach(func() {
|
||||
By("Deleting PVCs")
|
||||
for _, claim := range pvclaims {
|
||||
framework.DeletePersistentVolumeClaim(client, claim.Name, namespace)
|
||||
}
|
||||
By("Deleting StorageClass")
|
||||
err = client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("should create pod with many volumes and verify no attach call fails", func() {
|
||||
By(fmt.Sprintf("Running test with VOLUME_OPS_SCALE: %v", volume_ops_scale))
|
||||
By("Creating Storage Class")
|
||||
scParameters := make(map[string]string)
|
||||
scParameters["diskformat"] = "thin"
|
||||
storageclass, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("thinsc", scParameters))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Creating PVCs using the Storage Class")
|
||||
count := 0
|
||||
for count < volume_ops_scale {
|
||||
pvclaims[count], err = framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
count++
|
||||
}
|
||||
|
||||
By("Waiting for all claims to be in bound phase")
|
||||
persistentvolumes, err = framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Creating pod to attach PVs to the node")
|
||||
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Verify all volumes are accessible and available in the pod")
|
||||
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
|
||||
|
||||
By("Deleting pod")
|
||||
framework.ExpectNoError(framework.DeletePodWithWait(f, client, pod))
|
||||
|
||||
By("Waiting for volumes to be detached from the node")
|
||||
for _, pv := range persistentvolumes {
|
||||
waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
|
||||
}
|
||||
})
|
||||
})
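// Illustrative sketch (hypothetical helper, not part of the upstream file):
// getVolumeOpsScaleSketch mirrors the BeforeEach logic above, reading
// VOLUME_OPS_SCALE when the environment variable is set and falling back to
// the default scale of 30 otherwise.
func getVolumeOpsScaleSketch() (int, error) {
	if scale := os.Getenv("VOLUME_OPS_SCALE"); scale != "" {
		return strconv.Atoi(scale)
	}
	return 30, nil
}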
|
234
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/vsphere_volume_perf.go
generated
vendored
Normal file
@@ -0,0 +1,234 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package vsphere
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
storageV1 "k8s.io/api/storage/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
/* This test calculates latency numbers for volume lifecycle operations
|
||||
1. Create 4 types of storage classes
|
||||
2. Read the total number of volumes to be created and volumes per pod
|
||||
3. Create total PVCs (number of volumes)
|
||||
4. Create Pods with attached volumes per pod
|
||||
5. Verify access to the volumes
|
||||
6. Delete pods and wait for volumes to detach
|
||||
7. Delete the PVCs
|
||||
*/
|
||||
const (
|
||||
SCSIUnitsAvailablePerNode = 55
|
||||
CreateOp = "CreateOp"
|
||||
AttachOp = "AttachOp"
|
||||
DetachOp = "DetachOp"
|
||||
DeleteOp = "DeleteOp"
|
||||
)
|
||||
|
||||
var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() {
|
||||
f := framework.NewDefaultFramework("vcp-performance")
|
||||
|
||||
var (
|
||||
client clientset.Interface
|
||||
namespace string
|
||||
nodeSelectorList []*NodeSelector
|
||||
policyName string
|
||||
datastoreName string
|
||||
volumeCount int
|
||||
volumesPerPod int
|
||||
iterations int
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("vsphere")
|
||||
Bootstrap(f)
|
||||
client = f.ClientSet
|
||||
namespace = f.Namespace.Name
|
||||
|
||||
// Read the environment variables
|
||||
volumeCount = GetAndExpectIntEnvVar(VCPPerfVolumeCount)
|
||||
volumesPerPod = GetAndExpectIntEnvVar(VCPPerfVolumesPerPod)
|
||||
iterations = GetAndExpectIntEnvVar(VCPPerfIterations)
|
||||
|
||||
policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
|
||||
datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
|
||||
|
||||
nodes := framework.GetReadySchedulableNodesOrDie(client)
|
||||
Expect(len(nodes.Items)).To(BeNumerically(">=", 1), "Requires at least %d nodes (not %d)", 1, len(nodes.Items))
|
||||
|
||||
msg := fmt.Sprintf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), SCSIUnitsAvailablePerNode*len(nodes.Items))
|
||||
Expect(volumeCount).To(BeNumerically("<=", SCSIUnitsAvailablePerNode*len(nodes.Items)), msg)
|
||||
|
||||
msg = fmt.Sprintf("Cannot attach %d volumes per pod. Maximum volumes that can be attached per pod is %d", volumesPerPod, SCSIUnitsAvailablePerNode)
|
||||
Expect(volumesPerPod).To(BeNumerically("<=", SCSIUnitsAvailablePerNode), msg)
|
||||
|
||||
nodeSelectorList = createNodeLabels(client, namespace, nodes)
|
||||
})
|
||||
|
||||
It("vcp performance tests", func() {
|
||||
scList := getTestStorageClasses(client, policyName, datastoreName)
|
||||
defer func(scList []*storageV1.StorageClass) {
|
||||
for _, sc := range scList {
|
||||
client.StorageV1().StorageClasses().Delete(sc.Name, nil)
|
||||
}
|
||||
}(scList)
|
||||
|
||||
sumLatency := make(map[string]float64)
|
||||
for i := 0; i < iterations; i++ {
|
||||
latency := invokeVolumeLifeCyclePerformance(f, client, namespace, scList, volumesPerPod, volumeCount, nodeSelectorList)
|
||||
for key, val := range latency {
|
||||
sumLatency[key] += val
|
||||
}
|
||||
}
|
||||
|
||||
iterations64 := float64(iterations)
|
||||
framework.Logf("Average latency for below operations")
|
||||
framework.Logf("Creating %d PVCs and waiting for bound phase: %v seconds", volumeCount, sumLatency[CreateOp]/iterations64)
|
||||
framework.Logf("Creating %v Pod: %v seconds", volumeCount/volumesPerPod, sumLatency[AttachOp]/iterations64)
|
||||
framework.Logf("Deleting %v Pod and waiting for disk to be detached: %v seconds", volumeCount/volumesPerPod, sumLatency[DetachOp]/iterations64)
|
||||
framework.Logf("Deleting %v PVCs: %v seconds", volumeCount, sumLatency[DeleteOp]/iterations64)
|
||||
|
||||
})
|
||||
})
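// Illustrative sketch (hypothetical helper, not part of the upstream file):
// averageLatenciesSketch shows the averaging performed in the test body
// above, dividing each operation's summed latency by the iteration count.
func averageLatenciesSketch(sumLatency map[string]float64, iterations int) map[string]float64 {
	avg := make(map[string]float64, len(sumLatency))
	for op, total := range sumLatency {
		avg[op] = total / float64(iterations)
	}
	return avg
}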
|
||||
|
||||
func getTestStorageClasses(client clientset.Interface, policyName, datastoreName string) []*storageV1.StorageClass {
|
||||
const (
|
||||
storageclass1 = "sc-default"
|
||||
storageclass2 = "sc-vsan"
|
||||
storageclass3 = "sc-spbm"
|
||||
storageclass4 = "sc-user-specified-ds"
|
||||
)
|
||||
scNames := []string{storageclass1, storageclass2, storageclass3, storageclass4}
|
||||
scArrays := make([]*storageV1.StorageClass, len(scNames))
|
||||
for index, scname := range scNames {
|
||||
// Create vSphere Storage Class
|
||||
By(fmt.Sprintf("Creating Storage Class : %v", scname))
|
||||
var sc *storageV1.StorageClass
|
||||
var err error
|
||||
switch scname {
|
||||
case storageclass1:
|
||||
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass1, nil))
|
||||
case storageclass2:
|
||||
scVSanParameters := make(map[string]string)
|
||||
scVSanParameters[Policy_HostFailuresToTolerate] = "1"
|
||||
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters))
|
||||
case storageclass3:
|
||||
scSPBMPolicyParameters := make(map[string]string)
|
||||
scSPBMPolicyParameters[SpbmStoragePolicy] = policyName
|
||||
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters))
|
||||
case storageclass4:
|
||||
scWithDSParameters := make(map[string]string)
|
||||
scWithDSParameters[Datastore] = datastoreName
|
||||
scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters)
|
||||
sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec)
|
||||
}
|
||||
Expect(sc).NotTo(BeNil())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
scArrays[index] = sc
|
||||
}
|
||||
return scArrays
|
||||
}
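// Illustrative sketch (hypothetical, not part of the upstream file): a
// table-driven variant of the switch above, mapping each class name straight
// to its parameter set before creating the StorageClass objects.
func getTestStorageClassesSketch(client clientset.Interface, policyName, datastoreName string) []*storageV1.StorageClass {
	specs := []struct {
		name   string
		params map[string]string
	}{
		{"sc-default", nil},
		{"sc-vsan", map[string]string{Policy_HostFailuresToTolerate: "1"}},
		{"sc-spbm", map[string]string{SpbmStoragePolicy: policyName}},
		{"sc-user-specified-ds", map[string]string{Datastore: datastoreName}},
	}
	scs := make([]*storageV1.StorageClass, 0, len(specs))
	for _, spec := range specs {
		sc, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(spec.name, spec.params))
		Expect(err).NotTo(HaveOccurred())
		scs = append(scs, sc)
	}
	return scs
}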
|
||||
|
||||
// invokeVolumeLifeCyclePerformance performs full volume life cycle management and records latency for each operation
|
||||
func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.Interface, namespace string, sc []*storageV1.StorageClass, volumesPerPod int, volumeCount int, nodeSelectorList []*NodeSelector) (latency map[string]float64) {
|
||||
var (
|
||||
totalpvclaims [][]*v1.PersistentVolumeClaim
|
||||
totalpvs [][]*v1.PersistentVolume
|
||||
totalpods []*v1.Pod
|
||||
)
|
||||
nodeVolumeMap := make(map[string][]string)
|
||||
latency = make(map[string]float64)
|
||||
numPods := volumeCount / volumesPerPod
|
||||
|
||||
By(fmt.Sprintf("Creating %d PVCs", volumeCount))
|
||||
start := time.Now()
|
||||
for i := 0; i < numPods; i++ {
|
||||
var pvclaims []*v1.PersistentVolumeClaim
|
||||
for j := 0; j < volumesPerPod; j++ {
|
||||
currsc := sc[((i*numPods)+j)%len(sc)]
|
||||
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", currsc))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
pvclaims = append(pvclaims, pvclaim)
|
||||
}
|
||||
totalpvclaims = append(totalpvclaims, pvclaims)
|
||||
}
|
||||
for _, pvclaims := range totalpvclaims {
|
||||
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
totalpvs = append(totalpvs, persistentvolumes)
|
||||
}
|
||||
elapsed := time.Since(start)
|
||||
latency[CreateOp] = elapsed.Seconds()
|
||||
|
||||
By("Creating pod to attach PVs to the node")
|
||||
start = time.Now()
|
||||
for i, pvclaims := range totalpvclaims {
|
||||
nodeSelector := nodeSelectorList[i%len(nodeSelectorList)]
|
||||
pod, err := framework.CreatePod(client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
totalpods = append(totalpods, pod)
|
||||
|
||||
defer framework.DeletePodWithWait(f, client, pod)
|
||||
}
|
||||
elapsed = time.Since(start)
|
||||
latency[AttachOp] = elapsed.Seconds()
|
||||
|
||||
for i, pod := range totalpods {
|
||||
verifyVSphereVolumesAccessible(client, pod, totalpvs[i])
|
||||
}
|
||||
|
||||
By("Deleting pods")
|
||||
start = time.Now()
|
||||
for _, pod := range totalpods {
|
||||
err := framework.DeletePodWithWait(f, client, pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
elapsed = time.Since(start)
|
||||
latency[DetachOp] = elapsed.Seconds()
|
||||
|
||||
for i, pod := range totalpods {
|
||||
for _, pv := range totalpvs[i] {
|
||||
nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath)
|
||||
}
|
||||
}
|
||||
|
||||
err := waitForVSphereDisksToDetach(nodeVolumeMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Deleting the PVCs")
|
||||
start = time.Now()
|
||||
for _, pvclaims := range totalpvclaims {
|
||||
for _, pvc := range pvclaims {
|
||||
err = framework.DeletePersistentVolumeClaim(client, pvc.Name, namespace)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
}
|
||||
elapsed = time.Since(start)
|
||||
latency[DeleteOp] = elapsed.Seconds()
|
||||
|
||||
return latency
|
||||
}
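// Illustrative sketch (hypothetical helper, not part of the upstream file):
// timeOperationSketch factors out the start/elapsed pattern repeated above.
// Usage would look like: latency[CreateOp] = timeOperationSketch(func() { /* create and bind PVCs */ })
func timeOperationSketch(op func()) float64 {
	start := time.Now()
	op()
	return time.Since(start).Seconds()
}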
|
392
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/vsphere_volume_placement.go
generated
vendored
Normal file
@@ -0,0 +1,392 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package vsphere
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
var _ = utils.SIGDescribe("Volume Placement", func() {
|
||||
f := framework.NewDefaultFramework("volume-placement")
|
||||
const (
|
||||
NodeLabelKey = "vsphere_e2e_label_volume_placement"
|
||||
)
|
||||
var (
|
||||
c clientset.Interface
|
||||
ns string
|
||||
volumePaths []string
|
||||
node1Name string
|
||||
node1KeyValueLabel map[string]string
|
||||
node2Name string
|
||||
node2KeyValueLabel map[string]string
|
||||
isNodeLabeled bool
|
||||
nodeInfo *NodeInfo
|
||||
vsp *VSphere
|
||||
)
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("vsphere")
|
||||
Bootstrap(f)
|
||||
c = f.ClientSet
|
||||
ns = f.Namespace.Name
|
||||
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
|
||||
if !isNodeLabeled {
|
||||
node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(c, ns)
|
||||
isNodeLabeled = true
|
||||
nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name)
|
||||
vsp = nodeInfo.VSphere
|
||||
}
|
||||
By("creating vmdk")
|
||||
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
volumePaths = append(volumePaths, volumePath)
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
for _, volumePath := range volumePaths {
|
||||
vsp.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
|
||||
}
|
||||
volumePaths = nil
|
||||
})
|
||||
|
||||
/*
|
||||
Steps
|
||||
1. Remove labels assigned to node 1 and node 2
|
||||
2. Delete VMDK volume
|
||||
*/
|
||||
framework.AddCleanupAction(func() {
|
||||
// Cleanup actions will be called even when the tests are skipped, which leaves the namespace unset.
|
||||
if len(ns) > 0 {
|
||||
if len(node1KeyValueLabel) > 0 {
|
||||
framework.RemoveLabelOffNode(c, node1Name, NodeLabelKey)
|
||||
}
|
||||
if len(node2KeyValueLabel) > 0 {
|
||||
framework.RemoveLabelOffNode(c, node2Name, NodeLabelKey)
|
||||
}
|
||||
}
|
||||
})
|
||||
/*
|
||||
Steps
|
||||
|
||||
1. Create pod Spec with volume path of the vmdk and NodeSelector set to label assigned to node1.
|
||||
2. Create pod and wait for pod to become ready.
|
||||
3. Verify volume is attached to the node1.
|
||||
4. Create empty file on the volume to verify volume is writable.
|
||||
5. Verify newly created file and previously created files exist on the volume.
|
||||
6. Delete pod.
|
||||
7. Wait for volume to be detached from the node1.
|
||||
8. Repeat steps 1 to 7 and make sure back-to-back pod creation on the same worker node with the same volume works as expected.
|
||||
|
||||
*/
|
||||
|
||||
It("should create and delete pod with the same volume source on the same worker node", func() {
|
||||
var volumeFiles []string
|
||||
pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
|
||||
|
||||
// Create empty files on the mounted volumes on the pod to verify volume is writable
|
||||
// Verify newly and previously created files present on the volume mounted on the pod
|
||||
newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns)
|
||||
volumeFiles = append(volumeFiles, newEmptyFileName)
|
||||
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
|
||||
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
|
||||
|
||||
By(fmt.Sprintf("Creating pod on the same node: %v", node1Name))
|
||||
pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
|
||||
|
||||
// Create empty files on the mounted volumes on the pod to verify volume is writable
|
||||
// Verify newly and previously created files present on the volume mounted on the pod
|
||||
newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns)
|
||||
volumeFiles = append(volumeFiles, newEmptyFileName)
|
||||
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
|
||||
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
|
||||
})
|
||||
|
||||
/*
|
||||
Steps
|
||||
|
||||
1. Create pod Spec with volume path of the vmdk1 and NodeSelector set to node1's label.
|
||||
2. Create pod and wait for POD to become ready.
|
||||
3. Verify volume is attached to the node1.
|
||||
4. Create empty file on the volume to verify volume is writable.
|
||||
5. Verify newly created file and previously created files exist on the volume.
|
||||
6. Delete pod.
|
||||
7. Wait for volume to be detached from the node1.
|
||||
8. Create pod Spec with volume path of the vmdk1 and NodeSelector set to node2's label.
|
||||
9. Create pod and wait for pod to become ready.
|
||||
10. Verify volume is attached to the node2.
|
||||
11. Create empty file on the volume to verify volume is writable.
|
||||
12. Verify newly created file and previously created files exist on the volume.
|
||||
13. Delete pod.
|
||||
*/
|
||||
|
||||
It("should create and delete pod with the same volume source attach/detach to different worker nodes", func() {
|
||||
var volumeFiles []string
|
||||
pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
|
||||
// Create empty files on the mounted volumes on the pod to verify volume is writable
|
||||
// Verify newly and previously created files present on the volume mounted on the pod
|
||||
newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns)
|
||||
volumeFiles = append(volumeFiles, newEmptyFileName)
|
||||
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
|
||||
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
|
||||
|
||||
By(fmt.Sprintf("Creating pod on the another node: %v", node2Name))
|
||||
pod = createPodWithVolumeAndNodeSelector(c, ns, node2Name, node2KeyValueLabel, volumePaths)
|
||||
|
||||
newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns)
|
||||
volumeFiles = append(volumeFiles, newEmptyFileName)
|
||||
// Create empty files on the mounted volumes on the pod to verify volume is writable
|
||||
// Verify newly and previously created files present on the volume mounted on the pod
|
||||
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
|
||||
deletePodAndWaitForVolumeToDetach(f, c, pod, node2Name, volumePaths)
|
||||
})
|
||||
|
||||
/*
|
||||
Test multiple volumes from same datastore within the same pod
|
||||
1. Create volumes - vmdk2
2. Create pod Spec with volume path of vmdk1 (vmdk1 is created in test setup) and vmdk2.
3. Create pod using spec created in step-2 and wait for pod to become ready.
4. Verify both volumes are attached to the node on which the pod is created. Write some data to make sure the volumes are accessible.
5. Delete pod.
6. Wait for vmdk1 and vmdk2 to be detached from the node.
7. Create pod using spec created in step-2 and wait for pod to become ready.
8. Verify both volumes are attached to the node on which the pod is created. Verify the volume contents match the content written in step 4.
9. Delete pod.
10. Wait for vmdk1 and vmdk2 to be detached from the node.
|
||||
*/
|
||||
|
||||
It("should create and delete pod with multiple volumes from same datastore", func() {
|
||||
By("creating another vmdk")
|
||||
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
volumePaths = append(volumePaths, volumePath)
|
||||
|
||||
By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
|
||||
pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
|
||||
// Create empty files on the mounted volumes on the pod to verify volume is writable
|
||||
// Verify newly and previously created files present on the volume mounted on the pod
|
||||
volumeFiles := []string{
|
||||
fmt.Sprintf("/mnt/volume1/%v_1.txt", ns),
|
||||
fmt.Sprintf("/mnt/volume2/%v_1.txt", ns),
|
||||
}
|
||||
createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles)
|
||||
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
|
||||
By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
|
||||
pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
|
||||
// Create empty files on the mounted volumes on the pod to verify volume is writable
|
||||
// Verify newly and previously created files present on the volume mounted on the pod
|
||||
newEmptyFilesNames := []string{
|
||||
fmt.Sprintf("/mnt/volume1/%v_2.txt", ns),
|
||||
fmt.Sprintf("/mnt/volume2/%v_2.txt", ns),
|
||||
}
|
||||
volumeFiles = append(volumeFiles, newEmptyFilesNames[0])
|
||||
volumeFiles = append(volumeFiles, newEmptyFilesNames[1])
|
||||
createAndVerifyFilesOnVolume(ns, pod.Name, newEmptyFilesNames, volumeFiles)
|
||||
})
|
||||
|
||||
/*
|
||||
Test multiple volumes from different datastore within the same pod
|
||||
1. Create volumes - vmdk2 on a non-default shared datastore.
2. Create pod Spec with volume path of vmdk1 (vmdk1 is created in test setup on the default datastore) and vmdk2.
3. Create pod using spec created in step-2 and wait for pod to become ready.
4. Verify both volumes are attached to the node on which the pod is created. Write some data to make sure the volumes are accessible.
5. Delete pod.
6. Wait for vmdk1 and vmdk2 to be detached from the node.
7. Create pod using spec created in step-2 and wait for pod to become ready.
8. Verify both volumes are attached to the node on which the pod is created. Verify the volume contents match the content written in step 4.
9. Delete pod.
10. Wait for vmdk1 and vmdk2 to be detached from the node.
|
||||
*/
|
||||
It("should create and delete pod with multiple volumes from different datastore", func() {
|
||||
By("creating another vmdk on non default shared datastore")
|
||||
volumeOptions := &VolumeOptions{
	CapacityKB: 2097152,
	Name:       "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10),
	Datastore:  GetAndExpectStringEnvVar(SecondSharedDatastore),
}
|
||||
volumePath, err := vsp.CreateVolume(volumeOptions, nodeInfo.DataCenterRef)
|
||||
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
volumePaths = append(volumePaths, volumePath)
|
||||
|
||||
By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
|
||||
pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
|
||||
|
||||
// Create empty files on the mounted volumes on the pod to verify volume is writable
|
||||
// Verify newly and previously created files present on the volume mounted on the pod
|
||||
volumeFiles := []string{
|
||||
fmt.Sprintf("/mnt/volume1/%v_1.txt", ns),
|
||||
fmt.Sprintf("/mnt/volume2/%v_1.txt", ns),
|
||||
}
|
||||
createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles)
|
||||
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
|
||||
|
||||
By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
|
||||
pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
|
||||
// Create empty files on the mounted volumes on the pod to verify volume is writable
|
||||
// Verify newly and previously created files present on the volume mounted on the pod
|
||||
newEmptyFileNames := []string{
|
||||
fmt.Sprintf("/mnt/volume1/%v_2.txt", ns),
|
||||
fmt.Sprintf("/mnt/volume2/%v_2.txt", ns),
|
||||
}
|
||||
volumeFiles = append(volumeFiles, newEmptyFileNames[0])
|
||||
volumeFiles = append(volumeFiles, newEmptyFileNames[1])
|
||||
createAndVerifyFilesOnVolume(ns, pod.Name, newEmptyFileNames, volumeFiles)
|
||||
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
|
||||
})
|
||||
|
||||
/*
|
||||
Test Back-to-back pod creation/deletion with different volume sources on the same worker node
|
||||
1. Create volumes - vmdk2
|
||||
2. Create pod Spec - pod-SpecA with volume path of vmdk1 and NodeSelector set to label assigned to node1.
|
||||
3. Create pod Spec - pod-SpecB with volume path of vmdk2 and NodeSelector set to label assigned to node1.
|
||||
4. Create pod-A using pod-SpecA and wait for pod to become ready.
|
||||
5. Create pod-B using pod-SpecB and wait for pod to become ready.
6. Verify volumes are attached to the node.
7. Create an empty file on each volume to make sure the volume is accessible. (Perform this step on pod-A and pod-B)
8. Verify the file created in step 7 is present on the volume. (Perform this step on pod-A and pod-B)
9. Delete pod-A and pod-B
10. Repeat steps 4 to 9 five times and verify that the associated volumes' contents match.
11. Wait for vmdk1 and vmdk2 to be detached from the node.
|
||||
*/
|
||||
It("test back to back pod creation and deletion with different volume sources on the same worker node", func() {
|
||||
var (
|
||||
podA *v1.Pod
|
||||
podB *v1.Pod
|
||||
testvolumePathsPodA []string
|
||||
testvolumePathsPodB []string
|
||||
podAFiles []string
|
||||
podBFiles []string
|
||||
)
|
||||
|
||||
defer func() {
|
||||
By("clean up undeleted pods")
|
||||
framework.ExpectNoError(framework.DeletePodWithWait(f, c, podA), "defer: Failed to delete pod ", podA.Name)
|
||||
framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "defer: Failed to delete pod ", podB.Name)
|
||||
By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name))
|
||||
for _, volumePath := range volumePaths {
|
||||
framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node1Name))
|
||||
}
|
||||
}()
|
||||
|
||||
testvolumePathsPodA = append(testvolumePathsPodA, volumePaths[0])
|
||||
// Create another VMDK Volume
|
||||
By("creating another vmdk")
|
||||
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
volumePaths = append(volumePaths, volumePath)
|
||||
testvolumePathsPodB = append(testvolumePathsPodA, volumePath)
|
||||
|
||||
for index := 0; index < 5; index++ {
|
||||
By(fmt.Sprintf("Creating pod-A on the node: %v with volume: %v", node1Name, testvolumePathsPodA[0]))
|
||||
podA = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodA)
|
||||
|
||||
By(fmt.Sprintf("Creating pod-B on the node: %v with volume: %v", node1Name, testvolumePathsPodB[0]))
|
||||
podB = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodB)
|
||||
|
||||
podAFileName := fmt.Sprintf("/mnt/volume1/podA_%v_%v.txt", ns, index+1)
|
||||
podBFileName := fmt.Sprintf("/mnt/volume1/podB_%v_%v.txt", ns, index+1)
|
||||
podAFiles = append(podAFiles, podAFileName)
|
||||
podBFiles = append(podBFiles, podBFileName)
|
||||
|
||||
// Create empty files on the mounted volumes on the pod to verify volume is writable
|
||||
By("Creating empty file on volume mounted on pod-A")
|
||||
framework.CreateEmptyFileOnPod(ns, podA.Name, podAFileName)
|
||||
|
||||
By("Creating empty file volume mounted on pod-B")
|
||||
framework.CreateEmptyFileOnPod(ns, podB.Name, podBFileName)
|
||||
|
||||
// Verify newly and previously created files present on the volume mounted on the pod
|
||||
By("Verify newly Created file and previously created files present on volume mounted on pod-A")
|
||||
verifyFilesExistOnVSphereVolume(ns, podA.Name, podAFiles...)
|
||||
By("Verify newly Created file and previously created files present on volume mounted on pod-B")
|
||||
verifyFilesExistOnVSphereVolume(ns, podB.Name, podBFiles...)
|
||||
|
||||
By("Deleting pod-A")
|
||||
framework.ExpectNoError(framework.DeletePodWithWait(f, c, podA), "Failed to delete pod ", podA.Name)
|
||||
By("Deleting pod-B")
|
||||
framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "Failed to delete pod ", podB.Name)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
func testSetupVolumePlacement(client clientset.Interface, namespace string) (node1Name string, node1KeyValueLabel map[string]string, node2Name string, node2KeyValueLabel map[string]string) {
|
||||
nodes := framework.GetReadySchedulableNodesOrDie(client)
|
||||
if len(nodes.Items) < 2 {
|
||||
framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
|
||||
}
|
||||
node1Name = nodes.Items[0].Name
|
||||
node2Name = nodes.Items[1].Name
|
||||
node1LabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
|
||||
node1KeyValueLabel = make(map[string]string)
|
||||
node1KeyValueLabel[NodeLabelKey] = node1LabelValue
|
||||
framework.AddOrUpdateLabelOnNode(client, node1Name, NodeLabelKey, node1LabelValue)
|
||||
|
||||
node2LabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
|
||||
node2KeyValueLabel = make(map[string]string)
|
||||
node2KeyValueLabel[NodeLabelKey] = node2LabelValue
|
||||
framework.AddOrUpdateLabelOnNode(client, node2Name, NodeLabelKey, node2LabelValue)
|
||||
return node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel
|
||||
}
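// Illustrative sketch (hypothetical helper, not part of the upstream file):
// removeVolumePlacementLabelsSketch is a cleanup counterpart to the setup
// above; the label key is passed in explicitly rather than read from the
// spec-scoped constant.
func removeVolumePlacementLabelsSketch(client clientset.Interface, labelKey string, nodeNames ...string) {
	for _, nodeName := range nodeNames {
		framework.RemoveLabelOffNode(client, nodeName, labelKey)
	}
}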
|
||||
|
||||
func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, volumePaths []string) *v1.Pod {
|
||||
var pod *v1.Pod
|
||||
var err error
|
||||
By(fmt.Sprintf("Creating pod on the node: %v", nodeName))
|
||||
podspec := getVSpherePodSpecWithVolumePaths(volumePaths, nodeKeyValueLabel, nil)
|
||||
|
||||
pod, err = client.CoreV1().Pods(namespace).Create(podspec)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
By("Waiting for pod to be ready")
|
||||
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
|
||||
|
||||
By(fmt.Sprintf("Verify volume is attached to the node:%v", nodeName))
|
||||
for _, volumePath := range volumePaths {
|
||||
isAttached, err := diskIsAttached(volumePath, nodeName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(isAttached).To(BeTrue(), "disk:"+volumePath+" is not attached with the node")
|
||||
}
|
||||
return pod
|
||||
}
|
||||
|
||||
func createAndVerifyFilesOnVolume(namespace string, podname string, newEmptyfilesToCreate []string, filesToCheck []string) {
|
||||
// Create empty files on the mounted volumes on the pod to verify volume is writable
|
||||
By(fmt.Sprintf("Creating empty file on volume mounted on: %v", podname))
|
||||
createEmptyFilesOnVSphereVolume(namespace, podname, newEmptyfilesToCreate)
|
||||
|
||||
// Verify newly and previously created files present on the volume mounted on the pod
|
||||
By(fmt.Sprintf("Verify newly Created file and previously created files present on volume mounted on: %v", podname))
|
||||
verifyFilesExistOnVSphereVolume(namespace, podname, filesToCheck...)
|
||||
}
|
||||
|
||||
func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Interface, pod *v1.Pod, nodeName string, volumePaths []string) {
|
||||
By("Deleting pod")
|
||||
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name)
|
||||
|
||||
By("Waiting for volume to be detached from the node")
|
||||
for _, volumePath := range volumePaths {
|
||||
framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, nodeName))
|
||||
}
|
||||
}
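// Illustrative sketch (hypothetical helper, not part of the upstream file):
// runPlacementCycleSketch wraps the pattern the placement tests repeat above:
// create a pod pinned to a node, write and verify files on its volumes, then
// delete the pod and wait for the disks to detach.
func runPlacementCycleSketch(f *framework.Framework, c clientset.Interface, ns, nodeName string, nodeLabels map[string]string, volumePaths, newFiles, allFiles []string) {
	pod := createPodWithVolumeAndNodeSelector(c, ns, nodeName, nodeLabels, volumePaths)
	createAndVerifyFilesOnVolume(ns, pod.Name, newFiles, allFiles)
	deletePodAndWaitForVolumeToDetach(f, c, pod, nodeName, volumePaths)
}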
|
176
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go
generated
vendored
Normal file
@@ -0,0 +1,176 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package vsphere
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
/*
|
||||
Test to verify that a volume remains attached through vpxd restart.
|
||||
|
||||
For each of the schedulable nodes:
|
||||
1. Create a Volume with default options.
|
||||
2. Create a Pod with the created Volume.
|
||||
3. Verify that the Volume is attached.
|
||||
4. Create a file with random contents under the Volume's mount point on the Pod.
|
||||
5. Stop the vpxd service on the vCenter host.
|
||||
6. Verify that the file is accessible on the Pod and that its contents match.
7. Start the vpxd service on the vCenter host.
8. Verify that the Volume remains attached, the file is accessible on the Pod, and that its contents match.
|
||||
9. Delete the Pod and wait for the Volume to be detached.
|
||||
10. Delete the Volume.
|
||||
*/
|
||||
var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vsphere][Serial][Disruptive]", func() {
|
||||
f := framework.NewDefaultFramework("restart-vpxd")
|
||||
|
||||
type node struct {
|
||||
name string
|
||||
kvLabels map[string]string
|
||||
nodeInfo *NodeInfo
|
||||
}
|
||||
|
||||
const (
|
||||
labelKey = "vsphere_e2e_label_vpxd_restart"
|
||||
vpxdServiceName = "vmware-vpxd"
|
||||
)
|
||||
|
||||
var (
|
||||
client clientset.Interface
|
||||
namespace string
|
||||
vcNodesMap map[string][]node
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
// Requires SSH access to vCenter.
|
||||
framework.SkipUnlessProviderIs("vsphere")
|
||||
|
||||
Bootstrap(f)
|
||||
client = f.ClientSet
|
||||
namespace = f.Namespace.Name
|
||||
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
|
||||
|
||||
nodes := framework.GetReadySchedulableNodesOrDie(client)
|
||||
numNodes := len(nodes.Items)
|
||||
Expect(numNodes).NotTo(BeZero(), "No nodes are available for testing volume access through vpxd restart")
|
||||
|
||||
vcNodesMap = make(map[string][]node)
|
||||
for i := 0; i < numNodes; i++ {
|
||||
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodes.Items[i].Name)
|
||||
nodeName := nodes.Items[i].Name
|
||||
nodeLabel := "vsphere_e2e_" + string(uuid.NewUUID())
|
||||
framework.AddOrUpdateLabelOnNode(client, nodeName, labelKey, nodeLabel)
|
||||
|
||||
vcHost := nodeInfo.VSphere.Config.Hostname
|
||||
vcNodesMap[vcHost] = append(vcNodesMap[vcHost], node{
|
||||
name: nodeName,
|
||||
kvLabels: map[string]string{labelKey: nodeLabel},
|
||||
nodeInfo: nodeInfo,
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
It("verify volume remains attached through vpxd restart", func() {
|
||||
for vcHost, nodes := range vcNodesMap {
|
||||
var (
|
||||
volumePaths []string
|
||||
filePaths []string
|
||||
fileContents []string
|
||||
pods []*v1.Pod
|
||||
)
|
||||
|
||||
framework.Logf("Testing for nodes on vCenter host: %s", vcHost)
|
||||
|
||||
for i, node := range nodes {
|
||||
By(fmt.Sprintf("Creating test vsphere volume %d", i))
|
||||
volumePath, err := node.nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, node.nodeInfo.DataCenterRef)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
volumePaths = append(volumePaths, volumePath)
|
||||
|
||||
By(fmt.Sprintf("Creating pod %d on node %v", i, node.name))
|
||||
podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, node.kvLabels, nil)
|
||||
pod, err := client.CoreV1().Pods(namespace).Create(podspec)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Waiting for pod %d to be ready", i))
|
||||
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
|
||||
|
||||
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
pods = append(pods, pod)
|
||||
|
||||
nodeName := pod.Spec.NodeName
|
||||
By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName))
|
||||
expectVolumeToBeAttached(nodeName, volumePath)
|
||||
|
||||
By(fmt.Sprintf("Creating a file with random content on the volume mounted on pod %d", i))
|
||||
filePath := fmt.Sprintf("/mnt/volume1/%v_vpxd_restart_test_%v.txt", namespace, strconv.FormatInt(time.Now().UnixNano(), 10))
|
||||
randomContent := fmt.Sprintf("Random Content -- %v", strconv.FormatInt(time.Now().UnixNano(), 10))
|
||||
err = writeContentToPodFile(namespace, pod.Name, filePath, randomContent)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
filePaths = append(filePaths, filePath)
|
||||
fileContents = append(fileContents, randomContent)
|
||||
}
|
||||
|
||||
By("Stopping vpxd on the vCenter host")
|
||||
vcAddress := vcHost + ":22"
|
||||
err := invokeVCenterServiceControl("stop", vpxdServiceName, vcAddress)
|
||||
Expect(err).NotTo(HaveOccurred(), "Unable to stop vpxd on the vCenter host")
|
||||
|
||||
expectFilesToBeAccessible(namespace, pods, filePaths)
|
||||
expectFileContentsToMatch(namespace, pods, filePaths, fileContents)
|
||||
|
||||
By("Starting vpxd on the vCenter host")
|
||||
err = invokeVCenterServiceControl("start", vpxdServiceName, vcAddress)
|
||||
Expect(err).NotTo(HaveOccurred(), "Unable to start vpxd on the vCenter host")
|
||||
|
||||
expectVolumesToBeAttached(pods, volumePaths)
|
||||
expectFilesToBeAccessible(namespace, pods, filePaths)
|
||||
expectFileContentsToMatch(namespace, pods, filePaths, fileContents)
|
||||
|
||||
for i, node := range nodes {
|
||||
pod := pods[i]
|
||||
nodeName := pod.Spec.NodeName
|
||||
volumePath := volumePaths[i]
|
||||
|
||||
By(fmt.Sprintf("Deleting pod on node %s", nodeName))
|
||||
err = framework.DeletePodWithWait(f, client, pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Waiting for volume %s to be detached from node %s", volumePath, nodeName))
|
||||
err = waitForVSphereDiskToDetach(volumePath, nodeName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Deleting volume %s", volumePath))
|
||||
err = node.nodeInfo.VSphere.DeleteVolume(volumePath, node.nodeInfo.DataCenterRef)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
}
|
||||
})
|
||||
})
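// Illustrative sketch (hypothetical helper, not part of the upstream file):
// withServiceStoppedSketch wraps the stop/start sequence used above, making
// sure the service is started again even if the supplied checks fail.
func withServiceStoppedSketch(serviceName, vcAddress string, whileStopped func()) error {
	if err := invokeVCenterServiceControl("stop", serviceName, vcAddress); err != nil {
		return err
	}
	// Best-effort restart; the returned error is intentionally ignored here.
	defer invokeVCenterServiceControl("start", serviceName, vcAddress)
	whileStopped()
	return nil
}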
|
354
vendor/k8s.io/kubernetes/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go
generated
vendored
Normal file
@@ -0,0 +1,354 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package vsphere
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"time"
|
||||
|
||||
"strings"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
VmfsDatastore = "sharedVmfs-0"
|
||||
VsanDatastore = "vsanDatastore"
|
||||
Datastore = "datastore"
|
||||
Policy_DiskStripes = "diskStripes"
|
||||
Policy_HostFailuresToTolerate = "hostFailuresToTolerate"
|
||||
Policy_CacheReservation = "cacheReservation"
|
||||
Policy_ObjectSpaceReservation = "objectSpaceReservation"
|
||||
Policy_IopsLimit = "iopsLimit"
|
||||
DiskFormat = "diskformat"
|
||||
ThinDisk = "thin"
|
||||
SpbmStoragePolicy = "storagepolicyname"
|
||||
BronzeStoragePolicy = "bronze"
|
||||
HostFailuresToTolerateCapabilityVal = "0"
|
||||
CacheReservationCapabilityVal = "20"
|
||||
DiskStripesCapabilityVal = "1"
|
||||
ObjectSpaceReservationCapabilityVal = "30"
|
||||
IopsLimitCapabilityVal = "100"
|
||||
StripeWidthCapabilityVal = "2"
|
||||
DiskStripesCapabilityInvalidVal = "14"
|
||||
HostFailuresToTolerateCapabilityInvalidVal = "4"
|
||||
DummyVMPrefixName = "vsphere-k8s"
|
||||
DiskStripesCapabilityMaxVal = "11"
|
||||
)
|
||||
|
||||
/*
|
||||
Test to verify the storage policy based management for dynamic volume provisioning inside kubernetes.
|
||||
There are 2 ways to achieve it:
|
||||
1. Specify VSAN storage capabilities in the storage-class.
|
||||
2. Use existing vCenter SPBM storage policies.
|
||||
|
||||
Valid VSAN storage capabilities are mentioned below:
|
||||
1. hostFailuresToTolerate
|
||||
2. forceProvisioning
|
||||
3. cacheReservation
|
||||
4. diskStripes
|
||||
5. objectSpaceReservation
|
||||
6. iopsLimit
|
||||
|
||||
Steps
|
||||
1. Create StorageClass with.
|
||||
a. VSAN storage capabilities set to valid/invalid values (or)
|
||||
b. Use existing vCenter SPBM storage policies.
|
||||
2. Create PVC which uses the StorageClass created in step 1.
|
||||
3. Wait for PV to be provisioned.
|
||||
4. Wait for PVC's status to become Bound
|
||||
5. Create pod using PVC on specific node.
|
||||
6. Wait for Disk to be attached to the node.
|
||||
7. Delete pod and Wait for Volume Disk to be detached from the Node.
|
||||
8. Delete PVC, PV and Storage Class
|
||||
|
||||
|
||||
*/
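// Illustrative sketch (hypothetical, not part of the upstream file): the two
// provisioning modes described above expressed as parameter maps that can be
// passed to getVSphereStorageClassSpec. spbmPolicyName stands in for an
// existing vCenter SPBM policy name.
func exampleStorageClassParamsSketch(spbmPolicyName string) (vsanCapabilities, spbmPolicy map[string]string) {
	// 1. Raw VSAN storage capabilities set directly in the storage class.
	vsanCapabilities = map[string]string{
		Policy_HostFailuresToTolerate: HostFailuresToTolerateCapabilityVal,
		Policy_DiskStripes:            DiskStripesCapabilityVal,
	}
	// 2. An existing vCenter SPBM storage policy referenced by name.
	spbmPolicy = map[string]string{
		SpbmStoragePolicy: spbmPolicyName,
		DiskFormat:        ThinDisk,
	}
	return vsanCapabilities, spbmPolicy
}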
|
||||
|
||||
var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsphere]", func() {
|
||||
f := framework.NewDefaultFramework("volume-vsan-policy")
|
||||
var (
|
||||
client clientset.Interface
|
||||
namespace string
|
||||
scParameters map[string]string
|
||||
policyName string
|
||||
tagPolicy string
|
||||
masterNode string
|
||||
)
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("vsphere")
|
||||
Bootstrap(f)
|
||||
client = f.ClientSet
|
||||
namespace = f.Namespace.Name
|
||||
policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
|
||||
tagPolicy = GetAndExpectStringEnvVar(SPBMTagPolicy)
|
||||
framework.Logf("framework: %+v", f)
|
||||
scParameters = make(map[string]string)
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
if len(nodeList.Items) == 0 {
|
||||
framework.Failf("Unable to find ready and schedulable Node")
|
||||
}
|
||||
masternodes, _ := framework.GetMasterAndWorkerNodesOrDie(client)
|
||||
Expect(masternodes).NotTo(BeEmpty())
|
||||
masterNode = masternodes.List()[0]
|
||||
})
|
||||
|
||||
// Valid policy.
|
||||
It("verify VSAN storage capability with valid hostFailuresToTolerate and cacheReservation values is honored for dynamically provisioned pvc using storageclass", func() {
|
||||
By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s, cacheReservation: %s", HostFailuresToTolerateCapabilityVal, CacheReservationCapabilityVal))
|
||||
scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityVal
|
||||
scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal
|
||||
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
|
||||
invokeValidPolicyTest(f, client, namespace, scParameters)
|
||||
})
|
||||
|
||||
// Valid policy.
|
||||
It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values is honored for dynamically provisioned pvc using storageclass", func() {
|
||||
By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
|
||||
scParameters[Policy_DiskStripes] = "1"
|
||||
scParameters[Policy_ObjectSpaceReservation] = "30"
|
||||
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
|
||||
invokeValidPolicyTest(f, client, namespace, scParameters)
|
||||
})
|
||||
|
||||
// Valid policy.
|
||||
It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values and a VSAN datastore is honored for dynamically provisioned pvc using storageclass", func() {
|
||||
By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
|
||||
scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
|
||||
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
|
||||
scParameters[Datastore] = VsanDatastore
|
||||
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
|
||||
invokeValidPolicyTest(f, client, namespace, scParameters)
|
||||
})
|
||||
|
||||
// Valid policy.
|
||||
It("verify VSAN storage capability with valid objectSpaceReservation and iopsLimit values is honored for dynamically provisioned pvc using storageclass", func() {
|
||||
By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReservation: %s, iopsLimit: %s", ObjectSpaceReservationCapabilityVal, IopsLimitCapabilityVal))
|
||||
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
|
||||
scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal
|
||||
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
|
||||
invokeValidPolicyTest(f, client, namespace, scParameters)
|
||||
})
|
||||
|
||||
// Invalid VSAN storage capability parameters.
|
||||
It("verify VSAN storage capability with invalid capability name objectSpaceReserve is not honored for dynamically provisioned pvc using storageclass", func() {
|
||||
By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReserve: %s, stripeWidth: %s", ObjectSpaceReservationCapabilityVal, StripeWidthCapabilityVal))
|
||||
scParameters["objectSpaceReserve"] = ObjectSpaceReservationCapabilityVal
|
||||
scParameters[Policy_DiskStripes] = StripeWidthCapabilityVal
|
||||
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
|
||||
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
|
||||
Expect(err).To(HaveOccurred())
|
||||
errorMsg := "invalid option \\\"objectSpaceReserve\\\" for volume plugin kubernetes.io/vsphere-volume"
|
||||
if !strings.Contains(err.Error(), errorMsg) {
|
||||
Expect(err).NotTo(HaveOccurred(), errorMsg)
|
||||
}
|
||||
})
|
||||
|
||||
// Invalid policy on a VSAN test bed.
|
||||
// diskStripes value has to be between 1 and 12.
|
||||
It("verify VSAN storage capability with invalid diskStripes value is not honored for dynamically provisioned pvc using storageclass", func() {
|
||||
By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, cacheReservation: %s", DiskStripesCapabilityInvalidVal, CacheReservationCapabilityVal))
|
||||
scParameters[Policy_DiskStripes] = DiskStripesCapabilityInvalidVal
|
||||
scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal
|
||||
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
|
||||
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
|
||||
Expect(err).To(HaveOccurred())
|
||||
errorMsg := "Invalid value for " + Policy_DiskStripes + "."
|
||||
if !strings.Contains(err.Error(), errorMsg) {
|
||||
Expect(err).NotTo(HaveOccurred(), errorMsg)
|
||||
}
|
||||
})
|
||||
|
||||
// Invalid policy on a VSAN test bed.
|
||||
// hostFailuresToTolerate value has to be between 0 and 3 including.
|
||||
It("verify VSAN storage capability with invalid hostFailuresToTolerate value is not honored for dynamically provisioned pvc using storageclass", func() {
|
||||
By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s", HostFailuresToTolerateCapabilityInvalidVal))
|
||||
scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal
|
||||
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
|
||||
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
|
||||
Expect(err).To(HaveOccurred())
|
||||
errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "."
|
||||
if !strings.Contains(err.Error(), errorMsg) {
|
||||
Expect(err).NotTo(HaveOccurred(), errorMsg)
|
||||
}
|
||||
})
|
||||
|
||||
// Specify a valid VSAN policy on a non-VSAN test bed.
|
||||
// The test should fail.
|
||||
It("verify VSAN storage capability with non-vsan datastore is not honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s and a non-VSAN datastore: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal, VmfsDatastore))
scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Datastore] = VmfsDatastore
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "The specified datastore: \\\"" + VmfsDatastore + "\\\" is not a VSAN datastore. " +
"The policy parameters will work only with VSAN Datastore."
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})

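// SPBM policy tests: provisioning is expected to succeed when the storage class
// references an existing, compatible storage policy by name.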
It("verify an existing and compatible SPBM policy is honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for SPBM policy: %s", policyName))
scParameters[SpbmStoragePolicy] = policyName
scParameters[DiskFormat] = ThinDisk
framework.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
invokeValidPolicyTest(f, client, namespace, scParameters)
})

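// Provisioning with these parameters is expected to fail; the test then checks that the
// intermediate "dummy" VM created during the failed provisioning attempt is cleaned up
// by the vSphere Cloud Provider (see invokeStaleDummyVMTestWithStoragePolicy below).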
It("verify clean up of stale dummy VM for dynamically provisioned pvc using SPBM policy", func() {
scParameters[Policy_DiskStripes] = DiskStripesCapabilityMaxVal
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Datastore] = VsanDatastore
framework.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
kubernetesClusterName := GetAndExpectStringEnvVar(KubernetesClusterName)
invokeStaleDummyVMTestWithStoragePolicy(client, masterNode, namespace, kubernetesClusterName, scParameters)
})

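// A policy that exists but is not compatible with the chosen datastore should be
// rejected; provisioning must fail with the compatibility error asserted below.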
It("verify if a SPBM policy is not honored on a non-compatible datastore for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for SPBM policy: %s and datastore: %s", tagPolicy, VsanDatastore))
scParameters[SpbmStoragePolicy] = tagPolicy
scParameters[Datastore] = VsanDatastore
scParameters[DiskFormat] = ThinDisk
framework.Logf("Invoking test for SPBM storage policy on a non-compatible datastore: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + tagPolicy + "\\\""
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})

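// A policy name with no matching PBM profile should cause provisioning to fail with
// the "no pbm profile found" error asserted below.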
It("verify if a non-existing SPBM policy is not honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for SPBM policy: %s", BronzeStoragePolicy))
scParameters[SpbmStoragePolicy] = BronzeStoragePolicy
scParameters[DiskFormat] = ThinDisk
framework.Logf("Invoking test for non-existing SPBM storage policy: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "no pbm profile found with name: \\\"" + BronzeStoragePolicy + "\\\""
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})

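// Specifying an SPBM policy name together with raw VSAN capability parameters is
// invalid; provisioning should fail with the mutual-exclusion error asserted below.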
It("verify an if a SPBM policy and VSAN capabilities cannot be honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for SPBM policy: %s with VSAN storage capabilities", policyName))
scParameters[SpbmStoragePolicy] = policyName
Expect(scParameters[SpbmStoragePolicy]).NotTo(BeEmpty())
scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
scParameters[DiskFormat] = ThinDisk
framework.Logf("Invoking test for SPBM storage policy and VSAN capabilities together: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "Cannot specify storage policy capabilities along with storage policy name. Please specify only one"
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
})

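// invokeValidPolicyTest provisions a PVC through a storage class built from scParameters,
// mounts it in a pod, verifies the volume is accessible, and waits for the disk to detach
// from the node after the pod is deleted.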
func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, namespace string, scParameters map[string]string) {
By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)

By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred())
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)

var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())

By("Creating pod to attach PV to the node")
// Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred())

By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)

By("Deleting pod")
framework.DeletePodWithWait(f, client, pod)

By("Waiting for volumes to be detached from the node")
waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
}

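// invokeInvalidPolicyTestNeg creates a storage class from the (intentionally invalid)
// scParameters, waits briefly for the PVC to bind, expects that wait to fail, and returns
// the message of the first event recorded in the PVC's namespace so callers can assert on
// the provisioning error.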
func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error {
By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)

By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred())
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)

By("Waiting for claim to be in bound phase")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
Expect(err).To(HaveOccurred())

eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
}

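// invokeStaleDummyVMTestWithStoragePolicy triggers a provisioning attempt that is expected
// to fail, then verifies that the dummy VM left behind by that attempt has been removed by
// the vSphere Cloud Provider's cleanup routine.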
func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterNode string, namespace string, clusterName string, scParameters map[string]string) {
By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)

By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred())

var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
By("Expect claim to fail provisioning volume")
_, err = framework.WaitForPVClaimBoundPhase(client, pvclaims, 2*time.Minute)
Expect(err).To(HaveOccurred())

updatedClaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(pvclaim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
vmName := clusterName + "-dynamic-pvc-" + string(updatedClaim.UID)
framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
// Wait for 6 minutes to let the vSphere Cloud Provider cleanup routine delete the dummy VM.
time.Sleep(6 * time.Minute)

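// Reconstruct the dummy VM name (DummyVMPrefixName plus the FNV-32a hash of the would-be
// volume name), presumably mirroring the naming used by the vSphere Cloud Provider, and
// assert the VM is no longer present in the datacenter.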
fnvHash := fnv.New32a()
fnvHash.Write([]byte(vmName))
dummyVMFullName := DummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32())
errorMsg := "Dummy VM - " + vmName + " is still present. Failing the test."
nodeInfo := TestContext.NodeMapper.GetNodeInfo(masterNode)
Expect(nodeInfo.VSphere.IsVMPresent(dummyVMFullName, nodeInfo.DataCenterRef)).NotTo(BeTrue(), errorMsg)
}