Bumping k8s dependencies to 1.13

Author: Cheng Xing
Date: 2018-11-16 14:08:25 -08:00
parent 305407125c
commit b4c0b68ec7
8002 changed files with 884099 additions and 276228 deletions


@@ -11,91 +11,45 @@ go_library(
"metrics_statfs.go",
"plugins.go",
"volume.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"volume_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"volume_unsupported.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"volume_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"volume_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"volume_linux.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"volume_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"volume_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"volume_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"volume_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"volume_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"volume_unsupported.go",
],
"//conditions:default": [],
}),
"volume_linux.go",
"volume_unsupported.go",
],
importpath = "k8s.io/kubernetes/pkg/volume",
visibility = ["//visibility:public"],
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/util/io:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume/util/fs:go_default_library",
"//pkg/volume/util/recyclerclient:go_default_library",
"//staging/src/k8s.io/api/authentication/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"metrics_du_test.go",
"metrics_nil_test.go",
"metrics_statfs_test.go",
"plugins_test.go",
],
embed = [":go_default_library"],
deps = [
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)
go_test(
name = "go_default_xtest",
srcs = [
"metrics_statfs_test.go",
] + select({
"@io_bazel_rules_go//go/platform:linux": [
"metrics_du_test.go",
],
"//conditions:default": [],
}),
deps = [
":go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:linux": [
"//vendor/golang.org/x/sys/unix:go_default_library",


@@ -15,3 +15,5 @@ reviewers:
- gnufied
- verult
- davidz627
labels:
- sig/storage


@@ -25,12 +25,13 @@ go_library(
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
],
)
@@ -47,13 +48,13 @@ go_test(
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)


@@ -39,8 +39,12 @@ type awsElasticBlockStoreAttacher struct {
var _ volume.Attacher = &awsElasticBlockStoreAttacher{}
var _ volume.DeviceMounter = &awsElasticBlockStoreAttacher{}
var _ volume.AttachableVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.DeviceMountableVolumePlugin = &awsElasticBlockStorePlugin{}
func (plugin *awsElasticBlockStorePlugin) NewAttacher() (volume.Attacher, error) {
awsCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
@@ -53,9 +57,13 @@ func (plugin *awsElasticBlockStorePlugin) NewAttacher() (volume.Attacher, error)
}, nil
}
func (plugin *awsElasticBlockStorePlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return plugin.NewAttacher()
}
func (plugin *awsElasticBlockStorePlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
return mounter.GetMountRefs(deviceMountPath)
}
func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
@@ -236,6 +244,8 @@ type awsElasticBlockStoreDetacher struct {
var _ volume.Detacher = &awsElasticBlockStoreDetacher{}
var _ volume.DeviceUnmounter = &awsElasticBlockStoreDetacher{}
func (plugin *awsElasticBlockStorePlugin) NewDetacher() (volume.Detacher, error) {
awsCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
@@ -248,6 +258,10 @@ func (plugin *awsElasticBlockStorePlugin) NewDetacher() (volume.Detacher, error)
}, nil
}
func (plugin *awsElasticBlockStorePlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return plugin.NewDetacher()
}
func (detacher *awsElasticBlockStoreDetacher) Detach(volumeName string, nodeName types.NodeName) error {
volumeID := aws.KubernetesVolumeID(path.Base(volumeName))
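
For context, the DeviceMounter/DeviceUnmounter assertions and the delegating NewDeviceMounter/NewDeviceUnmounter constructors added above come from the device mount/unmount half being split out of the Attacher/Detacher interfaces in this release. A minimal standalone sketch of the assumed split (method sets are illustrative, not copied from the release):

package sketch

// Spec stands in for volume.Spec from k8s.io/kubernetes/pkg/volume.
type Spec struct{}

// DeviceMounter is the device-mount half assumed to be factored out of Attacher.
type DeviceMounter interface {
	// GetDeviceMountPath returns the plugin's global mount point for the volume.
	GetDeviceMountPath(spec *Spec) (string, error)
	// MountDevice mounts the attached device onto that global mount point.
	MountDevice(spec *Spec, devicePath string, deviceMountPath string) error
}

// DeviceUnmounter is the matching half assumed to be factored out of Detacher.
type DeviceUnmounter interface {
	UnmountDevice(deviceMountPath string) error
}

// An Attacher whose method set already covers DeviceMounter satisfies the new
// interface for free, which is why the diff can simply delegate:
//
//	func (p *plugin) NewDeviceMounter() (DeviceMounter, error) { return p.NewAttacher() }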


@@ -99,7 +99,7 @@ func (plugin *awsElasticBlockStorePlugin) SupportsBulkVolumeVerification() bool
func (plugin *awsElasticBlockStorePlugin) GetVolumeLimits() (map[string]int64, error) {
volumeLimits := map[string]int64{
util.EBSVolumeLimitKey: 39,
util.EBSVolumeLimitKey: util.DefaultMaxEBSVolumes,
}
cloud := plugin.host.GetCloudProvider()
@@ -127,8 +127,8 @@ func (plugin *awsElasticBlockStorePlugin) GetVolumeLimits() (map[string]int64, e
return volumeLimits, nil
}
if ok, _ := regexp.MatchString("^[cm]5.*", instanceType); ok {
volumeLimits[util.EBSVolumeLimitKey] = 25
if ok, _ := regexp.MatchString(util.EBSNitroLimitRegex, instanceType); ok {
volumeLimits[util.EBSVolumeLimitKey] = util.DefaultMaxEBSNitroVolumeLimit
}
return volumeLimits, nil
@@ -314,7 +314,7 @@ var _ volume.VolumePluginWithAttachLimits = &awsElasticBlockStorePlugin{}
// Abstract interface to PD operations.
type ebsManager interface {
CreateVolume(provisioner *awsElasticBlockStoreProvisioner) (volumeID aws.KubernetesVolumeID, volumeSizeGB int, labels map[string]string, fstype string, err error)
CreateVolume(provisioner *awsElasticBlockStoreProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID aws.KubernetesVolumeID, volumeSizeGB int, labels map[string]string, fstype string, err error)
// Deletes a volume
DeleteVolume(deleter *awsElasticBlockStoreDeleter) error
}
@@ -504,7 +504,7 @@ func (c *awsElasticBlockStoreProvisioner) Provision(selectedNode *v1.Node, allow
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
}
volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c)
volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c, selectedNode, allowedTopologies)
if err != nil {
glog.Errorf("Provision failed: %v", err)
return nil, err
@@ -514,6 +514,15 @@ func (c *awsElasticBlockStoreProvisioner) Provision(selectedNode *v1.Node, allow
fstype = "ext4"
}
var volumeMode *v1.PersistentVolumeMode
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
volumeMode = c.options.PVC.Spec.VolumeMode
if volumeMode != nil && *volumeMode == v1.PersistentVolumeBlock {
// Block volumes should not have any FSType
fstype = ""
}
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: c.options.PVName,
@@ -528,6 +537,7 @@ func (c *awsElasticBlockStoreProvisioner) Provision(selectedNode *v1.Node, allow
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
VolumeMode: volumeMode,
PersistentVolumeSource: v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: string(volumeID),
@@ -544,17 +554,22 @@ func (c *awsElasticBlockStoreProvisioner) Provision(selectedNode *v1.Node, allow
pv.Spec.AccessModes = c.plugin.GetAccessModes()
}
requirements := make([]v1.NodeSelectorRequirement, 0)
if len(labels) != 0 {
if pv.Labels == nil {
pv.Labels = make(map[string]string)
}
for k, v := range labels {
pv.Labels[k] = v
requirements = append(requirements, v1.NodeSelectorRequirement{Key: k, Operator: v1.NodeSelectorOpIn, Values: []string{v}})
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
pv.Spec.VolumeMode = c.options.PVC.Spec.VolumeMode
if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
pv.Spec.NodeAffinity = new(v1.VolumeNodeAffinity)
pv.Spec.NodeAffinity.Required = new(v1.NodeSelector)
pv.Spec.NodeAffinity.Required.NodeSelectorTerms = make([]v1.NodeSelectorTerm, 1)
pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions = requirements
}
return pv, nil
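
To make the affinity wiring above concrete: each cloud-provider label is turned into a NodeSelectorRequirement, and under the VolumeScheduling feature gate the requirements are attached to the PV as a single required node selector term. A self-contained sketch using the k8s.io/api types (the label key and value are a hypothetical zone label):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Hypothetical labels returned by the cloud provider for a freshly
	// provisioned EBS volume.
	labels := map[string]string{"failure-domain.beta.kubernetes.io/zone": "us-east-1a"}

	requirements := make([]v1.NodeSelectorRequirement, 0, len(labels))
	for k, val := range labels {
		requirements = append(requirements, v1.NodeSelectorRequirement{
			Key: k, Operator: v1.NodeSelectorOpIn, Values: []string{val},
		})
	}

	// Mirrors the Provision() path under the VolumeScheduling feature gate:
	// one required term holding all label-derived requirements.
	affinity := &v1.VolumeNodeAffinity{
		Required: &v1.NodeSelector{
			NodeSelectorTerms: []v1.NodeSelectorTerm{{MatchExpressions: requirements}},
		},
	}
	fmt.Printf("%+v\n", affinity.Required.NodeSelectorTerms[0].MatchExpressions)
}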


@@ -33,11 +33,7 @@ import (
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
var _ volume.VolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.PersistentVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.BlockVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.DeletableVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.ProvisionableVolumePlugin = &awsElasticBlockStorePlugin{}
func (plugin *awsElasticBlockStorePlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
pluginDir := plugin.host.GetVolumeDevicePluginDir(awsElasticBlockStorePluginName)


@@ -84,7 +84,7 @@ type fakePDManager struct {
// TODO(jonesdl) To fully test this, we could create a loopback device
// and mount that instead.
func (fake *fakePDManager) CreateVolume(c *awsElasticBlockStoreProvisioner) (volumeID aws.KubernetesVolumeID, volumeSizeGB int, labels map[string]string, fstype string, err error) {
func (fake *fakePDManager) CreateVolume(c *awsElasticBlockStoreProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID aws.KubernetesVolumeID, volumeSizeGB int, labels map[string]string, fstype string, err error) {
labels = make(map[string]string)
labels["fakepdmanager"] = "yes"
return "test-aws-volume-name", 100, labels, "", nil
@@ -173,6 +173,7 @@ func TestPlugin(t *testing.T) {
if err != nil {
t.Errorf("Error creating new provisioner:%v", err)
}
persistentSpec, err := provisioner.Provision(nil, nil)
if err != nil {
t.Errorf("Provision() failed: %v", err)
@@ -191,6 +192,37 @@ func TestPlugin(t *testing.T) {
t.Errorf("Provision() returned unexpected labels: %v", persistentSpec.Labels)
}
// check nodeaffinity members
if persistentSpec.Spec.NodeAffinity == nil {
t.Errorf("Provision() returned unexpected nil NodeAffinity")
}
if persistentSpec.Spec.NodeAffinity.Required == nil {
t.Errorf("Provision() returned unexpected nil NodeAffinity.Required")
}
n := len(persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms)
if n != 1 {
t.Errorf("Provision() returned unexpected number of NodeSelectorTerms %d. Expected %d", n, 1)
}
n = len(persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions)
if n != 1 {
t.Errorf("Provision() returned unexpected number of MatchExpressions %d. Expected %d", n, 1)
}
req := persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0]
if req.Key != "fakepdmanager" {
t.Errorf("Provision() returned unexpected requirement key in NodeAffinity %v", req.Key)
}
if req.Operator != v1.NodeSelectorOpIn {
t.Errorf("Provision() returned unexpected requirement operator in NodeAffinity %v", req.Operator)
}
if len(req.Values) != 1 || req.Values[0] != "yes" {
t.Errorf("Provision() returned unexpected requirement value in NodeAffinity %v", req.Values)
}
// Test Deleter
volSpec := &volume.Spec{
PersistentVolume: persistentSpec,


@@ -26,6 +26,8 @@ import (
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/volume"
@@ -40,6 +42,7 @@ const (
maxRetries = 10
checkSleepDuration = time.Second
errorSleepDuration = 5 * time.Second
ebsMaxReplicasInAZ = 1
)
type AWSDiskUtil struct{}
@@ -67,7 +70,7 @@ func (util *AWSDiskUtil) DeleteVolume(d *awsElasticBlockStoreDeleter) error {
// CreateVolume creates an AWS EBS volume.
// Returns: volumeID, volumeSizeGB, labels, fstype, error
func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (aws.KubernetesVolumeID, int, map[string]string, string, error) {
func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (aws.KubernetesVolumeID, int, map[string]string, string, error) {
cloud, err := getCloudProvider(c.awsElasticBlockStore.plugin.host.GetCloudProvider())
if err != nil {
return "", 0, nil, "", err
@@ -83,50 +86,16 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (aws.K
tags["Name"] = volumeutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // AWS tags can have 255 characters
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
// AWS works with gigabytes, convert to GiB with rounding up
requestGB := int(volumeutil.RoundUpSize(requestBytes, 1024*1024*1024))
volumeOptions := &aws.VolumeOptions{
CapacityGB: requestGB,
Tags: tags,
PVCName: c.options.PVC.Name,
}
fstype := ""
// Apply Parameters (case-insensitive). We leave validation of
// the values to the cloud provider.
volumeOptions.ZonePresent = false
volumeOptions.ZonesPresent = false
for k, v := range c.options.Parameters {
switch strings.ToLower(k) {
case "type":
volumeOptions.VolumeType = v
case "zone":
volumeOptions.ZonePresent = true
volumeOptions.AvailabilityZone = v
case "zones":
volumeOptions.ZonesPresent = true
volumeOptions.AvailabilityZones = v
case "iopspergb":
volumeOptions.IOPSPerGB, err = strconv.Atoi(v)
if err != nil {
return "", 0, nil, "", fmt.Errorf("invalid iopsPerGB value %q, must be integer between 1 and 30: %v", v, err)
}
case "encrypted":
volumeOptions.Encrypted, err = strconv.ParseBool(v)
if err != nil {
return "", 0, nil, "", fmt.Errorf("invalid encrypted boolean value %q, must be true or false: %v", v, err)
}
case "kmskeyid":
volumeOptions.KmsKeyId = v
case volume.VolumeParameterFSType:
fstype = v
default:
return "", 0, nil, "", fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName())
}
zonesWithNodes, err := cloud.GetCandidateZonesForDynamicVolume()
if err != nil {
return "", 0, nil, "", fmt.Errorf("error querying for all zones: %v", err)
}
if volumeOptions.ZonePresent && volumeOptions.ZonesPresent {
return "", 0, nil, "", fmt.Errorf("both zone and zones StorageClass parameters must not be used at the same time")
volumeOptions, err := populateVolumeOptions(c.plugin.GetPluginName(), c.options.PVC.Name, capacity, tags, c.options.Parameters, node, allowedTopologies, zonesWithNodes)
if err != nil {
glog.V(2).Infof("Error populating EBS options: %v", err)
return "", 0, nil, "", err
}
// TODO: implement PVC.Selector parsing
@@ -147,7 +116,69 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (aws.K
glog.Errorf("error building labels for new EBS volume %q: %v", name, err)
}
return name, int(requestGB), labels, fstype, nil
fstype := ""
if v, ok := c.options.Parameters[volume.VolumeParameterFSType]; ok {
fstype = v
}
return name, volumeOptions.CapacityGB, labels, fstype, nil
}
// returns volumeOptions for EBS based on storageclass parameters and node configuration
func populateVolumeOptions(pluginName, pvcName string, capacityGB resource.Quantity, tags map[string]string, storageParams map[string]string, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm, zonesWithNodes sets.String) (*aws.VolumeOptions, error) {
requestGiB, err := volumeutil.RoundUpToGiBInt(capacityGB)
if err != nil {
return nil, err
}
volumeOptions := &aws.VolumeOptions{
CapacityGB: requestGiB,
Tags: tags,
}
// Apply Parameters (case-insensitive). We leave validation of
// the values to the cloud provider.
zonePresent := false
zonesPresent := false
var zone string
var zones sets.String
for k, v := range storageParams {
switch strings.ToLower(k) {
case "type":
volumeOptions.VolumeType = v
case "zone":
zonePresent = true
zone = v
case "zones":
zonesPresent = true
zones, err = volumeutil.ZonesToSet(v)
if err != nil {
return nil, fmt.Errorf("error parsing zones %s, must be strings separated by commas: %v", zones, err)
}
case "iopspergb":
volumeOptions.IOPSPerGB, err = strconv.Atoi(v)
if err != nil {
return nil, fmt.Errorf("invalid iopsPerGB value %q, must be integer between 1 and 30: %v", v, err)
}
case "encrypted":
volumeOptions.Encrypted, err = strconv.ParseBool(v)
if err != nil {
return nil, fmt.Errorf("invalid encrypted boolean value %q, must be true or false: %v", v, err)
}
case "kmskeyid":
volumeOptions.KmsKeyId = v
case volume.VolumeParameterFSType:
// Do nothing but don't make this fail
default:
return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, pluginName)
}
}
volumeOptions.AvailabilityZone, err = volumeutil.SelectZoneForVolume(zonePresent, zonesPresent, zone, zones, zonesWithNodes, node, allowedTopologies, pvcName)
if err != nil {
return nil, err
}
return volumeOptions, nil
}
// Returns the first path that exists, or empty string if none exist.
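
To summarize the refactor above: parameter parsing moves into populateVolumeOptions, and the zone decision is delegated to volumeutil.SelectZoneForVolume, which weighs the zone/zones parameters, the candidate zones with nodes, the selected node, and allowedTopologies. A hedged sketch of the "zones" parsing step it depends on, assuming ZonesToSet splits a comma-separated StorageClass value into a string set and rejects empty entries:

package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/util/sets"
)

// zonesToSet mimics the assumed behavior of volumeutil.ZonesToSet.
func zonesToSet(zonesString string) (sets.String, error) {
	zones := sets.NewString()
	for _, zone := range strings.Split(zonesString, ",") {
		zone = strings.TrimSpace(zone)
		if zone == "" {
			return nil, fmt.Errorf("%q contains an empty zone", zonesString)
		}
		zones.Insert(zone)
	}
	return zones, nil
}

func main() {
	zones, err := zonesToSet("us-east-1a, us-east-1b")
	fmt.Println(zones.List(), err) // [us-east-1a us-east-1b] <nil>
}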


@@ -11,68 +11,37 @@ go_library(
srcs = [
"attacher.go",
"azure_common.go",
"azure_common_linux.go",
"azure_common_unsupported.go",
"azure_common_windows.go",
"azure_dd.go",
"azure_dd_block.go",
"azure_mounter.go",
"azure_provision.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"azure_common_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"azure_common_unsupported.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"azure_common_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"azure_common_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"azure_common_linux.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"azure_common_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"azure_common_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"azure_common_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"azure_common_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"azure_common_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"azure_common_windows.go",
],
"//conditions:default": [],
}),
],
importpath = "k8s.io/kubernetes/pkg/volume/azure_dd",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/azure:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/util/keymutex:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
],
)
@@ -95,15 +64,20 @@ go_test(
"azure_common_test.go",
"azure_dd_block_test.go",
"azure_dd_test.go",
"azure_provision_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
],
)


@@ -25,7 +25,7 @@ import (
"strconv"
"time"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
"github.com/golang/glog"
"k8s.io/api/core/v1"
@@ -52,8 +52,11 @@ type azureDiskAttacher struct {
var _ volume.Attacher = &azureDiskAttacher{}
var _ volume.Detacher = &azureDiskDetacher{}
var _ volume.DeviceMounter = &azureDiskAttacher{}
var _ volume.DeviceUnmounter = &azureDiskDetacher{}
// acquire lock to get a LUN number
var getLunMutex = keymutex.NewKeyMutex()
var getLunMutex = keymutex.NewHashed(0)
// Attach attaches a volume.Spec to an Azure VM referenced by NodeName, returning the disk's LUN
func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
@@ -84,9 +87,9 @@ func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (
if err == nil {
// Volume is already attached to node.
glog.V(4).Infof("Attach operation is successful. volume %q is already attached to node %q at lun %d.", volumeSource.DiskName, instanceid, lun)
glog.V(2).Infof("Attach operation is successful. volume %q is already attached to node %q at lun %d.", volumeSource.DiskName, instanceid, lun)
} else {
glog.V(4).Infof("GetDiskLun returned: %v. Initiating attaching volume %q to node %q.", err, volumeSource.DataDiskURI, nodeName)
glog.V(2).Infof("GetDiskLun returned: %v. Initiating attaching volume %q to node %q.", err, volumeSource.DataDiskURI, nodeName)
getLunMutex.LockKey(instanceid)
defer getLunMutex.UnlockKey(instanceid)
@@ -95,11 +98,11 @@ func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (
glog.Warningf("no LUN available for instance %q (%v)", nodeName, err)
return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q (%v)", volumeSource.DiskName, instanceid, err)
}
glog.V(4).Infof("Trying to attach volume %q lun %d to node %q.", volumeSource.DataDiskURI, lun, nodeName)
glog.V(2).Infof("Trying to attach volume %q lun %d to node %q.", volumeSource.DataDiskURI, lun, nodeName)
isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk)
err = diskController.AttachDisk(isManagedDisk, volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode))
if err == nil {
glog.V(4).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName)
glog.V(2).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName)
} else {
glog.V(2).Infof("Attach volume %q to instance %q failed with %v", volumeSource.DataDiskURI, instanceid, err)
return "", fmt.Errorf("Attach volume %q to instance %q failed with %v", volumeSource.DiskName, instanceid, err)
@@ -164,13 +167,13 @@ func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string,
nodeName := types.NodeName(a.plugin.host.GetHostName())
diskName := volumeSource.DiskName
glog.V(5).Infof("azureDisk - WaitForAttach: begin to GetDiskLun by diskName(%s), DataDiskURI(%s), nodeName(%s), devicePath(%s)",
glog.V(2).Infof("azureDisk - WaitForAttach: begin to GetDiskLun by diskName(%s), DataDiskURI(%s), nodeName(%s), devicePath(%s)",
diskName, volumeSource.DataDiskURI, nodeName, devicePath)
lun, err := diskController.GetDiskLun(diskName, volumeSource.DataDiskURI, nodeName)
if err != nil {
return "", err
}
glog.V(5).Infof("azureDisk - WaitForAttach: GetDiskLun succeeded, got lun(%v)", lun)
glog.V(2).Infof("azureDisk - WaitForAttach: GetDiskLun succeeded, got lun(%v)", lun)
exec := a.plugin.host.GetExec(a.plugin.GetPluginName())
io := &osIOHandler{}
@@ -278,7 +281,7 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro
return nil
}
glog.V(4).Infof("detach %v from node %q", diskURI, nodeName)
glog.V(2).Infof("detach %v from node %q", diskURI, nodeName)
diskController, err := getDiskController(d.plugin.host)
if err != nil {
@@ -301,9 +304,9 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro
func (detacher *azureDiskDetacher) UnmountDevice(deviceMountPath string) error {
err := util.UnmountPath(deviceMountPath, detacher.plugin.host.GetMounter(detacher.plugin.GetPluginName()))
if err == nil {
glog.V(4).Infof("azureDisk - Device %s was unmounted", deviceMountPath)
glog.V(2).Infof("azureDisk - Device %s was unmounted", deviceMountPath)
} else {
glog.Infof("azureDisk - Device %s failed to unmount with error: %s", deviceMountPath, err.Error())
glog.Warningf("azureDisk - Device %s failed to unmount with error: %s", deviceMountPath, err.Error())
}
return err
}
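
The keymutex change above replaces an unbounded map of per-key mutexes with a fixed pool of hashed locks. A short sketch of the usage pattern, assuming NewHashed(0) lets the implementation pick a default shard count (e.g. the number of CPUs):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/keymutex"
)

func main() {
	// Keys hash onto a fixed set of shards, so memory no longer grows with
	// the number of distinct instance IDs that ever took the lock.
	lock := keymutex.NewHashed(0) // 0 => default shard count (assumed)

	instanceID := "aks-nodepool1-00000000-0" // hypothetical Azure instance ID
	lock.LockKey(instanceID)
	defer lock.UnlockKey(instanceID)

	fmt.Println("holding the lock shard for", instanceID)
}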


@@ -37,7 +37,7 @@ import (
const (
defaultStorageAccountType = storage.StandardLRS
defaultAzureDiskKind = v1.AzureSharedBlobDisk
defaultAzureDiskKind = v1.AzureManagedDisk
defaultAzureDataDiskCachingMode = v1.AzureDataDiskCachingNone
)
@@ -59,8 +59,6 @@ var (
string(api.AzureSharedBlobDisk),
string(api.AzureDedicatedBlobDisk),
string(api.AzureManagedDisk))
supportedStorageAccountTypes = sets.NewString("Premium_LRS", "Standard_LRS", "Standard_GRS", "Standard_RAGRS")
)
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
@@ -127,11 +125,15 @@ func normalizeStorageAccountType(storageAccountType string) (storage.SkuName, er
return defaultStorageAccountType, nil
}
if !supportedStorageAccountTypes.Has(storageAccountType) {
return "", fmt.Errorf("azureDisk - %s is not supported sku/storageaccounttype. Supported values are %s", storageAccountType, supportedStorageAccountTypes.List())
sku := storage.SkuName(storageAccountType)
supportedSkuNames := storage.PossibleSkuNameValues()
for _, s := range supportedSkuNames {
if sku == s {
return sku, nil
}
}
return storage.SkuName(storageAccountType), nil
return "", fmt.Errorf("azureDisk - %s is not supported sku/storageaccounttype. Supported values are %s", storageAccountType, supportedSkuNames)
}
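
With PossibleSkuNameValues() the set of accepted SKUs now tracks the Azure SDK instead of a hand-maintained list, which is what makes Standard_ZRS newly valid in the test that follows. A consolidated, runnable sketch of the rewritten helper and its assumed behavior:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
)

// normalizeStorageAccountType mirrors the rewritten helper: validate against
// the SDK's own SKU list rather than a hard-coded set.
func normalizeStorageAccountType(storageAccountType string) (storage.SkuName, error) {
	if storageAccountType == "" {
		return storage.StandardLRS, nil // default preserved from the old code
	}
	sku := storage.SkuName(storageAccountType)
	for _, s := range storage.PossibleSkuNameValues() {
		if sku == s {
			return sku, nil
		}
	}
	return "", fmt.Errorf("azureDisk - %s is not a supported sku/storageaccounttype", storageAccountType)
}

func main() {
	fmt.Println(normalizeStorageAccountType(""))             // Standard_LRS <nil>
	fmt.Println(normalizeStorageAccountType("Standard_ZRS")) // Standard_ZRS <nil>
	fmt.Println(normalizeStorageAccountType("NOT_EXISTING")) // "" plus an error
}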
func normalizeCachingMode(cachingMode v1.AzureDataDiskCachingMode) (v1.AzureDataDiskCachingMode, error) {


@@ -24,6 +24,9 @@ import (
"testing"
"time"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/util/mount"
)
@@ -134,3 +137,53 @@ func TestIoHandler(t *testing.T) {
}
}
}
func TestNormalizeStorageAccountType(t *testing.T) {
tests := []struct {
storageAccountType string
expectedAccountType storage.SkuName
expectError bool
}{
{
storageAccountType: "",
expectedAccountType: storage.StandardLRS,
expectError: false,
},
{
storageAccountType: "NOT_EXISTING",
expectedAccountType: "",
expectError: true,
},
{
storageAccountType: "Standard_LRS",
expectedAccountType: storage.StandardLRS,
expectError: false,
},
{
storageAccountType: "Premium_LRS",
expectedAccountType: storage.PremiumLRS,
expectError: false,
},
{
storageAccountType: "Standard_GRS",
expectedAccountType: storage.StandardGRS,
expectError: false,
},
{
storageAccountType: "Standard_RAGRS",
expectedAccountType: storage.StandardRAGRS,
expectError: false,
},
{
storageAccountType: "Standard_ZRS",
expectedAccountType: storage.StandardZRS,
expectError: false,
},
}
for _, test := range tests {
result, err := normalizeStorageAccountType(test.storageAccountType)
assert.Equal(t, result, test.expectedAccountType)
assert.Equal(t, err != nil, test.expectError, fmt.Sprintf("error msg: %v", err))
}
}


@@ -17,16 +17,19 @@ limitations under the License.
package azure_dd
import (
"context"
"fmt"
"strings"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
@@ -36,7 +39,7 @@ type DiskController interface {
CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int) (string, error)
DeleteBlobDisk(diskUri string) error
CreateManagedDisk(diskName string, storageAccountType storage.SkuName, sizeGB int, tags map[string]string) (string, error)
CreateManagedDisk(options *azure.ManagedDiskOptions) (string, error)
DeleteManagedDisk(diskURI string) error
// Attaches the disk to the host machine.
@@ -58,7 +61,16 @@ type DiskController interface {
DeleteVolume(diskURI string) error
// Expand the disk to new size
ResizeDisk(diskName string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error)
ResizeDisk(diskURI string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error)
// GetAzureDiskLabels gets availability zone labels for AzureDisk.
GetAzureDiskLabels(diskURI string) (map[string]string, error)
// GetActiveZones returns all the zones in which k8s nodes are currently running.
GetActiveZones() (sets.String, error)
// GetLocation returns the location in which k8s cluster is currently running.
GetLocation() string
}
type azureDataDiskPlugin struct {
@@ -72,9 +84,14 @@ var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.AttachableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.VolumePluginWithAttachLimits = &azureDataDiskPlugin{}
var _ volume.ExpandableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.DeviceMountableVolumePlugin = &azureDataDiskPlugin{}
// store vm size list in current region
var vmSizeList *[]compute.VirtualMachineSize
const (
azureDataDiskPluginName = "kubernetes.io/azure-disk"
defaultAzureVolumeLimit = 16
)
func ProbeVolumePlugins() []volume.VolumePlugin {
@@ -118,26 +135,65 @@ func (plugin *azureDataDiskPlugin) SupportsBulkVolumeVerification() bool {
func (plugin *azureDataDiskPlugin) GetVolumeLimits() (map[string]int64, error) {
volumeLimits := map[string]int64{
util.AzureVolumeLimitKey: 16,
util.AzureVolumeLimitKey: defaultAzureVolumeLimit,
}
cloud := plugin.host.GetCloudProvider()
// if we can't fetch cloudprovider we return an error
// hoping external CCM or admin can set it. Returning
// default values from here will mean, no one can
// override them.
if cloud == nil {
return nil, fmt.Errorf("No cloudprovider present")
az, err := getCloud(plugin.host)
if err != nil {
// if we can't fetch cloudprovider we return an error
// hoping external CCM or admin can set it. Returning
// default values from here will mean, no one can
// override them.
return nil, fmt.Errorf("failed to get azure cloud in GetVolumeLimits, plugin.host: %s", plugin.host.GetHostName())
}
if cloud.ProviderName() != azure.CloudProviderName {
return nil, fmt.Errorf("Expected Azure cloudprovider, got %s", cloud.ProviderName())
instances, ok := az.Instances()
if !ok {
glog.Warningf("Failed to get instances from cloud provider")
return volumeLimits, nil
}
instanceType, err := instances.InstanceType(context.TODO(), plugin.host.GetNodeName())
if err != nil {
glog.Errorf("Failed to get instance type from Azure cloud provider, nodeName: %s", plugin.host.GetNodeName())
return volumeLimits, nil
}
if vmSizeList == nil {
result, err := az.VirtualMachineSizesClient.List(context.TODO(), az.Location)
if err != nil || result.Value == nil {
glog.Errorf("failed to list vm sizes in GetVolumeLimits, plugin.host: %s, location: %s", plugin.host.GetHostName(), az.Location)
return volumeLimits, nil
}
vmSizeList = result.Value
}
volumeLimits = map[string]int64{
util.AzureVolumeLimitKey: getMaxDataDiskCount(instanceType, vmSizeList),
}
return volumeLimits, nil
}
func getMaxDataDiskCount(instanceType string, sizeList *[]compute.VirtualMachineSize) int64 {
if sizeList == nil {
return defaultAzureVolumeLimit
}
vmsize := strings.ToUpper(instanceType)
for _, size := range *sizeList {
if size.Name == nil || size.MaxDataDiskCount == nil {
glog.Errorf("failed to get vm size in getMaxDataDiskCount")
continue
}
if strings.ToUpper(*size.Name) == vmsize {
glog.V(2).Infof("got a matching size in getMaxDataDiskCount, Name: %s, MaxDataDiskCount: %d", *size.Name, *size.MaxDataDiskCount)
return int64(*size.MaxDataDiskCount)
}
}
return defaultAzureVolumeLimit
}
func (plugin *azureDataDiskPlugin) VolumeLimitKey(spec *volume.Spec) string {
return util.AzureVolumeLimitKey
}
@@ -242,7 +298,7 @@ func (plugin *azureDataDiskPlugin) ExpandVolumeDevice(
return oldSize, err
}
return diskController.ResizeDisk(spec.PersistentVolume.Spec.AzureDisk.DiskName, oldSize, newSize)
return diskController.ResizeDisk(spec.PersistentVolume.Spec.AzureDisk.DataDiskURI, oldSize, newSize)
}
func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
@@ -267,5 +323,13 @@ func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volumeName, mountPath str
func (plugin *azureDataDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
m := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(m, deviceMountPath)
return m.GetMountRefs(deviceMountPath)
}
func (plugin *azureDataDiskPlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return plugin.NewAttacher()
}
func (plugin *azureDataDiskPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return plugin.NewDetacher()
}


@@ -20,6 +20,10 @@ import (
"os"
"testing"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
"github.com/Azure/go-autorest/autorest/to"
"github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
@@ -53,3 +57,37 @@ func TestCanSupport(t *testing.T) {
// fakeAzureProvider type was removed because none of its functions were used
// Testing mounting will require path calculation which depends on the cloud provider, which is faked in the above test.
func TestGetMaxDataDiskCount(t *testing.T) {
tests := []struct {
instanceType string
sizeList *[]compute.VirtualMachineSize
expectResult int64
}{
{
instanceType: "standard_d2_v2",
sizeList: &[]compute.VirtualMachineSize{
{Name: to.StringPtr("Standard_D2_V2"), MaxDataDiskCount: to.Int32Ptr(8)},
{Name: to.StringPtr("Standard_D3_V2"), MaxDataDiskCount: to.Int32Ptr(16)},
},
expectResult: 8,
},
{
instanceType: "NOT_EXISTING",
sizeList: &[]compute.VirtualMachineSize{
{Name: to.StringPtr("Standard_D2_V2"), MaxDataDiskCount: to.Int32Ptr(8)},
},
expectResult: defaultAzureVolumeLimit,
},
{
instanceType: "",
sizeList: &[]compute.VirtualMachineSize{},
expectResult: defaultAzureVolumeLimit,
},
}
for _, test := range tests {
result := getMaxDataDiskCount(test.instanceType, test.sizeList)
assert.Equal(t, test.expectResult, result)
}
}


@@ -17,14 +17,19 @@ limitations under the License.
package azure_dd
import (
"errors"
"fmt"
"strconv"
"strings"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
"k8s.io/kubernetes/pkg/features"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
@@ -67,6 +72,25 @@ func (d *azureDiskDeleter) Delete() error {
return diskController.DeleteBlobDisk(volumeSource.DataDiskURI)
}
// parseZoned parses 'zoned' for storage class. If zoned is not specified (empty string),
// then it defaults to true for managed disks.
func parseZoned(zonedString string, kind v1.AzureDataDiskKind) (bool, error) {
if zonedString == "" {
return kind == v1.AzureManagedDisk, nil
}
zoned, err := strconv.ParseBool(zonedString)
if err != nil {
return false, fmt.Errorf("failed to parse 'zoned': %v", err)
}
if zoned && kind != v1.AzureManagedDisk {
return false, fmt.Errorf("zoned is only supported by managed disks")
}
return zoned, nil
}
func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
if !util.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
@@ -84,7 +108,7 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
if len(p.options.PVC.Spec.AccessModes) == 1 {
if p.options.PVC.Spec.AccessModes[0] != supportedModes[0] {
return nil, fmt.Errorf("AzureDisk - mode %s is not supporetd by AzureDisk plugin supported mode is %s", p.options.PVC.Spec.AccessModes[0], supportedModes)
return nil, fmt.Errorf("AzureDisk - mode %s is not supported by AzureDisk plugin (supported mode is %s)", p.options.PVC.Spec.AccessModes[0], supportedModes)
}
}
@@ -94,12 +118,23 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
cachingMode v1.AzureDataDiskCachingMode
strKind string
err error
resourceGroup string
zoned bool
zonePresent bool
zonesPresent bool
strZoned string
availabilityZone string
availabilityZones sets.String
selectedAvailabilityZone string
)
// maxLength = 79 - (4 for ".vhd") = 75
name := util.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 75)
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
requestGB := int(util.RoundUpSize(requestBytes, 1024*1024*1024))
requestGiB, err := util.RoundUpToGiBInt(capacity)
if err != nil {
return nil, err
}
for k, v := range p.options.Parameters {
switch strings.ToLower(k) {
@@ -117,6 +152,19 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
cachingMode = v1.AzureDataDiskCachingMode(v)
case volume.VolumeParameterFSType:
fsType = strings.ToLower(v)
case "resourcegroup":
resourceGroup = v
case "zone":
zonePresent = true
availabilityZone = v
case "zones":
zonesPresent = true
availabilityZones, err = util.ZonesToSet(v)
if err != nil {
return nil, fmt.Errorf("error parsing zones %s, must be strings separated by commas: %v", v, err)
}
case "zoned":
strZoned = v
default:
return nil, fmt.Errorf("AzureDisk - invalid option %s in storage class", k)
}
@@ -133,6 +181,25 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
return nil, err
}
zoned, err = parseZoned(strZoned, kind)
if err != nil {
return nil, err
}
if kind != v1.AzureManagedDisk {
if resourceGroup != "" {
return nil, errors.New("StorageClass option 'resourceGroup' can be used only for managed disks")
}
if zoned {
return nil, errors.New("StorageClass option 'zoned' parameter is only supported for managed disks")
}
}
if !zoned && (zonePresent || zonesPresent || len(allowedTopologies) > 0) {
return nil, fmt.Errorf("zone, zones and allowedTopologies StorageClass parameters must be used together with zoned parameter")
}
if cachingMode, err = normalizeCachingMode(cachingMode); err != nil {
return nil, err
}
@@ -142,31 +209,74 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
return nil, err
}
// create disk
diskURI := ""
if kind == v1.AzureManagedDisk {
diskURI, err = diskController.CreateManagedDisk(name, skuName, requestGB, *(p.options.CloudTags))
// Select zone for managed disks based on zone, zones and allowedTopologies.
if zoned {
activeZones, err := diskController.GetActiveZones()
if err != nil {
return nil, err
return nil, fmt.Errorf("error querying active zones: %v", err)
}
} else {
if kind == v1.AzureDedicatedBlobDisk {
_, diskURI, _, err = diskController.CreateVolume(name, account, storageAccountType, location, requestGB)
if err != nil {
return nil, err
}
} else {
diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGB)
if availabilityZone != "" || availabilityZones.Len() != 0 || activeZones.Len() != 0 || len(allowedTopologies) != 0 {
selectedAvailabilityZone, err = util.SelectZoneForVolume(zonePresent, zonesPresent, availabilityZone, availabilityZones, activeZones, selectedNode, allowedTopologies, p.options.PVC.Name)
if err != nil {
return nil, err
}
}
}
// create disk
diskURI := ""
labels := map[string]string{}
if kind == v1.AzureManagedDisk {
tags := make(map[string]string)
if p.options.CloudTags != nil {
tags = *(p.options.CloudTags)
}
volumeOptions := &azure.ManagedDiskOptions{
DiskName: name,
StorageAccountType: skuName,
ResourceGroup: resourceGroup,
PVCName: p.options.PVC.Name,
SizeGB: requestGiB,
Tags: tags,
AvailabilityZone: selectedAvailabilityZone,
}
diskURI, err = diskController.CreateManagedDisk(volumeOptions)
if err != nil {
return nil, err
}
labels, err = diskController.GetAzureDiskLabels(diskURI)
if err != nil {
return nil, err
}
} else {
if kind == v1.AzureDedicatedBlobDisk {
_, diskURI, _, err = diskController.CreateVolume(name, account, storageAccountType, location, requestGiB)
if err != nil {
return nil, err
}
} else {
diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGiB)
if err != nil {
return nil, err
}
}
}
var volumeMode *v1.PersistentVolumeMode
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
volumeMode = p.options.PVC.Spec.VolumeMode
if volumeMode != nil && *volumeMode == v1.PersistentVolumeBlock {
// Block volumes should not have any FSType
fsType = ""
}
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: p.options.PVName,
Labels: map[string]string{},
Labels: labels,
Annotations: map[string]string{
"volumehelper.VolumeDynamicallyCreatedByKey": "azure-disk-dynamic-provisioner",
},
@@ -175,8 +285,9 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
PersistentVolumeReclaimPolicy: p.options.PersistentVolumeReclaimPolicy,
AccessModes: supportedModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", requestGB)),
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", requestGiB)),
},
VolumeMode: volumeMode,
PersistentVolumeSource: v1.PersistentVolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
CachingMode: &cachingMode,
@@ -190,8 +301,52 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
},
}
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
pv.Spec.VolumeMode = p.options.PVC.Spec.VolumeMode
if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
nodeSelectorTerms := make([]v1.NodeSelectorTerm, 0)
if zoned {
// Set node affinity labels based on availability zone labels.
if len(labels) > 0 {
requirements := make([]v1.NodeSelectorRequirement, 0)
for k, v := range labels {
requirements = append(requirements, v1.NodeSelectorRequirement{Key: k, Operator: v1.NodeSelectorOpIn, Values: []string{v}})
}
nodeSelectorTerms = append(nodeSelectorTerms, v1.NodeSelectorTerm{
MatchExpressions: requirements,
})
}
} else {
// Set node affinity labels based on fault domains.
// This is required because unzoned AzureDisk can't be attached to zoned nodes.
// There are at most 3 fault domains available in each region.
// Refer https://docs.microsoft.com/en-us/azure/virtual-machines/windows/manage-availability.
for i := 0; i < 3; i++ {
requirements := []v1.NodeSelectorRequirement{
{
Key: kubeletapis.LabelZoneRegion,
Operator: v1.NodeSelectorOpIn,
Values: []string{diskController.GetLocation()},
},
{
Key: kubeletapis.LabelZoneFailureDomain,
Operator: v1.NodeSelectorOpIn,
Values: []string{strconv.Itoa(i)},
},
}
nodeSelectorTerms = append(nodeSelectorTerms, v1.NodeSelectorTerm{
MatchExpressions: requirements,
})
}
}
if len(nodeSelectorTerms) > 0 {
pv.Spec.NodeAffinity = &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{
NodeSelectorTerms: nodeSelectorTerms,
},
}
}
}
return pv, nil
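
As a concrete illustration of the unzoned branch above: with at most three fault domains per region, the PV gets one NodeSelectorTerm per fault domain, each pinning both the region and the domain index. A self-contained sketch (the region value is hypothetical; the label keys are spelled out to match the kubeletapis constants used above):

package main

import (
	"fmt"
	"strconv"

	v1 "k8s.io/api/core/v1"
)

const (
	labelZoneRegion        = "failure-domain.beta.kubernetes.io/region" // kubeletapis.LabelZoneRegion
	labelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"   // kubeletapis.LabelZoneFailureDomain
)

func main() {
	location := "eastus" // hypothetical diskController.GetLocation() result
	terms := make([]v1.NodeSelectorTerm, 0, 3)
	for i := 0; i < 3; i++ {
		terms = append(terms, v1.NodeSelectorTerm{
			MatchExpressions: []v1.NodeSelectorRequirement{
				{Key: labelZoneRegion, Operator: v1.NodeSelectorOpIn, Values: []string{location}},
				{Key: labelZoneFailureDomain, Operator: v1.NodeSelectorOpIn, Values: []string{strconv.Itoa(i)}},
			},
		})
	}
	// An unzoned disk may land in any fault domain of its region, but must
	// never be scheduled onto a zoned node; the OR of these terms encodes that.
	fmt.Println(len(terms), "node selector terms")
}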


@@ -0,0 +1,96 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
)
func TestParseZoned(t *testing.T) {
tests := []struct {
msg string
zoneString string
diskKind v1.AzureDataDiskKind
expected bool
expectError bool
}{
{
msg: "managed disk should default to zoned",
diskKind: v1.AzureManagedDisk,
expected: true,
},
{
msg: "shared blob disk should default to un-zoned",
diskKind: v1.AzureSharedBlobDisk,
expected: false,
},
{
msg: "shared dedicated disk should default to un-zoned",
diskKind: v1.AzureDedicatedBlobDisk,
expected: false,
},
{
msg: "managed disk should support zoned=true",
diskKind: v1.AzureManagedDisk,
zoneString: "true",
expected: true,
},
{
msg: "managed disk should support zoned=false",
diskKind: v1.AzureManagedDisk,
zoneString: "false",
expected: false,
},
{
msg: "shared blob disk should support zoned=false",
diskKind: v1.AzureSharedBlobDisk,
zoneString: "false",
expected: false,
},
{
msg: "shared blob disk shouldn't support zoned=true",
diskKind: v1.AzureSharedBlobDisk,
zoneString: "true",
expectError: true,
},
{
msg: "shared dedicated disk should support zoned=false",
diskKind: v1.AzureDedicatedBlobDisk,
zoneString: "false",
expected: false,
},
{
msg: "dedicated blob disk shouldn't support zoned=true",
diskKind: v1.AzureDedicatedBlobDisk,
zoneString: "true",
expectError: true,
},
}
for i, test := range tests {
real, err := parseZoned(test.zoneString, test.diskKind)
if test.expectError {
assert.Error(t, err, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
} else {
assert.Equal(t, test.expected, real, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
}
}
}


@@ -22,12 +22,12 @@ go_library(
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)
@@ -41,11 +41,11 @@ go_test(
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
],
)


@@ -38,7 +38,7 @@ var _ volume.ProvisionableVolumePlugin = &azureFilePlugin{}
// azure cloud provider should implement it
type azureCloudProvider interface {
// create a file share
CreateFileShare(shareName, accountName, accountType, location string, requestGiB int) (string, string, error)
CreateFileShare(shareName, accountName, accountType, resourceGroup, location string, requestGiB int) (string, string, error)
// delete a file share
DeleteFileShare(accountName, accountKey, shareName string) error
// resize a file share
@@ -139,7 +139,7 @@ func (a *azureFileProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
return nil, fmt.Errorf("%s does not support block volume provisioning", a.plugin.GetPluginName())
}
var sku, location, account string
var sku, resourceGroup, location, account string
// File share name has a length limit of 63, and it cannot contain two consecutive '-'s.
name := util.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 63)
@@ -160,6 +160,8 @@ func (a *azureFileProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
account = v
case "secretnamespace":
secretNamespace = v
case "resourcegroup":
resourceGroup = v
default:
return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, a.plugin.GetPluginName())
}
@@ -169,7 +171,7 @@ func (a *azureFileProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Azure file")
}
account, key, err := a.azureProvider.CreateFileShare(name, account, sku, location, requestGiB)
account, key, err := a.azureProvider.CreateFileShare(name, account, sku, resourceGroup, location, requestGiB)
if err != nil {
return nil, err
}
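
The interface change above threads a new optional resourceGroup StorageClass parameter through to share creation. A minimal hedged sketch of the wiring, with a fake provider standing in for the Azure cloud provider (all values hypothetical):

package main

import "fmt"

// fileShareCreator models the azureCloudProvider interface from this file,
// with the resourceGroup argument added by the diff.
type fileShareCreator interface {
	CreateFileShare(shareName, accountName, accountType, resourceGroup, location string, requestGiB int) (string, string, error)
}

type fakeProvider struct{}

func (fakeProvider) CreateFileShare(shareName, accountName, accountType, resourceGroup, location string, requestGiB int) (string, string, error) {
	return "fakeaccount", "fake-key", nil
}

func main() {
	var p fileShareCreator = fakeProvider{}
	// An empty resourceGroup is assumed to mean the provider's default
	// resource group, preserving the old behavior.
	account, key, err := p.CreateFileShare("pvc-share", "", "Standard_LRS", "my-shares-rg", "eastus", 5)
	fmt.Println(account, key, err)
}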


@@ -31,8 +31,8 @@ const (
dirMode = "dir_mode"
gid = "gid"
vers = "vers"
defaultFileMode = "0755"
defaultDirMode = "0755"
defaultFileMode = "0777"
defaultDirMode = "0777"
defaultVers = "3.0"
)


@@ -18,10 +18,10 @@ go_library(
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)
@@ -33,9 +33,9 @@ go_test(
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
)


@@ -1,5 +1,6 @@
approvers:
- rootfs
- jsafrane
- saad-ali
reviewers:
- rootfs
@@ -7,3 +8,4 @@ reviewers:
- jsafrane
- jingxu97
- msau42
- cofyc


@@ -323,8 +323,8 @@ func (cephfsVolume *cephfs) execMount(mountpoint string) error {
}
src += hosts[i] + ":" + cephfsVolume.path
mountOptions := util.JoinMountOptions(cephfsVolume.mountOptions, opt)
if err := cephfsVolume.mounter.Mount(src, mountpoint, "ceph", mountOptions); err != nil {
opt = util.JoinMountOptions(cephfsVolume.mountOptions, opt)
if err := cephfsVolume.mounter.Mount(src, mountpoint, "ceph", opt); err != nil {
return fmt.Errorf("CephFS: mount failed: %v", err)
}
@@ -408,6 +408,17 @@ func (cephfsVolume *cephfs) execFuseMount(mountpoint string) error {
mountArgs = append(mountArgs, "--id")
mountArgs = append(mountArgs, cephfsVolume.id)
// build option array
opt := []string{}
if cephfsVolume.readonly {
opt = append(opt, "ro")
}
opt = util.JoinMountOptions(cephfsVolume.mountOptions, opt)
if len(opt) > 0 {
mountArgs = append(mountArgs, "-o")
mountArgs = append(mountArgs, strings.Join(opt, ","))
}
glog.V(4).Infof("Mounting cmd ceph-fuse with arguments (%s)", mountArgs)
command := exec.Command("ceph-fuse", mountArgs...)
output, err := command.CombinedOutput()
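
The fuse-mount fix above mirrors what the kernel-mount path already did: merge the user-supplied mountOptions into the computed options and pass the result as a single -o argument. A sketch of the resulting argument list, assuming JoinMountOptions returns the deduplicated union of both option lists:

package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/util/sets"
)

// joinMountOptions mimics the assumed semantics of util.JoinMountOptions.
func joinMountOptions(userOptions, systemOptions []string) []string {
	all := sets.NewString(systemOptions...)
	all.Insert(userOptions...)
	return all.List()
}

func main() {
	// Hypothetical read-only CephFS volume with one user-supplied option.
	opt := []string{"ro"}
	opt = joinMountOptions([]string{"name=admin"}, opt)

	mountArgs := []string{"/mnt/cephfs", "--id", "kube"}
	if len(opt) > 0 {
		mountArgs = append(mountArgs, "-o", strings.Join(opt, ","))
	}
	fmt.Println("ceph-fuse", strings.Join(mountArgs, " "))
	// Output: ceph-fuse /mnt/cephfs --id kube -o name=admin,ro
}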


@@ -75,7 +75,6 @@ func TestPlugin(t *testing.T) {
},
},
}
mounter, err := plug.(*cephfsPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{}, "secrets")
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
@@ -223,5 +222,26 @@ func TestGetSecretNameAndNamespaceForPV(t *testing.T) {
err, resultNs, resultName)
}
}
}
func TestGetAccessModes(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("cephfs_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/cephfs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
modes := plug.GetAccessModes()
for _, v := range modes {
if !volumetest.ContainsAccessMode(modes, v) {
t.Errorf("Expected AccessModeTypes: %s", v)
}
}
}

View File

@@ -14,6 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Package nfs contains the internal representation of Ceph file system
// Package cephfs contains the internal representation of Ceph file system
// (CephFS) volumes.
package cephfs // import "k8s.io/kubernetes/pkg/volume/cephfs"

View File

@@ -11,6 +11,7 @@ go_library(
srcs = [
"attacher.go",
"cinder.go",
"cinder_block.go",
"cinder_util.go",
"doc.go",
],
@@ -18,20 +19,23 @@ go_library(
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/openstack:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/util/keymutex:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
@@ -40,6 +44,7 @@ go_test(
name = "go_default_test",
srcs = [
"attacher_test.go",
"cinder_block_test.go",
"cinder_test.go",
],
embed = [":go_default_library"],
@@ -48,11 +53,12 @@ go_test(
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)

View File

@@ -40,8 +40,12 @@ type cinderDiskAttacher struct {
var _ volume.Attacher = &cinderDiskAttacher{}
var _ volume.DeviceMounter = &cinderDiskAttacher{}
var _ volume.AttachableVolumePlugin = &cinderPlugin{}
var _ volume.DeviceMountableVolumePlugin = &cinderPlugin{}
const (
probeVolumeInitDelay = 1 * time.Second
probeVolumeFactor = 2.0
@@ -67,9 +71,13 @@ func (plugin *cinderPlugin) NewAttacher() (volume.Attacher, error) {
}, nil
}
func (plugin *cinderPlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return plugin.NewAttacher()
}
func (plugin *cinderPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
return mounter.GetMountRefs(deviceMountPath)
}
func (attacher *cinderDiskAttacher) waitOperationFinished(volumeID string) error {
@@ -299,6 +307,8 @@ type cinderDiskDetacher struct {
var _ volume.Detacher = &cinderDiskDetacher{}
var _ volume.DeviceUnmounter = &cinderDiskDetacher{}
func (plugin *cinderPlugin) NewDetacher() (volume.Detacher, error) {
cinder, err := plugin.getCloudProvider()
if err != nil {
@@ -310,6 +320,10 @@ func (plugin *cinderPlugin) NewDetacher() (volume.Detacher, error) {
}, nil
}
func (plugin *cinderPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return plugin.NewDetacher()
}
func (detacher *cinderDiskDetacher) waitOperationFinished(volumeID string) error {
backoff := wait.Backoff{
Duration: operationFinishInitDelay,

View File

@@ -610,13 +610,13 @@ func (testcase *testcase) DiskIsAttachedByName(nodeName types.NodeName, volumeID
return false, instanceID, errors.New("unexpected DiskIsAttachedByName call: wrong instanceID")
}
glog.V(4).Infof("DiskIsAttachedByName call: %s, %s, returning %v, %v", volumeID, nodeName, expected.isAttached, expected.instanceID, expected.ret)
glog.V(4).Infof("DiskIsAttachedByName call: %s, %s, returning %v, %v, %v", volumeID, nodeName, expected.isAttached, expected.instanceID, expected.ret)
return expected.isAttached, expected.instanceID, expected.ret
}
func (testcase *testcase) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error) {
return "", "", false, errors.New("Not implemented")
func (testcase *testcase) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, string, bool, error) {
return "", "", "", false, errors.New("Not implemented")
}
func (testcase *testcase) GetDevicePath(volumeID string) string {

View File

@@ -27,8 +27,10 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/openstack"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
@@ -51,7 +53,7 @@ type BlockStorageProvider interface {
AttachDisk(instanceID, volumeID string) (string, error)
DetachDisk(instanceID, volumeID string) error
DeleteVolume(volumeID string) error
CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error)
CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, string, bool, error)
GetDevicePath(volumeID string) string
InstanceID() (string, error)
GetAttachmentDiskPath(instanceID, volumeID string) (string, error)
@@ -85,7 +87,7 @@ func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
func (plugin *cinderPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
plugin.volumeLocks = keymutex.NewKeyMutex()
plugin.volumeLocks = keymutex.NewHashed(0)
return nil
}
@@ -437,7 +439,7 @@ func (c *cinderVolumeUnmounter) TearDownAt(dir string) error {
// Find Cinder volumeID to lock the right volume
// TODO: refactor VolumePlugin.NewUnmounter to get full volume.Spec just like
// NewMounter. We could then find volumeID there without probing MountRefs.
refs, err := mount.GetMountRefs(c.mounter, dir)
refs, err := c.mounter.GetMountRefs(dir)
if err != nil {
glog.V(4).Infof("GetMountRefs failed: %v", err)
return err
@@ -454,7 +456,7 @@ func (c *cinderVolumeUnmounter) TearDownAt(dir string) error {
defer c.plugin.volumeLocks.UnlockKey(c.pdName)
// Reload list of references, there might be SetUpAt finished in the meantime
refs, err = mount.GetMountRefs(c.mounter, dir)
refs, err = c.mounter.GetMountRefs(dir)
if err != nil {
glog.V(4).Infof("GetMountRefs failed: %v", err)
return err
@@ -505,15 +507,20 @@ func (c *cinderVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopolo
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
}
if util.CheckPersistentVolumeClaimModeBlock(c.options.PVC) {
return nil, fmt.Errorf("%s does not support block volume provisioning", c.plugin.GetPluginName())
}
volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c)
if err != nil {
return nil, err
}
var volumeMode *v1.PersistentVolumeMode
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
volumeMode = c.options.PVC.Spec.VolumeMode
if volumeMode != nil && *volumeMode == v1.PersistentVolumeBlock {
// Block volumes should not have any FSType
fstype = ""
}
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: c.options.PVName,
@@ -528,6 +535,7 @@ func (c *cinderVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopolo
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
VolumeMode: volumeMode,
PersistentVolumeSource: v1.PersistentVolumeSource{
Cinder: &v1.CinderPersistentVolumeSource{
VolumeID: volumeID,

View File

@@ -0,0 +1,167 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cinder
import (
"fmt"
"path/filepath"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
var _ volume.VolumePlugin = &cinderPlugin{}
var _ volume.PersistentVolumePlugin = &cinderPlugin{}
var _ volume.BlockVolumePlugin = &cinderPlugin{}
var _ volume.DeletableVolumePlugin = &cinderPlugin{}
var _ volume.ProvisionableVolumePlugin = &cinderPlugin{}
var _ volume.ExpandableVolumePlugin = &cinderPlugin{}
func (plugin *cinderPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
pluginDir := plugin.host.GetVolumeDevicePluginDir(cinderVolumePluginName)
blkutil := volumepathhandler.NewBlockVolumePathHandler()
globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID)
if err != nil {
return nil, err
}
glog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err)
globalMapPath := filepath.Dir(globalMapPathUUID)
if len(globalMapPath) <= 1 {
return nil, fmt.Errorf("failed to get volume plugin information from globalMapPathUUID: %v", globalMapPathUUID)
}
return getVolumeSpecFromGlobalMapPath(globalMapPath)
}
func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) {
// Get volume spec information from globalMapPath
// globalMapPath example:
// plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID}
// plugins/kubernetes.io/cinder/volumeDevices/vol-XXXXXX
vID := filepath.Base(globalMapPath)
if len(vID) <= 1 {
return nil, fmt.Errorf("failed to get volumeID from global path=%s", globalMapPath)
}
block := v1.PersistentVolumeBlock
cinderVolume := &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Cinder: &v1.CinderPersistentVolumeSource{
VolumeID: vID,
},
},
VolumeMode: &block,
},
}
return volume.NewSpecFromPersistentVolume(cinderVolume, true), nil
}
// NewBlockVolumeMapper creates a new volume.BlockVolumeMapper from an API specification.
func (plugin *cinderPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
// If this is called via GenerateUnmapDeviceFunc(), pod is nil.
// Pass empty string as dummy uid since uid isn't used in the case.
var uid types.UID
if pod != nil {
uid = pod.UID
}
return plugin.newBlockVolumeMapperInternal(spec, uid, &DiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *cinderPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.BlockVolumeMapper, error) {
pdName, fsType, readOnly, err := getVolumeInfo(spec)
if err != nil {
return nil, err
}
return &cinderVolumeMapper{
cinderVolume: &cinderVolume{
podUID: podUID,
volName: spec.Name(),
pdName: pdName,
fsType: fsType,
manager: manager,
mounter: mounter,
plugin: plugin,
},
readOnly: readOnly}, nil
}
func (plugin *cinderPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
return plugin.newUnmapperInternal(volName, podUID, &DiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *cinderPlugin) newUnmapperInternal(volName string, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.BlockVolumeUnmapper, error) {
return &cinderPluginUnmapper{
cinderVolume: &cinderVolume{
podUID: podUID,
volName: volName,
manager: manager,
mounter: mounter,
plugin: plugin,
}}, nil
}
func (c *cinderPluginUnmapper) TearDownDevice(mapPath, devicePath string) error {
return nil
}
type cinderPluginUnmapper struct {
*cinderVolume
}
var _ volume.BlockVolumeUnmapper = &cinderPluginUnmapper{}
type cinderVolumeMapper struct {
*cinderVolume
readOnly bool
}
var _ volume.BlockVolumeMapper = &cinderVolumeMapper{}
func (b *cinderVolumeMapper) SetUpDevice() (string, error) {
return "", nil
}
func (b *cinderVolumeMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID)
}
// GetGlobalMapPath returns global map path and error
// path: plugins/kubernetes.io/{PluginName}/volumeDevices/volumeID
// plugins/kubernetes.io/cinder/volumeDevices/vol-XXXXXX
func (cd *cinderVolume) GetGlobalMapPath(spec *volume.Spec) (string, error) {
pdName, _, _, err := getVolumeInfo(spec)
if err != nil {
return "", err
}
return filepath.Join(cd.plugin.host.GetVolumeDevicePluginDir(cinderVolumePluginName), pdName), nil
}
// GetPodDeviceMapPath returns pod device map path and volume name
// path: pods/{podUid}/volumeDevices/kubernetes.io~cinder
func (cd *cinderVolume) GetPodDeviceMapPath() (string, string) {
name := cinderVolumePluginName
return cd.plugin.host.GetPodVolumeDeviceDir(cd.podUID, kstrings.EscapeQualifiedNameForDisk(name)), cd.volName
}
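
Putting the helpers above together, a hypothetical round-trip under the standard kubelet root (all values illustrative):

// Sketch only: reconstruct a block-volume Spec from its global map path.
func exampleGlobalMapPath() {
	spec, err := getVolumeSpecFromGlobalMapPath(
		"/var/lib/kubelet/plugins/kubernetes.io/cinder/volumeDevices/vol-1234")
	if err != nil {
		panic(err) // illustration only
	}
	_ = spec
	// spec.PersistentVolume.Spec.Cinder.VolumeID == "vol-1234"
	// *spec.PersistentVolume.Spec.VolumeMode     == v1.PersistentVolumeBlock
	// GetPodDeviceMapPath then yields the per-pod device dir
	// (pods/{podUID}/volumeDevices/kubernetes.io~cinder) plus the spec name.
}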

View File

@@ -0,0 +1,145 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cinder
import (
"os"
"path"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
const (
testVolName = "vol-1234"
testPVName = "pv1"
testGlobalPath = "plugins/kubernetes.io/cinder/volumeDevices/vol-1234"
testPodPath = "pods/poduid/volumeDevices/kubernetes.io~cinder"
)
func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
// make our test path for fake GlobalMapPath
// /tmp symbolizes our pluginDir
// /tmp/testGlobalPathXXXXX/plugins/kubernetes.io/cinder/volumeDevices/pdVol1
tmpVDir, err := utiltesting.MkTmpdir("cinderBlockTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
//deferred clean up
defer os.RemoveAll(tmpVDir)
expectedGlobalPath := path.Join(tmpVDir, testGlobalPath)
//Bad Path
badspec, err := getVolumeSpecFromGlobalMapPath("")
if badspec != nil || err == nil {
t.Errorf("Expected not to get spec from GlobalMapPath but did")
}
// Good Path
spec, err := getVolumeSpecFromGlobalMapPath(expectedGlobalPath)
if spec == nil || err != nil {
t.Fatalf("Failed to get spec from GlobalMapPath: %v", err)
}
if spec.PersistentVolume.Spec.Cinder.VolumeID != testVolName {
t.Errorf("Invalid volumeID from GlobalMapPath spec: %s", spec.PersistentVolume.Spec.Cinder.VolumeID)
}
block := v1.PersistentVolumeBlock
specMode := spec.PersistentVolume.Spec.VolumeMode
if specMode == nil {
t.Fatalf("Invalid volumeMode from GlobalMapPath spec: nil, expected: %v", block)
}
if *specMode != block {
t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v, expected: %v", *specMode, block)
}
}
func getTestVolume(readOnly bool, isBlock bool) *volume.Spec {
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: testPVName,
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Cinder: &v1.CinderPersistentVolumeSource{
VolumeID: testVolName,
},
},
},
}
if isBlock {
blockMode := v1.PersistentVolumeBlock
pv.Spec.VolumeMode = &blockMode
}
return volume.NewSpecFromPersistentVolume(pv, readOnly)
}
func TestGetPodAndPluginMapPaths(t *testing.T) {
tmpVDir, err := utiltesting.MkTmpdir("cinderBlockTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
//deferred clean up
defer os.RemoveAll(tmpVDir)
expectedGlobalPath := path.Join(tmpVDir, testGlobalPath)
expectedPodPath := path.Join(tmpVDir, testPodPath)
spec := getTestVolume(false, true /*isBlock*/)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpVDir, nil, nil))
plug, err := plugMgr.FindMapperPluginByName(cinderVolumePluginName)
if err != nil {
os.RemoveAll(tmpVDir)
t.Fatalf("Can't find the plugin by name: %q", cinderVolumePluginName)
}
if plug.GetPluginName() != cinderVolumePluginName {
t.Fatalf("Wrong name: %s", plug.GetPluginName())
}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mapper, err := plug.NewBlockVolumeMapper(spec, pod, volume.VolumeOptions{})
if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err)
}
if mapper == nil {
t.Fatalf("Got a nil Mounter")
}
//GetGlobalMapPath
gMapPath, err := mapper.GetGlobalMapPath(spec)
if err != nil || len(gMapPath) == 0 {
t.Fatalf("Invalid GlobalMapPath from spec: %s", spec.PersistentVolume.Spec.Cinder.VolumeID)
}
if gMapPath != expectedGlobalPath {
t.Errorf("Failed to get GlobalMapPath: %s %s", gMapPath, expectedGlobalPath)
}
//GetPodDeviceMapPath
gDevicePath, gVolName := mapper.GetPodDeviceMapPath()
if gDevicePath != expectedPodPath {
t.Errorf("Got unexpected pod path: %s, expected %s", gDevicePath, expectedPodPath)
}
if gVolName != testPVName {
t.Errorf("Got unexpected volNamne: %s, expected %s", gVolName, testPVName)
}
}

View File

@@ -169,9 +169,12 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string,
}
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volSizeBytes := capacity.Value()
// Cinder works with gigabytes, convert to GiB with rounding up
volSizeGB := int(volutil.RoundUpSize(volSizeBytes, 1024*1024*1024))
volSizeGiB, err := volutil.RoundUpToGiBInt(capacity)
if err != nil {
return "", 0, nil, "", err
}
name := volutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // Cinder volume name can have up to 255 characters
vtype := ""
availability := ""
@@ -208,10 +211,10 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string,
}
}
volumeID, volumeAZ, IgnoreVolumeAZ, errr := cloud.CreateVolume(name, volSizeGB, vtype, availability, c.options.CloudTags)
if errr != nil {
glog.V(2).Infof("Error creating cinder volume: %v", errr)
return "", 0, nil, "", errr
volumeID, volumeAZ, volumeRegion, IgnoreVolumeAZ, err := cloud.CreateVolume(name, volSizeGiB, vtype, availability, c.options.CloudTags)
if err != nil {
glog.V(2).Infof("Error creating cinder volume: %v", err)
return "", 0, nil, "", err
}
glog.V(2).Infof("Successfully created cinder volume %s", volumeID)
@@ -219,8 +222,9 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string,
volumeLabels = make(map[string]string)
if IgnoreVolumeAZ == false {
volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ
volumeLabels[kubeletapis.LabelZoneRegion] = volumeRegion
}
return volumeID, volSizeGB, volumeLabels, fstype, nil
return volumeID, volSizeGiB, volumeLabels, fstype, nil
}
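
The switch from RoundUpSize to RoundUpToGiBInt keeps the Cinder request in whole GiB while reporting overflow as an error instead of silently truncating; sizes still round up, never down. The underlying arithmetic is plain ceiling division, assuming RoundUpToGiBInt wraps something like:

// The ceiling-to-GiB arithmetic in isolation (sketch, not the real helper).
func roundUpToGiB(sizeBytes int64) int64 {
	const GiB = int64(1024 * 1024 * 1024)
	return (sizeBytes + GiB - 1) / GiB
}

// roundUpToGiB(1)        == 1
// roundUpToGiB(1<<30)    == 1
// roundUpToGiB(1<<30 + 1) == 2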
func probeAttachedVolume() error {

View File

@@ -14,16 +14,15 @@ go_library(
],
importpath = "k8s.io/kubernetes/pkg/volume/configmap",
deps = [
"//pkg/util/io:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)
@@ -36,11 +35,11 @@ go_test(
"//pkg/volume/empty_dir:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
],
)

View File

@@ -24,7 +24,6 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
ioutil "k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
@@ -93,7 +92,6 @@ func (plugin *configMapPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts v
pod.UID,
plugin,
plugin.host.GetMounter(plugin.GetPluginName()),
plugin.host.GetWriter(),
volume.MetricsNil{},
},
source: *spec.Volume.ConfigMap,
@@ -110,7 +108,6 @@ func (plugin *configMapPlugin) NewUnmounter(volName string, podUID types.UID) (v
podUID,
plugin,
plugin.host.GetMounter(plugin.GetPluginName()),
plugin.host.GetWriter(),
volume.MetricsNil{},
},
}, nil
@@ -131,7 +128,6 @@ type configMapVolume struct {
podUID types.UID
plugin *configMapPlugin
mounter mount.Interface
writer ioutil.Writer
volume.MetricsNil
}
@@ -207,13 +203,6 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
}
}
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
return err
}
if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil {
return err
}
totalBytes := totalBytes(configMap)
glog.V(3).Infof("Received configMap %v/%v containing (%v) pieces of data, %v total bytes",
b.pod.Namespace,
@@ -226,6 +215,29 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
return err
}
setupSuccess := false
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
return err
}
if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil {
return err
}
defer func() {
// Clean up directories if setup fails
if !setupSuccess {
unmounter, unmountCreateErr := b.plugin.NewUnmounter(b.volName, b.podUID)
if unmountCreateErr != nil {
glog.Errorf("error cleaning up mount %s after failure. Create unmounter failed with %v", b.volName, unmountCreateErr)
return
}
tearDownErr := unmounter.TearDown()
if tearDownErr != nil {
glog.Errorf("Error tearing down volume %s with : %v", b.volName, tearDownErr)
}
}
}()
writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName)
writer, err := volumeutil.NewAtomicWriter(dir, writerContext)
if err != nil {
@@ -244,6 +256,7 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
return err
}
setupSuccess = true
return nil
}
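
The reordering moves wrapped.SetUpAt and MakeNestedMountpoints inside the window covered by the deferred cleanup, so a failure in either step no longer leaks a half-built volume directory. Stripped to its skeleton, with hypothetical helpers, the idiom is:

// Sketch of the flag-guarded cleanup idiom: the flag flips only after
// every step succeeds, so the deferred closure undoes partial setup
// for any early return above it.
func setUpWithCleanup(steps []func() error, tearDown func()) (err error) {
	success := false
	defer func() {
		if !success {
			tearDown()
		}
	}()
	for _, step := range steps {
		if err = step(); err != nil {
			return err
		}
	}
	success = true
	return nil
}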

View File

@@ -613,6 +613,66 @@ func volumeSpec(volumeName, configMapName string, defaultMode int32) *v1.Volume
}
}
func TestInvalidConfigMapSetup(t *testing.T) {
var (
testPodUID = types.UID("test_pod_uid")
testVolumeName = "test_volume_name"
testNamespace = "test_configmap_namespace"
testName = "test_configmap_name"
volumeSpec = volumeSpec(testVolumeName, testName, 0644)
configMap = configMap(testNamespace, testName)
client = fake.NewSimpleClientset(&configMap)
pluginMgr = volume.VolumePluginMgr{}
tempDir, host = newTestHost(t, client)
)
volumeSpec.VolumeSource.ConfigMap.Items = []v1.KeyToPath{
{Key: "missing", Path: "missing"},
}
defer os.RemoveAll(tempDir)
pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
plugin, err := pluginMgr.FindPluginByName(configMapPluginName)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
vName, err := plugin.GetVolumeName(volume.NewSpecFromVolume(volumeSpec))
if err != nil {
t.Errorf("Failed to GetVolumeName: %v", err)
}
if vName != "test_volume_name/test_configmap_name" {
t.Errorf("Got unexpect VolumeName %v", vName)
}
volumePath := mounter.GetPath()
if !strings.HasSuffix(volumePath, "pods/test_pod_uid/volumes/kubernetes.io~configmap/test_volume_name") {
t.Errorf("Got unexpected path: %s", volumePath)
}
fsGroup := int64(1001)
err = mounter.SetUp(&fsGroup)
if err == nil {
t.Errorf("Expected setup to fail")
}
_, err = os.Stat(volumePath)
if err == nil {
t.Errorf("Expected %s to not exist", volumePath)
}
doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
func configMap(namespace, name string) v1.ConfigMap {
return v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{

View File

@@ -14,22 +14,25 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/features:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/csi/labelmanager:go_default_library",
"//pkg/volume/csi/nodeinfomanager:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/informers/externalversions:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/informers/externalversions/csi/v1alpha1:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/listers/csi/v1alpha1:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
@@ -44,23 +47,30 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//pkg/features:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/csi/fake:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned/fake:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
],
)
@@ -76,7 +86,7 @@ filegroup(
srcs = [
":package-srcs",
"//pkg/volume/csi/fake:all-srcs",
"//pkg/volume/csi/labelmanager:all-srcs",
"//pkg/volume/csi/nodeinfomanager:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],

View File

@@ -56,6 +56,8 @@ type csiAttacher struct {
// volume.Attacher methods
var _ volume.Attacher = &csiAttacher{}
var _ volume.DeviceMounter = &csiAttacher{}
func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
if spec == nil {
glog.Error(log("attacher.Attach missing volume.Spec"))
@@ -68,6 +70,16 @@ func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string
return "", err
}
skip, err := c.plugin.skipAttach(csiSource.Driver)
if err != nil {
glog.Error(log("attacher.Attach failed to find if driver is attachable: %v", err))
return "", err
}
if skip {
glog.V(4).Infof(log("skipping attach for driver %s", csiSource.Driver))
return "", nil
}
node := string(nodeName)
pvName := spec.PersistentVolume.GetName()
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, node)
@@ -118,6 +130,16 @@ func (c *csiAttacher) WaitForAttach(spec *volume.Spec, attachID string, pod *v1.
return "", err
}
skip, err := c.plugin.skipAttach(source.Driver)
if err != nil {
glog.Error(log("attacher.Attach failed to find if driver is attachable: %v", err))
return "", err
}
if skip {
glog.V(4).Infof(log("Driver is not attachable, skip waiting for attach"))
return "", nil
}
return c.waitForVolumeAttachment(source.VolumeHandle, attachID, timeout)
}
@@ -135,7 +157,7 @@ func (c *csiAttacher) waitForVolumeAttachmentInternal(volumeHandle, attachID str
attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
glog.Error(log("attacher.WaitForAttach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
return "", err
return "", fmt.Errorf("volume %v has GET error for volume attachment %v: %v", volumeHandle, attachID, err)
}
// if being deleted, fail fast
if attach.GetDeletionTimestamp() != nil {
@@ -219,11 +241,22 @@ func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.No
glog.Error(log("attacher.VolumesAreAttached failed: %v", err))
continue
}
skip, err := c.plugin.skipAttach(source.Driver)
if err != nil {
glog.Error(log("Failed to check CSIDriver for %s: %s", source.Driver, err))
} else {
if skip {
// This volume is not attachable, pretend it's attached
attached[spec] = true
continue
}
}
attachID := getAttachmentName(source.VolumeHandle, source.Driver, string(nodeName))
glog.V(4).Info(log("probing attachment status for VolumeAttachment %v", attachID))
attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
attached[spec] = false
glog.Error(log("attacher.VolumesAreAttached failed for attach.ID=%v: %v", attachID, err))
continue
}
@@ -245,9 +278,14 @@ func (c *csiAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
return deviceMountPath, nil
}
func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) (err error) {
glog.V(4).Infof(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath))
if deviceMountPath == "" {
err = fmt.Errorf("attacher.MountDevice failed, deviceMountPath is empty")
return err
}
mounted, err := isDirMounted(c.plugin, deviceMountPath)
if err != nil {
glog.Error(log("attacher.MountDevice failed while checking mount status for dir [%s]", deviceMountPath))
@@ -269,6 +307,35 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
return err
}
// Store volume metadata for UnmountDevice. Keep it around even if the
// driver does not support NodeStage; UnmountDevice still needs it.
if err = os.MkdirAll(deviceMountPath, 0750); err != nil {
glog.Error(log("attacher.MountDevice failed to create dir %#v: %v", deviceMountPath, err))
return err
}
glog.V(4).Info(log("created target path successfully [%s]", deviceMountPath))
dataDir := filepath.Dir(deviceMountPath)
data := map[string]string{
volDataKey.volHandle: csiSource.VolumeHandle,
volDataKey.driverName: csiSource.Driver,
}
if err = saveVolumeData(dataDir, volDataFileName, data); err != nil {
glog.Error(log("failed to save volume info data: %v", err))
if cleanerr := os.RemoveAll(dataDir); cleanerr != nil {
glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, cleanerr))
}
return err
}
defer func() {
if err != nil {
// clean up metadata
glog.Errorf(log("attacher.MountDevice failed: %v", err))
if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
glog.Error(log("attacher.MountDevice failed to remove mount dir after errir [%s]: %v", deviceMountPath, err))
}
}
}()
if c.csiClient == nil {
c.csiClient = newCsiDriverClient(csiSource.Driver)
}
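
Note the signature change to a named return value, MountDevice(...) (err error): that is what lets the deferred closure above observe the final error and remove the metadata on every failing path. The idiom in isolation (hypothetical functions):

// Sketch of the named-return cleanup idiom: the deferred closure sees the
// final value of err, so rollback runs for every return path that set it.
func doWork(prepare, commit func() error, rollback func()) (err error) {
	defer func() {
		if err != nil {
			rollback()
		}
	}()
	if err = prepare(); err != nil {
		return err
	}
	return commit()
}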
@@ -279,51 +346,28 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
// Check whether "STAGE_UNSTAGE_VOLUME" is set
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
if err != nil {
glog.Error(log("attacher.MountDevice failed to check STAGE_UNSTAGE_VOLUME: %v", err))
return err
}
if !stageUnstageSet {
glog.Infof(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
// defer does *not* remove the metadata file, and that is correct - UnmountDevice needs it there.
return nil
}
// Start MountDevice
if deviceMountPath == "" {
return fmt.Errorf("attacher.MountDevice failed, deviceMountPath is empty")
}
nodeName := string(c.plugin.host.GetNodeName())
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
attachment, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
glog.Error(log("attacher.MountDevice failed while getting volume attachment [id=%v]: %v", attachID, err))
return err
}
if attachment == nil {
glog.Error(log("unable to find VolumeAttachment [id=%s]", attachID))
return errors.New("no existing VolumeAttachment found")
}
publishVolumeInfo := attachment.Status.AttachmentMetadata
publishVolumeInfo, err := c.plugin.getPublishVolumeInfo(c.k8s, csiSource.VolumeHandle, csiSource.Driver, nodeName)
nodeStageSecrets := map[string]string{}
if csiSource.NodeStageSecretRef != nil {
nodeStageSecrets, err = getCredentialsFromSecret(c.k8s, csiSource.NodeStageSecretRef)
if err != nil {
return fmt.Errorf("fetching NodeStageSecretRef %s/%s failed: %v",
err = fmt.Errorf("fetching NodeStageSecretRef %s/%s failed: %v",
csiSource.NodeStageSecretRef.Namespace, csiSource.NodeStageSecretRef.Name, err)
return err
}
}
// create target_dir before call to NodeStageVolume
if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
glog.Error(log("attacher.MountDevice failed to create dir %#v: %v", deviceMountPath, err))
return err
}
glog.V(4).Info(log("created target path successfully [%s]", deviceMountPath))
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
accessMode := v1.ReadWriteOnce
if spec.PersistentVolume.Spec.AccessModes != nil {
@@ -331,10 +375,6 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
}
fsType := csiSource.FSType
if len(fsType) == 0 {
fsType = defaultFSType
}
err = csi.NodeStageVolume(ctx,
csiSource.VolumeHandle,
publishVolumeInfo,
@@ -345,10 +385,6 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
csiSource.VolumeAttributes)
if err != nil {
glog.Errorf(log("attacher.MountDevice failed: %v", err))
if removeMountDirErr := removeMountDir(c.plugin, deviceMountPath); removeMountDirErr != nil {
glog.Error(log("attacher.MountDevice failed to remove mount dir after a NodeStageVolume() error [%s]: %v", deviceMountPath, removeMountDirErr))
}
return err
}
@@ -358,6 +394,8 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
var _ volume.Detacher = &csiAttacher{}
var _ volume.DeviceUnmounter = &csiAttacher{}
func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error {
// volumeName in format driverName<SEP>volumeHandle generated by plugin.GetVolumeName()
if volumeName == "" {
@@ -461,10 +499,21 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
glog.V(4).Info(log("attacher.UnmountDevice(%s)", deviceMountPath))
// Setup
driverName, volID, err := getDriverAndVolNameFromDeviceMountPath(c.k8s, deviceMountPath)
if err != nil {
glog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
return err
var driverName, volID string
dataDir := filepath.Dir(deviceMountPath)
data, err := loadVolumeData(dataDir, volDataFileName)
if err == nil {
driverName = data[volDataKey.driverName]
volID = data[volDataKey.volHandle]
} else {
glog.Error(log("UnmountDevice failed to load volume data file [%s]: %v", dataDir, err))
// The volume might have been mounted by old CSI volume plugin. Fall back to the old behavior: read PV from API server
driverName, volID, err = getDriverAndVolNameFromDeviceMountPath(c.k8s, deviceMountPath)
if err != nil {
glog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
return err
}
}
if c.csiClient == nil {
@@ -482,6 +531,11 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
}
if !stageUnstageSet {
glog.Infof(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice..."))
// Just delete the global directory + json file
if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
return fmt.Errorf("failed to clean up gloubal mount %s: %s", dataDir, err)
}
return nil
}
@@ -495,6 +549,11 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
return err
}
// Delete the global directory + json file
if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
return fmt.Errorf("failed to clean up gloubal mount %s: %s", dataDir, err)
}
glog.V(4).Infof(log("attacher.UnmountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
return nil
}
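
For reference, the metadata consumed here is the small JSON file MountDevice now writes next to the global mount directory (the filename behind volDataFileName is assumed to be vol_data.json); the tests below feed it exactly this shape:

{"driverName": "csi", "volumeHandle": "project/zone/test-vol1"}

When the file is missing or unparsable, the code falls back to resolving the PV through the API server, so volumes staged by an older CSI plugin version can still be unstaged and their directories cleaned up.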

View File

@@ -18,24 +18,37 @@ package csi
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/golang/glog"
storage "k8s.io/api/storage/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
clientset "k8s.io/client-go/kubernetes"
fakeclient "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
utiltesting "k8s.io/client-go/util/testing"
fakecsi "k8s.io/csi-api/pkg/client/clientset/versioned/fake"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
var (
bFalse = false
bTrue = true
)
func makeTestAttachment(attachID, nodeName, pvName string) *storage.VolumeAttachment {
return &storage.VolumeAttachment{
ObjectMeta: meta.ObjectMeta{
@@ -56,6 +69,40 @@ func makeTestAttachment(attachID, nodeName, pvName string) *storage.VolumeAttach
}
}
func markVolumeAttached(t *testing.T, client clientset.Interface, watch *watch.RaceFreeFakeWatcher, attachID string, status storage.VolumeAttachmentStatus) {
ticker := time.NewTicker(10 * time.Millisecond)
var attach *storage.VolumeAttachment
var err error
defer ticker.Stop()
// wait for attachment to be saved
for i := 0; i < 100; i++ {
attach, err = client.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
<-ticker.C
continue
}
t.Error(err)
}
if attach != nil {
glog.Infof("stopping wait")
break
}
}
glog.Infof("stopped wait")
if attach == nil {
t.Logf("attachment not found for id:%v", attachID)
} else {
attach.Status = status
_, err := client.StorageV1beta1().VolumeAttachments().Update(attach)
if err != nil {
t.Error(err)
}
watch.Modify(attach)
}
}
func TestAttacherAttach(t *testing.T) {
testCases := []struct {
@@ -119,8 +166,7 @@ func TestAttacherAttach(t *testing.T) {
// attacher loop
for i, tc := range testCases {
t.Logf("test case: %s", tc.name)
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t, nil)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
@@ -145,42 +191,144 @@ func TestAttacherAttach(t *testing.T) {
}
}(tc.attachID, tc.nodeName, tc.shouldFail)
// update attachment to avoid long waitForAttachment
ticker := time.NewTicker(10 * time.Millisecond)
defer ticker.Stop()
// wait for attachment to be saved
var attach *storage.VolumeAttachment
for i := 0; i < 100; i++ {
attach, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Get(tc.attachID, meta.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
<-ticker.C
continue
}
t.Error(err)
var status storage.VolumeAttachmentStatus
if tc.injectAttacherError {
status.Attached = false
status.AttachError = &storage.VolumeError{
Message: "attacher error",
}
if attach != nil {
break
}
}
if attach == nil {
t.Logf("attachment not found for id:%v", tc.attachID)
} else {
if tc.injectAttacherError {
attach.Status.Attached = false
attach.Status.AttachError = &storage.VolumeError{
Message: "attacher error",
}
} else {
attach.Status.Attached = true
}
_, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Update(attach)
if err != nil {
t.Error(err)
}
fakeWatcher.Modify(attach)
status.Attached = true
}
markVolumeAttached(t, csiAttacher.k8s, fakeWatcher, tc.attachID, status)
}
}
func TestAttacherWithCSIDriver(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIDriverRegistry, true)()
tests := []struct {
name string
driver string
expectVolumeAttachment bool
}{
{
name: "CSIDriver not attachable",
driver: "not-attachable",
expectVolumeAttachment: false,
},
{
name: "CSIDriver is attachable",
driver: "attachable",
expectVolumeAttachment: true,
},
{
name: "CSIDriver.AttachRequired not set -> failure",
driver: "nil",
expectVolumeAttachment: true,
},
{
name:                   "CSIDriver does not exist -> failure",
driver: "unknown",
expectVolumeAttachment: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeCSIClient := fakecsi.NewSimpleClientset(
getCSIDriver("not-attachable", nil, &bFalse),
getCSIDriver("attachable", nil, &bTrue),
getCSIDriver("nil", nil, nil),
)
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t, fakeCSIClient)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
if err != nil {
t.Fatalf("failed to create new attacher: %v", err)
}
csiAttacher := attacher.(*csiAttacher)
spec := volume.NewSpecFromPersistentVolume(makeTestPV("test-pv", 10, test.driver, "test-vol"), false)
expectedAttachID := getAttachmentName("test-vol", test.driver, "node")
status := storage.VolumeAttachmentStatus{
Attached: true,
}
if test.expectVolumeAttachment {
go markVolumeAttached(t, csiAttacher.k8s, fakeWatcher, expectedAttachID, status)
}
attachID, err := csiAttacher.Attach(spec, types.NodeName("node"))
if err != nil {
t.Errorf("Attach() failed: %s", err)
}
if test.expectVolumeAttachment && attachID == "" {
t.Errorf("Epected attachID, got nothing")
}
if !test.expectVolumeAttachment && attachID != "" {
t.Errorf("Epected empty attachID, got %q", attachID)
}
})
}
}
func TestAttacherWaitForVolumeAttachmentWithCSIDriver(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIDriverRegistry, true)()
// In order to detect if the volume plugin would skip WaitForAttach for non-attachable drivers,
// we do not instantiate any VolumeAttachment. So if the plugin does not skip attach, WaitForVolumeAttachment
// will return an error that volume attachment was not found.
tests := []struct {
name string
driver string
expectError bool
}{
{
name: "CSIDriver not attachable -> success",
driver: "not-attachable",
expectError: false,
},
{
name: "CSIDriver is attachable -> failure",
driver: "attachable",
expectError: true,
},
{
name: "CSIDriver.AttachRequired not set -> failure",
driver: "nil",
expectError: true,
},
{
name:        "CSIDriver does not exist -> failure",
driver: "unknown",
expectError: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeCSIClient := fakecsi.NewSimpleClientset(
getCSIDriver("not-attachable", nil, &bFalse),
getCSIDriver("attachable", nil, &bTrue),
getCSIDriver("nil", nil, nil),
)
plug, tmpDir := newTestPlugin(t, nil, fakeCSIClient)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
if err != nil {
t.Fatalf("failed to create new attacher: %v", err)
}
csiAttacher := attacher.(*csiAttacher)
spec := volume.NewSpecFromPersistentVolume(makeTestPV("test-pv", 10, test.driver, "test-vol"), false)
_, err = csiAttacher.WaitForAttach(spec, "", nil, time.Second)
if err != nil && !test.expectError {
t.Errorf("Unexpected error: %s", err)
}
if err == nil && test.expectError {
t.Errorf("Expected error, got none")
}
})
}
}
@@ -236,7 +384,7 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) {
}
for i, tc := range testCases {
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t, nil)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
@@ -286,7 +434,7 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) {
}
func TestAttacherVolumesAreAttached(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
@@ -373,7 +521,7 @@ func TestAttacherDetach(t *testing.T) {
for _, tc := range testCases {
t.Logf("running test: %v", tc.name)
plug, fakeWatcher, tmpDir, client := newTestWatchPlugin(t)
plug, fakeWatcher, tmpDir, client := newTestWatchPlugin(t, nil)
defer os.RemoveAll(tmpDir)
if tc.reactor != nil {
client.PrependReactor("*", "*", tc.reactor)
@@ -422,7 +570,7 @@ func TestAttacherDetach(t *testing.T) {
func TestAttacherGetDeviceMountPath(t *testing.T) {
// Setup
// Create a new attacher
plug, _, tmpDir, _ := newTestWatchPlugin(t)
plug, _, tmpDir, _ := newTestWatchPlugin(t, nil)
defer os.RemoveAll(tmpDir)
attacher, err0 := plug.NewAttacher()
if err0 != nil {
@@ -522,10 +670,6 @@ func TestAttacherMountDevice(t *testing.T) {
deviceMountPath: "path2",
stageUnstageSet: false,
},
{
testName: "stage_unstage not set no vars should not fail",
stageUnstageSet: false,
},
}
for _, tc := range testCases {
@@ -535,7 +679,7 @@ func TestAttacherMountDevice(t *testing.T) {
// Setup
// Create a new attacher
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t, nil)
defer os.RemoveAll(tmpDir)
attacher, err0 := plug.NewAttacher()
if err0 != nil {
@@ -544,6 +688,10 @@ func TestAttacherMountDevice(t *testing.T) {
csiAttacher := attacher.(*csiAttacher)
csiAttacher.csiClient = setupClient(t, tc.stageUnstageSet)
if tc.deviceMountPath != "" {
tc.deviceMountPath = filepath.Join(tmpDir, tc.deviceMountPath)
}
nodeName := string(csiAttacher.plugin.host.GetNodeName())
// Create spec
@@ -588,12 +736,12 @@ func TestAttacherMountDevice(t *testing.T) {
t.Errorf("got wrong number of staged volumes, expecting %v got: %v", numStaged, len(staged))
}
if tc.stageUnstageSet {
gotPath, ok := staged[tc.volName]
vol, ok := staged[tc.volName]
if !ok {
t.Errorf("could not find staged volume: %s", tc.volName)
}
if gotPath != tc.deviceMountPath {
t.Errorf("expected mount path: %s. got: %s", tc.deviceMountPath, gotPath)
if vol.Path != tc.deviceMountPath {
t.Errorf("expected mount path: %s. got: %s", tc.deviceMountPath, vol.Path)
}
}
}
@@ -604,51 +752,56 @@ func TestAttacherUnmountDevice(t *testing.T) {
testName string
volID string
deviceMountPath string
jsonFile string
createPV bool
stageUnstageSet bool
shouldFail bool
}{
{
testName: "normal",
testName: "normal, json file exists",
volID: "project/zone/test-vol1",
deviceMountPath: "/tmp/csi-test049507108/plugins/csi/pv/test-pv-name/globalmount",
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
jsonFile: `{"driverName": "csi", "volumeHandle":"project/zone/test-vol1"}`,
createPV: false,
stageUnstageSet: true,
},
{
testName: "no volID",
testName: "normal, json file doesn't exist -> use PV",
volID: "project/zone/test-vol1",
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
jsonFile: "",
createPV: true,
stageUnstageSet: true,
},
{
testName: "invalid json -> use PV",
volID: "project/zone/test-vol1",
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
jsonFile: `{"driverName"}}`,
createPV: true,
stageUnstageSet: true,
},
{
testName: "no json, no PV.volID",
volID: "",
deviceMountPath: "/tmp/csi-test049507108/plugins/csi/pv/test-pv-name/globalmount",
stageUnstageSet: true,
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
jsonFile: "",
createPV: true,
shouldFail: true,
},
{
testName: "no device mount path",
testName: "no json, no PV",
volID: "project/zone/test-vol1",
deviceMountPath: "",
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
jsonFile: "",
createPV: false,
stageUnstageSet: true,
shouldFail: true,
},
{
testName: "missing part of device mount path",
volID: "project/zone/test-vol1",
deviceMountPath: "/tmp/csi-test049507108/plugins/csi/pv/test-pv-name/globalmount",
stageUnstageSet: true,
shouldFail: true,
},
{
testName: "test volume name mismatch",
volID: "project/zone/test-vol1",
deviceMountPath: "/tmp/csi-test049507108/plugins/csi/pv/test-pv-name/globalmount",
stageUnstageSet: true,
shouldFail: true,
},
{
testName: "stage_unstage not set",
volID: "project/zone/test-vol1",
deviceMountPath: "/tmp/csi-test049507108/plugins/csi/pv/test-pv-name/globalmount",
stageUnstageSet: false,
},
{
testName: "stage_unstage not set no vars should not fail",
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
jsonFile: `{"driverName":"test-driver","volumeHandle":"test-vol1"}`,
stageUnstageSet: false,
},
}
@@ -657,7 +810,7 @@ func TestAttacherUnmountDevice(t *testing.T) {
t.Logf("Running test case: %s", tc.testName)
// Setup
// Create a new attacher
plug, _, tmpDir, _ := newTestWatchPlugin(t)
plug, _, tmpDir, _ := newTestWatchPlugin(t, nil)
defer os.RemoveAll(tmpDir)
attacher, err0 := plug.NewAttacher()
if err0 != nil {
@@ -666,29 +819,45 @@ func TestAttacherUnmountDevice(t *testing.T) {
csiAttacher := attacher.(*csiAttacher)
csiAttacher.csiClient = setupClient(t, tc.stageUnstageSet)
if tc.deviceMountPath != "" {
tc.deviceMountPath = filepath.Join(tmpDir, tc.deviceMountPath)
}
// Add the volume to NodeStagedVolumes
cdc := csiAttacher.csiClient.(*fakeCsiDriverClient)
cdc.nodeClient.AddNodeStagedVolume(tc.volID, tc.deviceMountPath)
cdc.nodeClient.AddNodeStagedVolume(tc.volID, tc.deviceMountPath, nil)
// Make the PV for this object
// Make JSON for this object
if tc.deviceMountPath != "" {
if err := os.MkdirAll(tc.deviceMountPath, 0755); err != nil {
t.Fatalf("error creating directory %s: %s", tc.deviceMountPath, err)
}
}
dir := filepath.Dir(tc.deviceMountPath)
// dir is now /var/lib/kubelet/plugins/kubernetes.io/csi/pv/{pvname}
pvName := filepath.Base(dir)
pv := makeTestPV(pvName, 5, "csi", tc.volID)
_, err := csiAttacher.k8s.CoreV1().PersistentVolumes().Create(pv)
if err != nil && !tc.shouldFail {
t.Fatalf("Failed to create PV: %v", err)
if tc.jsonFile != "" {
dataPath := filepath.Join(dir, volDataFileName)
if err := ioutil.WriteFile(dataPath, []byte(tc.jsonFile), 0644); err != nil {
t.Fatalf("error creating %s: %s", dataPath, err)
}
}
if tc.createPV {
// Make the PV for this object
pvName := filepath.Base(dir)
pv := makeTestPV(pvName, 5, "csi", tc.volID)
_, err := csiAttacher.k8s.CoreV1().PersistentVolumes().Create(pv)
if err != nil && !tc.shouldFail {
t.Fatalf("Failed to create PV: %v", err)
}
}
// Run
err = csiAttacher.UnmountDevice(tc.deviceMountPath)
err := csiAttacher.UnmountDevice(tc.deviceMountPath)
// Verify
if err != nil {
if !tc.shouldFail {
t.Errorf("test should not fail, but error occurred: %v", err)
}
return
continue
}
if err == nil && tc.shouldFail {
t.Errorf("test should fail, but no error occurred")
@@ -711,11 +880,23 @@ func TestAttacherUnmountDevice(t *testing.T) {
t.Errorf("could not find expected staged volume: %s", tc.volID)
}
if tc.jsonFile != "" && !tc.shouldFail {
dataPath := filepath.Join(dir, volDataFileName)
if _, err := os.Stat(dataPath); !os.IsNotExist(err) {
if err != nil {
t.Errorf("error checking file %s: %s", dataPath, err)
} else {
t.Errorf("json file %s should not exists, but it does", dataPath)
}
} else {
t.Logf("json file %s was correctly removed", dataPath)
}
}
}
}
// create a plugin mgr to load plugins and setup a fake client
func newTestWatchPlugin(t *testing.T) (*csiPlugin, *watch.RaceFreeFakeWatcher, string, *fakeclient.Clientset) {
func newTestWatchPlugin(t *testing.T, csiClient *fakecsi.Clientset) (*csiPlugin, *watch.RaceFreeFakeWatcher, string, *fakeclient.Clientset) {
tmpDir, err := utiltesting.MkTmpdir("csi-test")
if err != nil {
t.Fatalf("can't create temp dir: %v", err)
@@ -725,10 +906,15 @@ func newTestWatchPlugin(t *testing.T) (*csiPlugin, *watch.RaceFreeFakeWatcher, s
fakeWatcher := watch.NewRaceFreeFake()
fakeClient.Fake.PrependWatchReactor("*", core.DefaultWatchReactor(fakeWatcher, nil))
fakeClient.Fake.WatchReactionChain = fakeClient.Fake.WatchReactionChain[:1]
host := volumetest.NewFakeVolumeHost(
if csiClient == nil {
csiClient = fakecsi.NewSimpleClientset()
}
host := volumetest.NewFakeVolumeHostWithCSINodeName(
tmpDir,
fakeClient,
csiClient,
nil,
"node",
)
plugMgr := &volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
@@ -743,5 +929,12 @@ func newTestWatchPlugin(t *testing.T) (*csiPlugin, *watch.RaceFreeFakeWatcher, s
t.Fatalf("cannot assert plugin to be type csiPlugin")
}
if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
// Wait until the informer in CSI volume plugin has all CSIDrivers.
wait.PollImmediate(testInformerSyncPeriod, testInformerSyncTimeout, func() (bool, error) {
return csiPlug.csiDriverInformer.Informer().HasSynced(), nil
})
}
return csiPlug, fakeWatcher, tmpDir, fakeClient
}

View File

@@ -47,19 +47,20 @@ type csiBlockMapper struct {
var _ volume.BlockVolumeMapper = &csiBlockMapper{}
// GetGlobalMapPath returns a path (on the node) where the devicePath will be symlinked to
// Example: plugins/kubernetes.io/csi/volumeDevices/{volumeID}
// GetGlobalMapPath returns a path (on the node) to a device file which will be symlinked to
// Example: plugins/kubernetes.io/csi/volumeDevices/{volumeID}/dev
func (m *csiBlockMapper) GetGlobalMapPath(spec *volume.Spec) (string, error) {
dir := getVolumeDevicePluginDir(spec.Name(), m.plugin.host)
glog.V(4).Infof(log("blockMapper.GetGlobalMapPath = %s", dir))
return dir, nil
}
// GetPodDeviceMapPath returns pod's device map path and volume name
// path: pods/{podUid}/volumeDevices/kubernetes.io~csi/, {volumeID}
// GetPodDeviceMapPath returns the pod's device file path to which the volume is mapped
// returns: pods/{podUid}/volumeDevices/kubernetes.io~csi/{specName}/dev, {specName}
func (m *csiBlockMapper) GetPodDeviceMapPath() (string, string) {
path, specName := m.plugin.host.GetPodVolumeDeviceDir(m.podUID, csiPluginName), m.specName
glog.V(4).Infof(log("blockMapper.GetPodDeviceMapPath = %s", path))
path := filepath.Join(m.plugin.host.GetPodVolumeDeviceDir(m.podUID, csiPluginName), m.specName, "dev")
specName := m.specName
glog.V(4).Infof(log("blockMapper.GetPodDeviceMapPath [path=%s; name=%s]", path, specName))
return path, specName
}
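// Illustrative example (assumed names, not part of this change): for pod UID
// "uid-1" and spec name "pv-1", the two methods above resolve to
//   GetGlobalMapPath:    <kubelet-root>/plugins/kubernetes.io/csi/volumeDevices/pv-1/dev
//   GetPodDeviceMapPath: <kubelet-root>/pods/uid-1/volumeDevices/kubernetes.io~csi/pv-1/dev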
@@ -87,6 +88,9 @@ func (m *csiBlockMapper) SetUpDevice() (string, error) {
return "", err
}
globalMapPathBlockFile := filepath.Join(globalMapPath, "file")
glog.V(4).Infof(log("blockMapper.SetupDevice global device map path file set [%s]", globalMapPathBlockFile))
csi := m.csiClient
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
@@ -128,13 +132,25 @@ func (m *csiBlockMapper) SetUpDevice() (string, error) {
}
}
// create globalMapPath before call to NodeStageVolume
// setup path globalMapPath and block file before call to NodeStageVolume
if err := os.MkdirAll(globalMapPath, 0750); err != nil {
glog.Error(log("blockMapper.SetupDevice failed to create dir %s: %v", globalMapPath, err))
return "", err
}
glog.V(4).Info(log("blockMapper.SetupDevice created global device map path successfully [%s]", globalMapPath))
// create block device file
blockFile, err := os.OpenFile(globalMapPathBlockFile, os.O_CREATE|os.O_RDWR, 0750)
if err != nil {
glog.Error(log("blockMapper.SetupDevice failed to create dir %s: %v", globalMapPathBlockFile, err))
return "", err
}
if err := blockFile.Close(); err != nil {
glog.Error(log("blockMapper.SetupDevice failed to close file %s: %v", globalMapPathBlockFile, err))
return "", err
}
glog.V(4).Info(log("blockMapper.SetupDevice created global map path block device file successfully [%s]", globalMapPathBlockFile))
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
accessMode := v1.ReadWriteOnce
if m.spec.PersistentVolume.Spec.AccessModes != nil {
@@ -144,7 +160,7 @@ func (m *csiBlockMapper) SetUpDevice() (string, error) {
err = csi.NodeStageVolume(ctx,
csiSource.VolumeHandle,
publishVolumeInfo,
globalMapPath,
globalMapPathBlockFile,
fsTypeBlockName,
accessMode,
nodeStageSecrets,
@@ -158,8 +174,8 @@ func (m *csiBlockMapper) SetUpDevice() (string, error) {
return "", err
}
glog.V(4).Infof(log("blockMapper.SetupDevice successfully requested NodeStageVolume [%s]", globalMapPath))
return globalMapPath, nil
glog.V(4).Infof(log("blockMapper.SetupDevice successfully requested NodeStageVolume [%s]", globalMapPathBlockFile))
return globalMapPathBlockFile, nil
}
func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
@@ -176,16 +192,29 @@ func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, vol
csiSource, err := getCSISourceFromSpec(m.spec)
if err != nil {
glog.Error(log("blockMapper.Map failed to get CSI persistent source: %v", err))
glog.Error(log("blockMapper.MapDevice failed to get CSI persistent source: %v", err))
return err
}
dir := filepath.Join(volumeMapPath, volumeMapName)
csi := m.csiClient
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
globalMapPathBlockFile := devicePath
dir, _ := m.GetPodDeviceMapPath()
targetBlockFilePath := filepath.Join(dir, "file")
glog.V(4).Infof(log("blockMapper.MapDevice target volume map file path %s", targetBlockFilePath))
stageCapable, err := hasStageUnstageCapability(ctx, csi)
if err != nil {
glog.Error(log("blockMapper.MapDevice failed to check for STAGE_UNSTAGE_VOLUME capabilty: %v", err))
return err
}
if !stageCapable {
globalMapPathBlockFile = ""
}
nodeName := string(m.plugin.host.GetNodeName())
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
@@ -213,10 +242,22 @@ func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, vol
}
if err := os.MkdirAll(dir, 0750); err != nil {
glog.Error(log("blockMapper.MapDevice failed to create dir %#v: %v", dir, err))
glog.Error(log("blockMapper.MapDevice failed to create dir %s: %v", dir, err))
return err
}
glog.V(4).Info(log("blockMapper.MapDevice created NodePublish path [%s]", dir))
glog.V(4).Info(log("blockMapper.MapDevice created target volume map path successfully [%s]", dir))
// create target map volume block file
targetBlockFile, err := os.OpenFile(targetBlockFilePath, os.O_CREATE|os.O_RDWR, 0750)
if err != nil {
glog.Error(log("blockMapper.MapDevice failed to create file %s: %v", targetBlockFilePath, err))
return err
}
if err := targetBlockFile.Close(); err != nil {
glog.Error(log("blockMapper.MapDevice failed to close file %s: %v", targetBlockFilePath, err))
return err
}
glog.V(4).Info(log("blockMapper.MapDevice created target volume map file successfully [%s]", targetBlockFilePath))
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
accessMode := v1.ReadWriteOnce
@@ -228,8 +269,8 @@ func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, vol
ctx,
m.volumeID,
m.readOnly,
globalMapPath,
dir,
globalMapPathBlockFile,
targetBlockFilePath,
accessMode,
publishVolumeInfo,
csiSource.VolumeAttributes,
@@ -240,7 +281,7 @@ func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, vol
if err != nil {
glog.Errorf(log("blockMapper.MapDevice failed: %v", err))
if err := os.RemoveAll(dir); err != nil {
glog.Error(log("blockMapper.MapDevice failed to remove mount dir after a NodePublish() error [%s]: %v", dir, err))
glog.Error(log("blockMapper.MapDevice failed to remove mapped dir after a NodePublish() error [%s]: %v", dir, err))
}
return err
}

View File

@@ -31,7 +31,7 @@ import (
)
func TestBlockMapperGetGlobalMapPath(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
// TODO (vladimirvivien) specName with slashes will not work
@@ -77,13 +77,14 @@ func TestBlockMapperGetGlobalMapPath(t *testing.T) {
}
func TestBlockMapperSetupDevice(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
fakeClient := fakeclient.NewSimpleClientset()
host := volumetest.NewFakeVolumeHostWithNodeName(
host := volumetest.NewFakeVolumeHostWithCSINodeName(
tmpDir,
fakeClient,
nil,
nil,
"fakeNode",
)
plug.host = host
@@ -123,24 +124,29 @@ func TestBlockMapperSetupDevice(t *testing.T) {
t.Fatalf("mapper failed to GetGlobalMapPath: %v", err)
}
if devicePath != globalMapPath {
if devicePath != filepath.Join(globalMapPath, "file") {
t.Fatalf("mapper.SetupDevice returned unexpected path %s instead of %v", devicePath, globalMapPath)
}
vols := csiMapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodeStagedVolumes()
if vols[csiMapper.volumeID] != devicePath {
vol, ok := vols[csiMapper.volumeID]
if !ok {
t.Error("csi server may not have received NodePublishVolume call")
}
if vol.Path != devicePath {
t.Errorf("csi server expected device path %s, got %s", devicePath, vol.Path)
}
}
func TestBlockMapperMapDevice(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
fakeClient := fakeclient.NewSimpleClientset()
host := volumetest.NewFakeVolumeHostWithNodeName(
host := volumetest.NewFakeVolumeHostWithCSINodeName(
tmpDir,
fakeClient,
nil,
nil,
"fakeNode",
)
plug.host = host
@@ -186,28 +192,34 @@ func TestBlockMapperMapDevice(t *testing.T) {
t.Fatalf("mapper failed to GetGlobalMapPath: %v", err)
}
if _, err := os.Stat(filepath.Join(volumeMapPath, volName)); err != nil {
podVolumeBlockFilePath := filepath.Join(volumeMapPath, "file")
if _, err := os.Stat(podVolumeBlockFilePath); err != nil {
if os.IsNotExist(err) {
t.Errorf("mapper.MapDevice failed, volume path not created: %s", volumeMapPath)
t.Errorf("mapper.MapDevice failed, volume path not created: %v", err)
} else {
t.Errorf("mapper.MapDevice failed: %v", err)
}
}
pubs := csiMapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
if pubs[csiMapper.volumeID] != volumeMapPath {
vol, ok := pubs[csiMapper.volumeID]
if !ok {
t.Error("csi server may not have received NodePublishVolume call")
}
if vol.Path != podVolumeBlockFilePath {
t.Errorf("csi server expected path %s, got %s", podVolumeBlockFilePath, vol.Path)
}
}
func TestBlockMapperTearDownDevice(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
fakeClient := fakeclient.NewSimpleClientset()
host := volumetest.NewFakeVolumeHostWithNodeName(
host := volumetest.NewFakeVolumeHostWithCSINodeName(
tmpDir,
fakeClient,
nil,
nil,
"fakeNode",
)
plug.host = host

View File

@@ -32,6 +32,11 @@ import (
)
type csiClient interface {
NodeGetInfo(ctx context.Context) (
nodeID string,
maxVolumePerNode int64,
accessibleTopology *csipb.Topology,
err error)
NodePublishVolume(
ctx context.Context,
volumeid string,
@@ -75,6 +80,24 @@ func newCsiDriverClient(driverName string) *csiDriverClient {
return c
}
func (c *csiDriverClient) NodeGetInfo(ctx context.Context) (
nodeID string,
maxVolumePerNode int64,
accessibleTopology *csipb.Topology,
err error) {
glog.V(4).Info(log("calling NodeGetInfo rpc"))
conn, err := newGrpcConn(c.driverName)
if err != nil {
return "", 0, nil, err
}
defer conn.Close()
nodeClient := csipb.NewNodeClient(conn)
res, err := nodeClient.NodeGetInfo(ctx, &csipb.NodeGetInfoRequest{})
if err != nil {
return "", 0, nil, err
}
return res.GetNodeId(), res.GetMaxVolumesPerNode(), res.GetAccessibleTopology(), nil
}
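// Example call pattern (illustrative sketch; the driver name is assumed):
//
//	ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
//	defer cancel()
//	nodeID, maxVols, topology, err := newCsiDriverClient("com.example.csi").NodeGetInfo(ctx)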
func (c *csiDriverClient) NodePublishVolume(
ctx context.Context,
volID string,

View File

@@ -24,6 +24,7 @@ import (
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
api "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/volume/csi/fake"
"reflect"
)
type fakeCsiDriverClient struct {
@@ -38,6 +39,15 @@ func newFakeCsiDriverClient(t *testing.T, stagingCapable bool) *fakeCsiDriverCli
}
}
func (c *fakeCsiDriverClient) NodeGetInfo(ctx context.Context) (
nodeID string,
maxVolumePerNode int64,
accessibleTopology *csipb.Topology,
err error) {
resp, err := c.nodeClient.NodeGetInfo(ctx, &csipb.NodeGetInfoRequest{})
return resp.GetNodeId(), resp.GetMaxVolumesPerNode(), resp.GetAccessibleTopology(), err
}
func (c *fakeCsiDriverClient) NodePublishVolume(
ctx context.Context,
volID string,
@@ -141,6 +151,60 @@ func setupClient(t *testing.T, stageUnstageSet bool) csiClient {
return newFakeCsiDriverClient(t, stageUnstageSet)
}
func TestClientNodeGetInfo(t *testing.T) {
testCases := []struct {
name string
expectedNodeID string
expectedMaxVolumePerNode int64
expectedAccessibleTopology *csipb.Topology
mustFail bool
err error
}{
{
name: "test ok",
expectedNodeID: "node1",
expectedMaxVolumePerNode: 16,
expectedAccessibleTopology: &csipb.Topology{
Segments: map[string]string{"com.example.csi-topology/zone": "zone1"},
},
},
{name: "grpc error", mustFail: true, err: errors.New("grpc error")},
}
client := setupClient(t, false /* stageUnstageSet */)
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
client.(*fakeCsiDriverClient).nodeClient.SetNextError(tc.err)
client.(*fakeCsiDriverClient).nodeClient.SetNodeGetInfoResp(&csipb.NodeGetInfoResponse{
NodeId: tc.expectedNodeID,
MaxVolumesPerNode: tc.expectedMaxVolumePerNode,
AccessibleTopology: tc.expectedAccessibleTopology,
})
nodeID, maxVolumePerNode, accessibleTopology, err := client.NodeGetInfo(context.Background())
if tc.mustFail && err == nil {
t.Error("expected an error but got none")
}
if !tc.mustFail && err != nil {
t.Errorf("expected no errors but got: %v", err)
}
if nodeID != tc.expectedNodeID {
t.Errorf("expected nodeID: %v; got: %v", tc.expectedNodeID, nodeID)
}
if maxVolumePerNode != tc.expectedMaxVolumePerNode {
t.Errorf("expected maxVolumePerNode: %v; got: %v", tc.expectedMaxVolumePerNode, maxVolumePerNode)
}
if !reflect.DeepEqual(accessibleTopology, tc.expectedAccessibleTopology) {
t.Errorf("expected accessibleTopology: %v; got: %v", *tc.expectedAccessibleTopology, *accessibleTopology)
}
}
}
func TestClientNodePublishVolume(t *testing.T) {
testCases := []struct {
name string

View File

@@ -26,16 +26,16 @@ import (
"github.com/golang/glog"
api "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/features"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
const defaultFSType = "ext4"
//TODO (vladimirvivien) move this in a central loc later
var (
volDataKey = struct {
@@ -51,6 +51,7 @@ var (
"nodeName",
"attachmentID",
}
currentPodInfoMountVersion = "v1"
)
type csiMountMgr struct {
@@ -115,9 +116,6 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
}
csi := c.csiClient
nodeName := string(c.plugin.host.GetNodeName())
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
@@ -136,20 +134,13 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
return err
}
}
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
if c.volumeInfo == nil {
attachment, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
nodeName := string(c.plugin.host.GetNodeName())
c.volumeInfo, err = c.plugin.getPublishVolumeInfo(c.k8s, c.volumeID, c.driverName, nodeName)
if err != nil {
glog.Error(log("mounter.SetupAt failed while getting volume attachment [id=%v]: %v", attachID, err))
return err
}
if attachment == nil {
glog.Error(log("unable to find VolumeAttachment [id=%s]", attachID))
return errors.New("no existing VolumeAttachment found")
}
c.volumeInfo = attachment.Status.AttachmentMetadata
}
attribs := csiSource.VolumeAttributes
@@ -176,10 +167,23 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
accessMode = c.spec.PersistentVolume.Spec.AccessModes[0]
}
fsType := csiSource.FSType
if len(fsType) == 0 {
fsType = defaultFSType
// Inject pod information into volume_attributes
podAttrs, err := c.podAttributes()
if err != nil {
glog.Error(log("mouter.SetUpAt failed to assemble volume attributes: %v", err))
return err
}
if podAttrs != nil {
if attribs == nil {
attribs = podAttrs
} else {
for k, v := range podAttrs {
attribs[k] = v
}
}
}
fsType := csiSource.FSType
err = csi.NodePublishVolume(
ctx,
c.volumeID,
@@ -202,37 +206,63 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
}
// apply volume ownership
if !c.readOnly && fsGroup != nil {
err := volume.SetVolumeOwnership(c, fsGroup)
if err != nil {
// attempt to rollback mount.
glog.Error(log("mounter.SetupAt failed to set fsgroup volume ownership for [%s]: %v", c.volumeID, err))
glog.V(4).Info(log("mounter.SetupAt attempting to unpublish volume %s due to previous error", c.volumeID))
if unpubErr := csi.NodeUnpublishVolume(ctx, c.volumeID, dir); unpubErr != nil {
glog.Error(log(
"mounter.SetupAt failed to unpublish volume [%s]: %v (caused by previous NodePublish error: %v)",
c.volumeID, unpubErr, err,
))
return fmt.Errorf("%v (caused by %v)", unpubErr, err)
}
// The following logic is derived from https://github.com/kubernetes/kubernetes/issues/66323
// if fstype is "", then skip fsgroup (could be indication of non-block filesystem)
// if fstype is provided and pv.AccessMode == ReadWriteOnce, then apply fsgroup
if unmountErr := removeMountDir(c.plugin, dir); unmountErr != nil {
glog.Error(log(
"mounter.SetupAt failed to clean mount dir [%s]: %v (caused by previous NodePublish error: %v)",
dir, unmountErr, err,
))
return fmt.Errorf("%v (caused by %v)", unmountErr, err)
}
return err
err = c.applyFSGroup(fsType, fsGroup)
if err != nil {
// attempt to rollback mount.
fsGrpErr := fmt.Errorf("applyFSGroup failed for vol %s: %v", c.volumeID, err)
if unpubErr := csi.NodeUnpublishVolume(ctx, c.volumeID, dir); unpubErr != nil {
glog.Error(log("NodeUnpublishVolume failed for [%s]: %v", c.volumeID, unpubErr))
return fsGrpErr
}
glog.V(4).Info(log("mounter.SetupAt sets fsGroup to [%d] for %s", *fsGroup, c.volumeID))
if unmountErr := removeMountDir(c.plugin, dir); unmountErr != nil {
glog.Error(log("removeMountDir failed for [%s]: %v", dir, unmountErr))
return fsGrpErr
}
return fsGrpErr
}
glog.V(4).Infof(log("mounter.SetUp successfully requested NodePublish [%s]", dir))
return nil
}
func (c *csiMountMgr) podAttributes() (map[string]string, error) {
if !utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
return nil, nil
}
if c.plugin.csiDriverLister == nil {
return nil, errors.New("CSIDriver lister does not exist")
}
csiDriver, err := c.plugin.csiDriverLister.Get(c.driverName)
if err != nil {
if apierrs.IsNotFound(err) {
glog.V(4).Infof(log("CSIDriver %q not found, not adding pod information", c.driverName))
return nil, nil
}
return nil, err
}
// if PodInfoOnMountVersion is not set or is not v1, we do not set pod attributes
if csiDriver.Spec.PodInfoOnMountVersion == nil || *csiDriver.Spec.PodInfoOnMountVersion != currentPodInfoMountVersion {
glog.V(4).Infof(log("CSIDriver %q does not require pod information", c.driverName))
return nil, nil
}
attrs := map[string]string{
"csi.storage.k8s.io/pod.name": c.pod.Name,
"csi.storage.k8s.io/pod.namespace": c.pod.Namespace,
"csi.storage.k8s.io/pod.uid": string(c.pod.UID),
"csi.storage.k8s.io/serviceAccount.name": c.pod.Spec.ServiceAccountName,
}
glog.V(4).Infof(log("CSIDriver %q requires pod information", c.driverName))
return attrs, nil
}
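// Illustrative result (assumed pod): for a pod "web-0" in namespace "default"
// running under service account "sa-1", podAttributes returns
//   csi.storage.k8s.io/pod.name: "web-0"
//   csi.storage.k8s.io/pod.namespace: "default"
//   csi.storage.k8s.io/pod.uid: "<pod UID>"
//   csi.storage.k8s.io/serviceAccount.name: "sa-1"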
func (c *csiMountMgr) GetAttributes() volume.Attributes {
mounter := c.plugin.host.GetMounter(c.plugin.GetPluginName())
path := c.GetPath()
@@ -293,6 +323,43 @@ func (c *csiMountMgr) TearDownAt(dir string) error {
return nil
}
// applyFSGroup applies volume ownership; it derives its logic
// from https://github.com/kubernetes/kubernetes/issues/66323
// 1) if fstype is "", then skip fsgroup (could be indication of non-block filesystem)
// 2) if fstype is provided and pv.AccessMode == ReadWriteOnce and !c.spec.ReadOnly then apply fsgroup
func (c *csiMountMgr) applyFSGroup(fsType string, fsGroup *int64) error {
if fsGroup != nil {
if fsType == "" {
glog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, fsType not provided"))
return nil
}
accessModes := c.spec.PersistentVolume.Spec.AccessModes
if accessModes == nil {
glog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, access modes not provided"))
return nil
}
if !hasReadWriteOnce(accessModes) {
glog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, only support ReadWriteOnce access mode"))
return nil
}
if c.readOnly {
glog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, volume is readOnly"))
return nil
}
err := volume.SetVolumeOwnership(c, fsGroup)
if err != nil {
return err
}
glog.V(4).Info(log("mounter.SetupAt fsGroup [%d] applied successfully to %s", *fsGroup, c.volumeID))
}
return nil
}
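// applyFSGroup is called from SetUpAt after a successful NodePublishVolume;
// any error it returns triggers the rollback path above (NodeUnpublishVolume
// followed by removeMountDir).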
// isDirMounted returns the !notMounted result from IsLikelyNotMountPoint check
func isDirMounted(plug *csiPlugin, dir string) (bool, error) {
mounter := plug.host.GetMounter(plug.GetPluginName())

View File

@@ -25,25 +25,35 @@ import (
"path"
"testing"
"reflect"
"github.com/golang/glog"
api "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1beta1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
fakeclient "k8s.io/client-go/kubernetes/fake"
csiapi "k8s.io/csi-api/pkg/apis/csi/v1alpha1"
fakecsi "k8s.io/csi-api/pkg/client/clientset/versioned/fake"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util"
)
var (
testDriver = "test-driver"
testVol = "vol-123"
testns = "test-ns"
testPodUID = types.UID("test-pod")
testDriver = "test-driver"
testVol = "vol-123"
testns = "test-ns"
testPod = "test-pod"
testPodUID = types.UID("test-pod")
testAccount = "test-service-account"
)
func TestMounterGetPath(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
// TODO (vladimirvivien) specName with slashes will not work
@@ -85,83 +95,299 @@ func TestMounterGetPath(t *testing.T) {
}
}
func MounterSetUpTests(t *testing.T, podInfoEnabled bool) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIDriverRegistry, podInfoEnabled)()
tests := []struct {
name string
driver string
attributes map[string]string
expectedAttributes map[string]string
}{
{
name: "no pod info",
driver: "no-info",
attributes: nil,
expectedAttributes: nil,
},
{
name: "no CSIDriver -> no pod info",
driver: "unknown-driver",
attributes: nil,
expectedAttributes: nil,
},
{
name: "CSIDriver with PodInfoRequiredOnMount=nil -> no pod info",
driver: "nil",
attributes: nil,
expectedAttributes: nil,
},
{
name: "no pod info -> keep existing attributes",
driver: "no-info",
attributes: map[string]string{"foo": "bar"},
expectedAttributes: map[string]string{"foo": "bar"},
},
{
name: "add pod info",
driver: "info",
attributes: nil,
expectedAttributes: map[string]string{"csi.storage.k8s.io/pod.uid": "test-pod", "csi.storage.k8s.io/serviceAccount.name": "test-service-account", "csi.storage.k8s.io/pod.name": "test-pod", "csi.storage.k8s.io/pod.namespace": "test-ns"},
},
{
name: "add pod info -> keep existing attributes",
driver: "info",
attributes: map[string]string{"foo": "bar"},
expectedAttributes: map[string]string{"foo": "bar", "csi.storage.k8s.io/pod.uid": "test-pod", "csi.storage.k8s.io/serviceAccount.name": "test-service-account", "csi.storage.k8s.io/pod.name": "test-pod", "csi.storage.k8s.io/pod.namespace": "test-ns"},
},
}
emptyPodMountInfoVersion := ""
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
glog.Infof("Starting test %s", test.name)
fakeClient := fakeclient.NewSimpleClientset()
fakeCSIClient := fakecsi.NewSimpleClientset(
getCSIDriver("no-info", &emptyPodMountInfoVersion, nil),
getCSIDriver("info", &currentPodInfoMountVersion, nil),
getCSIDriver("nil", nil, nil),
)
plug, tmpDir := newTestPlugin(t, fakeClient, fakeCSIClient)
defer os.RemoveAll(tmpDir)
if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
// Wait until the informer in CSI volume plugin has all CSIDrivers.
wait.PollImmediate(testInformerSyncPeriod, testInformerSyncTimeout, func() (bool, error) {
return plug.csiDriverInformer.Informer().HasSynced(), nil
})
}
pv := makeTestPV("test-pv", 10, test.driver, testVol)
pv.Spec.CSI.VolumeAttributes = test.attributes
pvName := pv.GetName()
mounter, err := plug.NewMounter(
volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
&api.Pod{
ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns, Name: testPod},
Spec: api.PodSpec{
ServiceAccountName: testAccount,
},
},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Fatal("failed to create CSI mounter")
}
csiMounter := mounter.(*csiMountMgr)
csiMounter.csiClient = setupClient(t, true)
attachID := getAttachmentName(csiMounter.volumeID, csiMounter.driverName, string(plug.host.GetNodeName()))
attachment := &storage.VolumeAttachment{
ObjectMeta: meta.ObjectMeta{
Name: attachID,
},
Spec: storage.VolumeAttachmentSpec{
NodeName: "test-node",
Attacher: csiPluginName,
Source: storage.VolumeAttachmentSource{
PersistentVolumeName: &pvName,
},
},
Status: storage.VolumeAttachmentStatus{
Attached: false,
AttachError: nil,
DetachError: nil,
},
}
_, err = csiMounter.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
if err != nil {
t.Fatalf("failed to setup VolumeAttachment: %v", err)
}
// Mounter.SetUp()
fsGroup := int64(2000)
if err := csiMounter.SetUp(&fsGroup); err != nil {
t.Fatalf("mounter.Setup failed: %v", err)
}
// Test that the default file system type is not overridden
if len(csiMounter.spec.PersistentVolume.Spec.CSI.FSType) != 0 {
t.Errorf("default value of file system type was overridden by type %s", csiMounter.spec.PersistentVolume.Spec.CSI.FSType)
}
path := csiMounter.GetPath()
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
// ensure call went all the way
pubs := csiMounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
vol, ok := pubs[csiMounter.volumeID]
if !ok {
t.Error("csi server may not have received NodePublishVolume call")
}
if vol.Path != csiMounter.GetPath() {
t.Errorf("csi server expected path %s, got %s", csiMounter.GetPath(), vol.Path)
}
if podInfoEnabled {
if !reflect.DeepEqual(vol.Attributes, test.expectedAttributes) {
t.Errorf("csi server expected attributes %+v, got %+v", test.expectedAttributes, vol.Attributes)
}
} else {
// CSIPodInfo feature is disabled, we expect no modifications to attributes.
if !reflect.DeepEqual(vol.Attributes, test.attributes) {
t.Errorf("csi server expected attributes %+v, got %+v", test.attributes, vol.Attributes)
}
}
})
}
}
func TestMounterSetUp(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
t.Run("WithCSIPodInfo", func(t *testing.T) {
MounterSetUpTests(t, true)
})
t.Run("WithoutCSIPodInfo", func(t *testing.T) {
MounterSetUpTests(t, false)
})
}
func TestMounterSetUpWithFSGroup(t *testing.T) {
fakeClient := fakeclient.NewSimpleClientset()
host := volumetest.NewFakeVolumeHostWithNodeName(
tmpDir,
fakeClient,
nil,
"fakeNode",
)
plug.host = host
pv := makeTestPV("test-pv", 10, testDriver, testVol)
pvName := pv.GetName()
plug, tmpDir := newTestPlugin(t, fakeClient, nil)
defer os.RemoveAll(tmpDir)
mounter, err := plug.NewMounter(
volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Fatal("failed to create CSI mounter")
}
csiMounter := mounter.(*csiMountMgr)
csiMounter.csiClient = setupClient(t, true)
attachID := getAttachmentName(csiMounter.volumeID, csiMounter.driverName, string(plug.host.GetNodeName()))
attachment := &storage.VolumeAttachment{
ObjectMeta: meta.ObjectMeta{
Name: attachID,
},
Spec: storage.VolumeAttachmentSpec{
NodeName: "test-node",
Attacher: csiPluginName,
Source: storage.VolumeAttachmentSource{
PersistentVolumeName: &pvName,
testCases := []struct {
name string
accessModes []api.PersistentVolumeAccessMode
readOnly bool
fsType string
setFsGroup bool
fsGroup int64
}{
{
name: "default fstype, with no fsgroup (should not apply fsgroup)",
accessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
readOnly: false,
fsType: "",
},
Status: storage.VolumeAttachmentStatus{
Attached: false,
AttachError: nil,
DetachError: nil,
{
name: "default fstype with fsgroup (should not apply fsgroup)",
accessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
readOnly: false,
fsType: "",
setFsGroup: true,
fsGroup: 3000,
},
{
name: "fstype, fsgroup, RWM, ROM provided (should not apply fsgroup)",
accessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteMany,
api.ReadOnlyMany,
},
fsType: "ext4",
setFsGroup: true,
fsGroup: 3000,
},
{
name: "fstype, fsgroup, RWO, but readOnly (should not apply fsgroup)",
accessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
readOnly: true,
fsType: "ext4",
setFsGroup: true,
fsGroup: 3000,
},
{
name: "fstype, fsgroup, RWO provided (should apply fsgroup)",
accessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
fsType: "ext4",
setFsGroup: true,
fsGroup: 3000,
},
}
_, err = csiMounter.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
if err != nil {
t.Fatalf("failed to setup VolumeAttachment: %v", err)
}
// Mounter.SetUp()
fsGroup := int64(2000)
if err := csiMounter.SetUp(&fsGroup); err != nil {
t.Fatalf("mounter.Setup failed: %v", err)
}
path := csiMounter.GetPath()
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
for i, tc := range testCases {
t.Logf("Running test %s", tc.name)
volName := fmt.Sprintf("test-vol-%d", i)
pv := makeTestPV("test-pv", 10, testDriver, volName)
pv.Spec.AccessModes = tc.accessModes
pvName := pv.GetName()
spec := volume.NewSpecFromPersistentVolume(pv, tc.readOnly)
if tc.fsType != "" {
spec.PersistentVolume.Spec.CSI.FSType = tc.fsType
}
}
// ensure call went all the way
pubs := csiMounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
if pubs[csiMounter.volumeID] != csiMounter.GetPath() {
t.Error("csi server may not have received NodePublishVolume call")
mounter, err := plug.NewMounter(
spec,
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Fatal("failed to create CSI mounter")
}
csiMounter := mounter.(*csiMountMgr)
csiMounter.csiClient = setupClient(t, true)
attachID := getAttachmentName(csiMounter.volumeID, csiMounter.driverName, string(plug.host.GetNodeName()))
attachment := makeTestAttachment(attachID, "test-node", pvName)
_, err = csiMounter.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
if err != nil {
t.Errorf("failed to setup VolumeAttachment: %v", err)
continue
}
// Mounter.SetUp()
var fsGroupPtr *int64
if tc.setFsGroup {
fsGroup := tc.fsGroup
fsGroupPtr = &fsGroup
}
if err := csiMounter.SetUp(fsGroupPtr); err != nil {
t.Fatalf("mounter.Setup failed: %v", err)
}
// Test that the file system type on the spec was not overridden
if csiMounter.spec.PersistentVolume.Spec.CSI.FSType != tc.fsType {
t.Errorf("file system type was overridden by type %s", csiMounter.spec.PersistentVolume.Spec.CSI.FSType)
}
// ensure call went all the way
pubs := csiMounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
if pubs[csiMounter.volumeID].Path != csiMounter.GetPath() {
t.Error("csi server may not have received NodePublishVolume call")
}
}
}
func TestUnmounterTeardown(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
@@ -210,7 +436,7 @@ func TestUnmounterTeardown(t *testing.T) {
}
func TestSaveVolumeData(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
@@ -256,3 +482,15 @@ func TestSaveVolumeData(t *testing.T) {
}
}
}
func getCSIDriver(name string, podInfoMountVersion *string, attachable *bool) *csiapi.CSIDriver {
return &csiapi.CSIDriver{
ObjectMeta: meta.ObjectMeta{
Name: name,
},
Spec: csiapi.CSIDriverSpec{
PodInfoOnMountVersion: podInfoMountVersion,
AttachRequired: attachable,
},
}
}

View File

@@ -25,15 +25,23 @@ import (
"sync"
"time"
"context"
"github.com/golang/glog"
api "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
csiapiinformer "k8s.io/csi-api/pkg/client/informers/externalversions"
csiinformer "k8s.io/csi-api/pkg/client/informers/externalversions/csi/v1alpha1"
csilister "k8s.io/csi-api/pkg/client/listers/csi/v1alpha1"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/csi/labelmanager"
"k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager"
)
const (
@@ -48,11 +56,16 @@ const (
volNameSep = "^"
volDataFileName = "vol_data.json"
fsTypeBlockName = "block"
// TODO: increase to something useful
csiResyncPeriod = time.Minute
)
type csiPlugin struct {
host volume.VolumeHost
blockEnabled bool
host volume.VolumeHost
blockEnabled bool
csiDriverLister csilister.CSIDriverLister
csiDriverInformer csiinformer.CSIDriverInformer
}
// ProbeVolumePlugins returns implemented plugins
@@ -77,43 +90,92 @@ type csiDriversStore struct {
sync.RWMutex
}
// RegistrationHandler is the handler which is fed to the pluginwatcher API.
type RegistrationHandler struct {
}
// TODO (verult) consider using a struct instead of global variables
// csiDrivers map keeps track of all registered CSI drivers on the node and their
// corresponding sockets
var csiDrivers csiDriversStore
var lm labelmanager.Interface
var nim nodeinfomanager.Interface
// RegistrationCallback is called by kubelet's plugin watcher upon detection
// PluginHandler is the plugin registration handler interface passed to the
// pluginwatcher module in kubelet
var PluginHandler = &RegistrationHandler{}
// ValidatePlugin is called by kubelet's plugin watcher upon detection
// of a new registration socket opened by CSI Driver registrar side car.
func RegistrationCallback(pluginName string, endpoint string, versions []string, socketPath string) (error, chan bool) {
func (h *RegistrationHandler) ValidatePlugin(pluginName string, endpoint string, versions []string) error {
glog.Infof(log("Trying to register a new plugin with name: %s endpoint: %s versions: %s",
pluginName, endpoint, strings.Join(versions, ",")))
glog.Infof(log("Callback from kubelet with plugin name: %s endpoint: %s versions: %s socket path: %s",
pluginName, endpoint, strings.Join(versions, ","), socketPath))
return nil
}
if endpoint == "" {
endpoint = socketPath
}
// Calling nodeLabelManager to update label for newly registered CSI driver
err := lm.AddLabels(pluginName)
// RegisterPlugin is called when a plugin can be registered
func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string) error {
glog.Infof(log("Register new plugin with name: %s at endpoint: %s", pluginName, endpoint))
func() {
// Storing endpoint of newly registered CSI driver into the map, where CSI driver name will be the key
// all other CSI components will be able to get the actual socket of CSI drivers by its name.
// It's not necessary to lock the entire RegistrationCallback() function because only the CSI
// client depends on this driver map, and the CSI client does not depend on node information
// updated in the rest of the function.
csiDrivers.Lock()
defer csiDrivers.Unlock()
csiDrivers.driversMap[pluginName] = csiDriver{driverName: pluginName, driverEndpoint: endpoint}
}()
// Get node info from the driver.
csi := newCsiDriverClient(pluginName)
// TODO (verult) retry with exponential backoff, possibly added in csi client library.
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
driverNodeID, maxVolumePerNode, accessibleTopology, err := csi.NodeGetInfo(ctx)
if err != nil {
return err, nil
unregisterDriver(pluginName)
return fmt.Errorf("error during CSI NodeGetInfo() call: %v", err)
}
// Storing endpoint of newly registered CSI driver into the map, where CSI driver name will be the key
// all other CSI components will be able to get the actual socket of CSI drivers by its name.
csiDrivers.Lock()
defer csiDrivers.Unlock()
csiDrivers.driversMap[pluginName] = csiDriver{driverName: pluginName, driverEndpoint: endpoint}
return nil, nil
err = nim.AddNodeInfo(pluginName, driverNodeID, maxVolumePerNode, accessibleTopology)
if err != nil {
unregisterDriver(pluginName)
return fmt.Errorf("error updating CSI node info in the cluster: %v", err)
}
return nil
}
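// Failure-path note: if NodeGetInfo or AddNodeInfo fails, unregisterDriver
// (below) removes the just-added entry from csiDrivers and asks the node info
// manager to drop its node info, so a half-registered driver does not linger.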
// DeRegisterPlugin is called when a plugin removes its socket, signaling
// it is no longer available
// TODO: Handle DeRegistration
func (h *RegistrationHandler) DeRegisterPlugin(pluginName string) {
}
func (p *csiPlugin) Init(host volume.VolumeHost) error {
glog.Info(log("plugin initializing..."))
p.host = host
if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
csiClient := host.GetCSIClient()
if csiClient == nil {
glog.Warning("The client for CSI Custom Resources is not available, skipping informer initialization")
} else {
// Start informer for CSIDrivers.
factory := csiapiinformer.NewSharedInformerFactory(csiClient, csiResyncPeriod)
p.csiDriverInformer = factory.Csi().V1alpha1().CSIDrivers()
p.csiDriverLister = p.csiDriverInformer.Lister()
go factory.Start(wait.NeverStop)
}
}
// Initializing csiDrivers map and label management channels
csiDrivers = csiDriversStore{driversMap: map[string]csiDriver{}}
lm = labelmanager.NewLabelManager(host.GetNodeName(), host.GetKubeClient())
nim = nodeinfomanager.NewNodeInfoManager(host.GetNodeName(), host)
return nil
}
@@ -249,6 +311,7 @@ func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.S
glog.V(4).Info(log("plugin.ConstructVolumeSpec extracted [%#v]", volData))
fsMode := api.PersistentVolumeFilesystem
pv := &api.PersistentVolume{
ObjectMeta: meta.ObjectMeta{
Name: volData[volDataKey.specVolID],
@@ -260,6 +323,7 @@ func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.S
VolumeHandle: volData[volDataKey.volHandle],
},
},
VolumeMode: &fsMode,
},
}
@@ -268,7 +332,7 @@ func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.S
func (p *csiPlugin) SupportsMountOption() bool {
// TODO (vladimirvivien) use CSI VolumeCapability.MountVolume.mount_flags
// to probe for the result for this method:w
// to probe for the result for this method
return false
}
@@ -279,6 +343,8 @@ func (p *csiPlugin) SupportsBulkVolumeVerification() bool {
// volume.AttachableVolumePlugin methods
var _ volume.AttachableVolumePlugin = &csiPlugin{}
var _ volume.DeviceMountableVolumePlugin = &csiPlugin{}
func (p *csiPlugin) NewAttacher() (volume.Attacher, error) {
k8s := p.host.GetKubeClient()
if k8s == nil {
@@ -293,6 +359,10 @@ func (p *csiPlugin) NewAttacher() (volume.Attacher, error) {
}, nil
}
func (p *csiPlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return p.NewAttacher()
}
func (p *csiPlugin) NewDetacher() (volume.Detacher, error) {
k8s := p.host.GetKubeClient()
if k8s == nil {
@@ -307,9 +377,13 @@ func (p *csiPlugin) NewDetacher() (volume.Detacher, error) {
}, nil
}
func (p *csiPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return p.NewDetacher()
}
func (p *csiPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
m := p.host.GetMounter(p.GetPluginName())
return mount.GetMountRefs(m, deviceMountPath)
return m.GetMountRefs(deviceMountPath)
}
// BlockVolumePlugin methods
@@ -346,6 +420,7 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opt
driverName: pvSource.Driver,
readOnly: readOnly,
spec: spec,
specName: spec.Name(),
podUID: podRef.UID,
}
@@ -441,3 +516,60 @@ func (p *csiPlugin) ConstructBlockVolumeSpec(podUID types.UID, specVolName, mapP
return volume.NewSpecFromPersistentVolume(pv, false), nil
}
func (p *csiPlugin) skipAttach(driver string) (bool, error) {
if !utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
return false, nil
}
if p.csiDriverLister == nil {
return false, errors.New("CSIDriver lister does not exist")
}
csiDriver, err := p.csiDriverLister.Get(driver)
if err != nil {
if apierrs.IsNotFound(err) {
// Don't skip attach if CSIDriver does not exist
return false, nil
}
return false, err
}
if csiDriver.Spec.AttachRequired != nil && *csiDriver.Spec.AttachRequired == false {
return true, nil
}
return false, nil
}
func (p *csiPlugin) getPublishVolumeInfo(client clientset.Interface, handle, driver, nodeName string) (map[string]string, error) {
skip, err := p.skipAttach(driver)
if err != nil {
return nil, err
}
if skip {
return nil, nil
}
attachID := getAttachmentName(handle, driver, nodeName)
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
attachment, err := client.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
return nil, err // This err already has enough context ("VolumeAttachment xyz not found")
}
if attachment == nil {
err = errors.New("no existing VolumeAttachment found")
return nil, err
}
return attachment.Status.AttachmentMetadata, nil
}
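// When skipAttach reports true (a CSIDriver object with attachRequired=false),
// the returned metadata is nil and mounting proceeds without consulting any
// VolumeAttachment; NodePublishVolume then receives an empty publish info map.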
func unregisterDriver(driverName string) {
func() {
csiDrivers.Lock()
defer csiDrivers.Unlock()
delete(csiDrivers.driversMap, driverName)
}()
if err := nim.RemoveNodeInfo(driverName); err != nil {
glog.Errorf("Error unregistering CSI driver: %v", err)
}
}

View File

@@ -27,15 +27,18 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
fakeclient "k8s.io/client-go/kubernetes/fake"
utiltesting "k8s.io/client-go/util/testing"
fakecsi "k8s.io/csi-api/pkg/client/clientset/versioned/fake"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
// create a plugin mgr to load plugins and setup a fake client
func newTestPlugin(t *testing.T) (*csiPlugin, string) {
func newTestPlugin(t *testing.T, client *fakeclient.Clientset, csiClient *fakecsi.Clientset) (*csiPlugin, string) {
err := utilfeature.DefaultFeatureGate.Set("CSIBlockVolume=true")
if err != nil {
t.Fatalf("Failed to enable feature gate for CSIBlockVolume: %v", err)
@@ -46,11 +49,18 @@ func newTestPlugin(t *testing.T) (*csiPlugin, string) {
t.Fatalf("can't create temp dir: %v", err)
}
fakeClient := fakeclient.NewSimpleClientset()
host := volumetest.NewFakeVolumeHost(
if client == nil {
client = fakeclient.NewSimpleClientset()
}
if csiClient == nil {
csiClient = fakecsi.NewSimpleClientset()
}
host := volumetest.NewFakeVolumeHostWithCSINodeName(
tmpDir,
fakeClient,
client,
csiClient,
nil,
"fakeNode",
)
plugMgr := &volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
@@ -65,6 +75,13 @@ func newTestPlugin(t *testing.T) (*csiPlugin, string) {
t.Fatalf("cannot assert plugin to be type csiPlugin")
}
if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
// Wait until the informer in CSI volume plugin has all CSIDrivers.
wait.PollImmediate(testInformerSyncPeriod, testInformerSyncTimeout, func() (bool, error) {
return csiPlug.csiDriverInformer.Informer().HasSynced(), nil
})
}
return csiPlug, tmpDir
}
@@ -92,7 +109,7 @@ func makeTestPV(name string, sizeGig int, driverName, volID string) *api.Persist
}
func TestPluginGetPluginName(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
if plug.GetPluginName() != "kubernetes.io/csi" {
t.Errorf("unexpected plugin name %v", plug.GetPluginName())
@@ -100,7 +117,7 @@ func TestPluginGetPluginName(t *testing.T) {
}
func TestPluginGetVolumeName(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
@@ -129,7 +146,7 @@ func TestPluginGetVolumeName(t *testing.T) {
}
func TestPluginCanSupport(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
@@ -141,7 +158,7 @@ func TestPluginCanSupport(t *testing.T) {
}
func TestPluginConstructVolumeSpec(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
testCases := []struct {
@@ -186,6 +203,14 @@ func TestPluginConstructVolumeSpec(t *testing.T) {
t.Errorf("expected volID %s, got volID %s", tc.data[volDataKey.volHandle], volHandle)
}
if spec.PersistentVolume.Spec.VolumeMode == nil {
t.Fatalf("Volume mode has not been set.")
}
if *spec.PersistentVolume.Spec.VolumeMode != api.PersistentVolumeFilesystem {
t.Errorf("Unexpected volume mode %q", *spec.PersistentVolume.Spec.VolumeMode)
}
if spec.Name() != tc.specVolID {
t.Errorf("Unexpected spec name %s", spec.Name())
}
@@ -193,7 +218,7 @@ func TestPluginConstructVolumeSpec(t *testing.T) {
}
func TestPluginNewMounter(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
@@ -241,7 +266,7 @@ func TestPluginNewMounter(t *testing.T) {
}
func TestPluginNewUnmounter(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
@@ -286,7 +311,7 @@ func TestPluginNewUnmounter(t *testing.T) {
}
func TestPluginNewAttacher(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
@@ -304,7 +329,7 @@ func TestPluginNewAttacher(t *testing.T) {
}
func TestPluginNewDetacher(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
detacher, err := plug.NewDetacher()
@@ -322,7 +347,7 @@ func TestPluginNewDetacher(t *testing.T) {
}
func TestPluginNewBlockMapper(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
pv := makeTestPV("test-block-pv", 10, testDriver, testVol)
@@ -367,7 +392,7 @@ func TestPluginNewBlockMapper(t *testing.T) {
}
func TestPluginNewUnmapper(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
@@ -424,7 +449,7 @@ func TestPluginNewUnmapper(t *testing.T) {
}
func TestPluginConstructBlockVolumeSpec(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
testCases := []struct {
@@ -463,6 +488,14 @@ func TestPluginConstructBlockVolumeSpec(t *testing.T) {
continue
}
if spec.PersistentVolume.Spec.VolumeMode == nil {
t.Fatalf("Volume mode has not been set.")
}
if *spec.PersistentVolume.Spec.VolumeMode != api.PersistentVolumeBlock {
t.Errorf("Unexpected volume mode %q", *spec.PersistentVolume.Spec.VolumeMode)
}
volHandle := spec.PersistentVolume.Spec.CSI.VolumeHandle
if volHandle != tc.data[volDataKey.volHandle] {
t.Errorf("expected volID %s, got volID %s", tc.data[volDataKey.volHandle], volHandle)

View File

@@ -28,6 +28,12 @@ import (
"k8s.io/client-go/kubernetes"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"time"
)
const (
testInformerSyncPeriod = 100 * time.Millisecond
testInformerSyncTimeout = 30 * time.Second
)
func getCredentialsFromSecret(k8s kubernetes.Interface, secretRef *api.SecretReference) (map[string]string, error) {
@@ -121,3 +127,16 @@ func getVolumeDeviceDataDir(specVolID string, host volume.VolumeHost) string {
sanitizedSpecVolID := kstrings.EscapeQualifiedNameForDisk(specVolID)
return path.Join(host.GetVolumeDevicePluginDir(csiPluginName), sanitizedSpecVolID, "data")
}
// hasReadWriteOnce returns true if modes contains v1.ReadWriteOnce
func hasReadWriteOnce(modes []api.PersistentVolumeAccessMode) bool {
if modes == nil {
return false
}
for _, mode := range modes {
if mode == api.ReadWriteOnce {
return true
}
}
return false
}

View File

@@ -56,19 +56,25 @@ func (f *IdentityClient) Probe(ctx context.Context, in *csipb.ProbeRequest, opts
return nil, nil
}
type CSIVolume struct {
Attributes map[string]string
Path string
}
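// CSIVolume records both where a volume was staged/published and the
// attributes it was published with, so tests can assert on injected
// attributes (e.g. pod info) rather than just paths.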
// NodeClient returns CSI node client
type NodeClient struct {
nodePublishedVolumes map[string]string
nodeStagedVolumes map[string]string
nodePublishedVolumes map[string]CSIVolume
nodeStagedVolumes map[string]CSIVolume
stageUnstageSet bool
nodeGetInfoResp *csipb.NodeGetInfoResponse
nextErr error
}
// NewNodeClient returns fake node client
func NewNodeClient(stageUnstageSet bool) *NodeClient {
return &NodeClient{
nodePublishedVolumes: make(map[string]string),
nodeStagedVolumes: make(map[string]string),
nodePublishedVolumes: make(map[string]CSIVolume),
nodeStagedVolumes: make(map[string]CSIVolume),
stageUnstageSet: stageUnstageSet,
}
}
@@ -78,18 +84,25 @@ func (f *NodeClient) SetNextError(err error) {
f.nextErr = err
}
func (f *NodeClient) SetNodeGetInfoResp(resp *csipb.NodeGetInfoResponse) {
f.nodeGetInfoResp = resp
}
// GetNodePublishedVolumes returns node published volumes
func (f *NodeClient) GetNodePublishedVolumes() map[string]string {
func (f *NodeClient) GetNodePublishedVolumes() map[string]CSIVolume {
return f.nodePublishedVolumes
}
// GetNodeStagedVolumes returns node staged volumes
func (f *NodeClient) GetNodeStagedVolumes() map[string]string {
func (f *NodeClient) GetNodeStagedVolumes() map[string]CSIVolume {
return f.nodeStagedVolumes
}
func (f *NodeClient) AddNodeStagedVolume(volID, deviceMountPath string) {
f.nodeStagedVolumes[volID] = deviceMountPath
func (f *NodeClient) AddNodeStagedVolume(volID, deviceMountPath string, attributes map[string]string) {
f.nodeStagedVolumes[volID] = CSIVolume{
Path: deviceMountPath,
Attributes: attributes,
}
}
// NodePublishVolume implements CSI NodePublishVolume
@@ -110,7 +123,10 @@ func (f *NodeClient) NodePublishVolume(ctx context.Context, req *csipb.NodePubli
if !strings.Contains(fsTypes, fsType) {
return nil, errors.New("invalid fstype")
}
f.nodePublishedVolumes[req.GetVolumeId()] = req.GetTargetPath()
f.nodePublishedVolumes[req.GetVolumeId()] = CSIVolume{
Path: req.GetTargetPath(),
Attributes: req.GetVolumeAttributes(),
}
return &csipb.NodePublishVolumeResponse{}, nil
}
@@ -153,7 +169,10 @@ func (f *NodeClient) NodeStageVolume(ctx context.Context, req *csipb.NodeStageVo
return nil, errors.New("invalid fstype")
}
f.nodeStagedVolumes[req.GetVolumeId()] = req.GetStagingTargetPath()
f.nodeStagedVolumes[req.GetVolumeId()] = CSIVolume{
Path: req.GetStagingTargetPath(),
Attributes: req.GetVolumeAttributes(),
}
return &csipb.NodeStageVolumeResponse{}, nil
}
@@ -179,6 +198,14 @@ func (f *NodeClient) NodeGetId(ctx context.Context, in *csipb.NodeGetIdRequest,
return nil, nil
}
// NodeGetInfo implements csi method
func (f *NodeClient) NodeGetInfo(ctx context.Context, in *csipb.NodeGetInfoRequest, opts ...grpc.CallOption) (*csipb.NodeGetInfoResponse, error) {
if f.nextErr != nil {
return nil, f.nextErr
}
return f.nodeGetInfoResp, nil
}
// NodeGetCapabilities implements csi method
func (f *NodeClient) NodeGetCapabilities(ctx context.Context, in *csipb.NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipb.NodeGetCapabilitiesResponse, error) {
resp := &csipb.NodeGetCapabilitiesResponse{

View File

@@ -1,30 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["labelmanager.go"],
importpath = "k8s.io/kubernetes/pkg/volume/csi/labelmanager",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -1,251 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package labelmanager includes internal functions used to add/delete labels to
// kubernetes nodes for corresponding CSI drivers
package labelmanager // import "k8s.io/kubernetes/pkg/volume/csi/labelmanager"
import (
"encoding/json"
"fmt"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/util/retry"
)
const (
// Name of node annotation that contains JSON map of driver names to node
// names
annotationKey = "csi.volume.kubernetes.io/nodeid"
csiPluginName = "kubernetes.io/csi"
)
// labelManagerStruct holds the node name and Kubernetes client used by the
// driver registration code to manage the node's CSI annotations
type labelManagerStruct struct {
nodeName types.NodeName
k8s kubernetes.Interface
}
// Interface implements an interface for managing labels of a node
type Interface interface {
AddLabels(driverName string) error
}
// NewLabelManager initializes labelManagerStruct and returns available interfaces
func NewLabelManager(nodeName types.NodeName, kubeClient kubernetes.Interface) Interface {
return labelManagerStruct{
nodeName: nodeName,
k8s: kubeClient,
}
}
// nodeLabelManager waits for labeling requests initiated by the driver's registration
// process.
func (lm labelManagerStruct) AddLabels(driverName string) error {
err := verifyAndAddNodeId(string(lm.nodeName), lm.k8s.CoreV1().Nodes(), driverName, string(lm.nodeName))
if err != nil {
return fmt.Errorf("failed to update node %s's annotation with error: %+v", lm.nodeName, err)
}
return nil
}
// Clones the given map and returns a new map with the given key and value added.
// Returns the given map, if annotationKey is empty.
func cloneAndAddAnnotation(
annotations map[string]string,
annotationKey,
annotationValue string) map[string]string {
if annotationKey == "" {
// Don't need to add an annotation.
return annotations
}
// Clone.
newAnnotations := map[string]string{}
for key, value := range annotations {
newAnnotations[key] = value
}
newAnnotations[annotationKey] = annotationValue
return newAnnotations
}
func verifyAndAddNodeId(
k8sNodeName string,
k8sNodesClient corev1.NodeInterface,
csiDriverName string,
csiDriverNodeId string) error {
// Add or update annotation on Node object
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
// Retrieve the latest version of Node before attempting update, so that
// existing changes are not overwritten. RetryOnConflict uses
// exponential backoff to avoid exhausting the apiserver.
result, getErr := k8sNodesClient.Get(k8sNodeName, metav1.GetOptions{})
if getErr != nil {
glog.Errorf("Failed to get latest version of Node: %v", getErr)
return getErr // do not wrap error
}
var previousAnnotationValue string
if result.ObjectMeta.Annotations != nil {
previousAnnotationValue =
result.ObjectMeta.Annotations[annotationKey]
glog.V(3).Infof(
"previousAnnotationValue=%q", previousAnnotationValue)
}
existingDriverMap := map[string]string{}
if previousAnnotationValue != "" {
// Parse previousAnnotationValue as JSON
if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil {
return fmt.Errorf(
"failed to parse node's %q annotation value (%q) err=%v",
annotationKey,
previousAnnotationValue,
err)
}
}
if val, ok := existingDriverMap[csiDriverName]; ok {
if val == csiDriverNodeId {
// Value already exists in node annotation, nothing more to do
glog.V(1).Infof(
"The key value {%q: %q} alredy eixst in node %q annotation, no need to update: %v",
csiDriverName,
csiDriverNodeId,
annotationKey,
previousAnnotationValue)
return nil
}
}
// Add/update annotation value
existingDriverMap[csiDriverName] = csiDriverNodeId
jsonObj, err := json.Marshal(existingDriverMap)
if err != nil {
return fmt.Errorf(
"failed while trying to add key value {%q: %q} to node %q annotation. Existing value: %v, err: %v",
csiDriverName,
csiDriverNodeId,
annotationKey,
previousAnnotationValue,
err)
}
result.ObjectMeta.Annotations = cloneAndAddAnnotation(
result.ObjectMeta.Annotations,
annotationKey,
string(jsonObj))
_, updateErr := k8sNodesClient.Update(result)
if updateErr == nil {
glog.V(1).Infof(
"Updated node %q successfully for CSI driver %q and CSI node name %q",
k8sNodeName,
csiDriverName,
csiDriverNodeId)
}
return updateErr // do not wrap error
})
if retryErr != nil {
return fmt.Errorf("node update failed: %v", retryErr)
}
return nil
}
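// The read-modify-write loop above leans on retry.RetryOnConflict, which
// re-runs the closure with backoff only while the returned error is an API
// conflict (HTTP 409). A minimal sketch of the contract assumed here:
//
//   err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
//       node, err := nodesClient.Get(name, metav1.GetOptions{}) // always re-read the latest object
//       if err != nil {
//           return err
//       }
//       node.Annotations["example"] = "value" // mutate the fresh copy
//       _, err = nodesClient.Update(node)     // a Conflict here triggers another attempt
//       return err
//   })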
// Fetches Kubernetes node API object corresponding to k8sNodeName.
// If the csiDriverName is present in the node annotation, it is removed.
func verifyAndDeleteNodeId(
k8sNodeName string,
k8sNodesClient corev1.NodeInterface,
csiDriverName string) error {
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
// Retrieve the latest version of Node before attempting update, so that
// existing changes are not overwritten. RetryOnConflict uses
// exponential backoff to avoid exhausting the apiserver.
result, getErr := k8sNodesClient.Get(k8sNodeName, metav1.GetOptions{})
if getErr != nil {
glog.Errorf("failed to get latest version of Node: %v", getErr)
return getErr // do not wrap error
}
var previousAnnotationValue string
if result.ObjectMeta.Annotations != nil {
previousAnnotationValue =
result.ObjectMeta.Annotations[annotationKey]
glog.V(3).Infof(
"previousAnnotationValue=%q", previousAnnotationValue)
}
existingDriverMap := map[string]string{}
if previousAnnotationValue == "" {
// Annotation is absent or empty; nothing to clean up
glog.V(1).Infof(
"The key %q does not exist in node %q annotation, no need to cleanup.",
csiDriverName,
annotationKey)
return nil
}
// Parse previousAnnotationValue as JSON
if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil {
return fmt.Errorf(
"failed to parse node's %q annotation value (%q) err=%v",
annotationKey,
previousAnnotationValue,
err)
}
if _, ok := existingDriverMap[csiDriverName]; !ok {
// Value is already missing from node annotation, nothing more to do
glog.V(1).Infof(
"The key %q does not eixst in node %q annotation, no need to cleanup: %v",
csiDriverName,
annotationKey,
previousAnnotationValue)
return nil
}
// Remove the driver's entry from the annotation value
delete(existingDriverMap, csiDriverName)
jsonObj, err := json.Marshal(existingDriverMap)
if err != nil {
return fmt.Errorf(
"failed while trying to remove key %q from node %q annotation. Existing data: %v, err: %v",
csiDriverName,
annotationKey,
previousAnnotationValue,
err)
}
result.ObjectMeta.Annotations = cloneAndAddAnnotation(
result.ObjectMeta.Annotations,
annotationKey,
string(jsonObj))
_, updateErr := k8sNodesClient.Update(result)
if updateErr == nil {
glog.V(1).Infof(
"Updated node %q annotation to remove CSI driver %q.",
k8sNodeName,
csiDriverName)
}
return updateErr // do not wrap error
})
if retryErr != nil {
return fmt.Errorf("node update failed: %v", retryErr)
}
return nil
}

View File

@@ -0,0 +1,64 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["nodeinfomanager.go"],
importpath = "k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager",
visibility = ["//visibility:public"],
deps = [
"//pkg/features:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["nodeinfomanager_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core/helper:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned/fake:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
],
)

View File

@@ -0,0 +1,543 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package nodeinfomanager includes internal functions used to add/delete
// CSI driver node information (annotations, labels, and CSINodeInfo objects)
// on Kubernetes nodes for corresponding CSI drivers
package nodeinfomanager // import "k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager"
import (
"encoding/json"
"fmt"
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/util/retry"
csiv1alpha1 "k8s.io/csi-api/pkg/apis/csi/v1alpha1"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
const (
// Name of node annotation that contains JSON map of driver names to node IDs
annotationKeyNodeID = "csi.volume.kubernetes.io/nodeid"
)
var nodeKind = v1.SchemeGroupVersion.WithKind("Node")
// nodeInfoManager contains necessary common dependencies to update node info on both
// the Node and CSINodeInfo objects.
type nodeInfoManager struct {
nodeName types.NodeName
volumeHost volume.VolumeHost
}
// If no update is needed, the function must return the same Node object as the input.
type nodeUpdateFunc func(*v1.Node) (newNode *v1.Node, updated bool, err error)
// Interface defines the operations for managing a node's CSI-related information
type Interface interface {
// Record in the cluster the given node information from the CSI driver with the given name.
// Concurrent calls to AddNodeInfo() are allowed, but they should not be intertwined with calls
// to other methods in this interface.
AddNodeInfo(driverName string, driverNodeID string, maxVolumeLimit int64, topology *csipb.Topology) error
// Removes from the cluster the node information from the CSI driver with the given name.
// Concurrent calls to RemoveNodeInfo() are allowed, but they should not be intertwined with calls
// to other methods in this interface.
RemoveNodeInfo(driverName string) error
}
// NewNodeInfoManager initializes nodeInfoManager
func NewNodeInfoManager(
nodeName types.NodeName,
volumeHost volume.VolumeHost) Interface {
return &nodeInfoManager{
nodeName: nodeName,
volumeHost: volumeHost,
}
}
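// A minimal usage sketch (driver name, node ID, limit, and topology are
// assumed values; volumeHost is whatever volume.VolumeHost the caller holds):
//
//   nim := nodeinfomanager.NewNodeInfoManager(types.NodeName("node-1"), volumeHost)
//   err := nim.AddNodeInfo(
//       "com.example.csi/driver1",   // driver name
//       "com.example.csi/csi-node1", // node ID reported by the driver
//       10,                          // max attachable volume count
//       &csipb.Topology{Segments: map[string]string{"com.example.csi/zone": "zoneA"}},
//   )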
// AddNodeInfo updates the node ID annotation in the Node object and CSIDrivers field in the
// CSINodeInfo object. If the CSINodeInfo object doesn't yet exist, it will be created.
// If multiple calls to AddNodeInfo() are made in parallel, some calls might receive Node or
// CSINodeInfo update conflicts, which causes the function to retry the corresponding update.
func (nim *nodeInfoManager) AddNodeInfo(driverName string, driverNodeID string, maxAttachLimit int64, topology *csipb.Topology) error {
if driverNodeID == "" {
return fmt.Errorf("error adding CSI driver node info: driverNodeID must not be empty")
}
nodeUpdateFuncs := []nodeUpdateFunc{
updateNodeIDInNode(driverName, driverNodeID),
}
if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) {
nodeUpdateFuncs = append(nodeUpdateFuncs, updateTopologyLabels(topology))
}
if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
nodeUpdateFuncs = append(nodeUpdateFuncs, updateMaxAttachLimit(driverName, maxAttachLimit))
}
err := nim.updateNode(nodeUpdateFuncs...)
if err != nil {
return fmt.Errorf("error updating Node object with CSI driver node info: %v", err)
}
if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) {
err = nim.updateCSINodeInfo(driverName, driverNodeID, topology)
if err != nil {
return fmt.Errorf("error updating CSINodeInfo object with CSI driver node info: %v", err)
}
}
return nil
}
// RemoveNodeInfo removes the node ID annotation from the Node object and CSIDrivers field from the
// CSINodeInfo object. If the CSINodeInfo object contains no CSIDrivers, it will be deleted.
// If multiple calls to RemoveNodeInfo() are made in parallel, some calls might receive Node or
// CSINodeInfo update conflicts, which causes the function to retry the corresponding update.
func (nim *nodeInfoManager) RemoveNodeInfo(driverName string) error {
err := nim.removeCSINodeInfo(driverName)
if err != nil {
return fmt.Errorf("error removing CSI driver node info from CSINodeInfo object %v", err)
}
err = nim.updateNode(
removeMaxAttachLimit(driverName),
removeNodeIDFromNode(driverName),
)
if err != nil {
return fmt.Errorf("error removing CSI driver node info from Node object %v", err)
}
return nil
}
// updateNode repeatedly attempts to update the corresponding node object
// which is modified by applying the given update functions sequentially.
// Because updateFuncs are applied sequentially, later updateFuncs should take into account
// the effects of previous updateFuncs to avoid potential conflicts. For example, if multiple
// functions update the same field, updates in the last function are persisted.
func (nim *nodeInfoManager) updateNode(updateFuncs ...nodeUpdateFunc) error {
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
// Retrieve the latest version of Node before attempting update, so that
// existing changes are not overwritten. RetryOnConflict uses
// exponential backoff to avoid exhausting the apiserver.
kubeClient := nim.volumeHost.GetKubeClient()
if kubeClient == nil {
return fmt.Errorf("error getting kube client")
}
nodeClient := kubeClient.CoreV1().Nodes()
node, err := nodeClient.Get(string(nim.nodeName), metav1.GetOptions{})
if err != nil {
return err // do not wrap error
}
needUpdate := false
for _, update := range updateFuncs {
newNode, updated, err := update(node)
if err != nil {
return err
}
node = newNode
needUpdate = needUpdate || updated
}
if needUpdate {
_, updateErr := nodeClient.Update(node)
return updateErr // do not wrap error
}
return nil
})
if retryErr != nil {
return fmt.Errorf("node update failed: %v", retryErr)
}
return nil
}
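// A minimal sketch of a custom nodeUpdateFunc honoring the contract above
// (an assumed example, not one used by this package): return the input node
// with updated=false when there is nothing to change.
//
//   func addStaticLabel(key, value string) nodeUpdateFunc {
//       return func(node *v1.Node) (*v1.Node, bool, error) {
//           if node.Labels[key] == value {
//               return node, false, nil // no update needed
//           }
//           if node.Labels == nil {
//               node.Labels = map[string]string{}
//           }
//           node.Labels[key] = value
//           return node, true, nil
//       }
//   }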
// Guarantees the map is non-nil if no error is returned.
func buildNodeIDMapFromAnnotation(node *v1.Node) (map[string]string, error) {
var previousAnnotationValue string
if node.ObjectMeta.Annotations != nil {
previousAnnotationValue =
node.ObjectMeta.Annotations[annotationKeyNodeID]
}
var existingDriverMap map[string]string
if previousAnnotationValue != "" {
// Parse previousAnnotationValue as JSON
if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil {
return nil, fmt.Errorf(
"failed to parse node's %q annotation value (%q) err=%v",
annotationKeyNodeID,
previousAnnotationValue,
err)
}
}
if existingDriverMap == nil {
return make(map[string]string), nil
}
return existingDriverMap, nil
}
// updateNodeIDInNode returns a function that updates a Node object with the given
// Node ID information.
func updateNodeIDInNode(
csiDriverName string,
csiDriverNodeID string) nodeUpdateFunc {
return func(node *v1.Node) (*v1.Node, bool, error) {
existingDriverMap, err := buildNodeIDMapFromAnnotation(node)
if err != nil {
return nil, false, err
}
if val, ok := existingDriverMap[csiDriverName]; ok {
if val == csiDriverNodeID {
// Value already exists in node annotation, nothing more to do
return node, false, nil
}
}
// Add/update annotation value
existingDriverMap[csiDriverName] = csiDriverNodeID
jsonObj, err := json.Marshal(existingDriverMap)
if err != nil {
return nil, false, fmt.Errorf(
"error while marshalling node ID map updated with driverName=%q, nodeID=%q: %v",
csiDriverName,
csiDriverNodeID,
err)
}
if node.ObjectMeta.Annotations == nil {
node.ObjectMeta.Annotations = make(map[string]string)
}
node.ObjectMeta.Annotations[annotationKeyNodeID] = string(jsonObj)
return node, true, nil
}
}
// removeNodeIDFromNode returns a function that removes node ID information matching the given
// driver name from a Node object.
func removeNodeIDFromNode(csiDriverName string) nodeUpdateFunc {
return func(node *v1.Node) (*v1.Node, bool, error) {
var previousAnnotationValue string
if node.ObjectMeta.Annotations != nil {
previousAnnotationValue =
node.ObjectMeta.Annotations[annotationKeyNodeID]
}
if previousAnnotationValue == "" {
return node, false, nil
}
// Parse previousAnnotationValue as JSON
existingDriverMap := map[string]string{}
if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil {
return nil, false, fmt.Errorf(
"failed to parse node's %q annotation value (%q) err=%v",
annotationKeyNodeID,
previousAnnotationValue,
err)
}
if _, ok := existingDriverMap[csiDriverName]; !ok {
// Value is already missing in node annotation, nothing more to do
return node, false, nil
}
// Delete annotation value
delete(existingDriverMap, csiDriverName)
if len(existingDriverMap) == 0 {
delete(node.ObjectMeta.Annotations, annotationKeyNodeID)
} else {
jsonObj, err := json.Marshal(existingDriverMap)
if err != nil {
return nil, false, fmt.Errorf(
"failed while trying to remove key %q from node %q annotation. Existing data: %v, err: %v",
csiDriverName,
annotationKeyNodeID,
previousAnnotationValue,
err)
}
node.ObjectMeta.Annotations[annotationKeyNodeID] = string(jsonObj)
}
return node, true, nil
}
}
// updateTopologyLabels returns a function that updates labels of a Node object with the given
// topology information.
func updateTopologyLabels(topology *csipb.Topology) nodeUpdateFunc {
return func(node *v1.Node) (*v1.Node, bool, error) {
if topology == nil || len(topology.Segments) == 0 {
return node, false, nil
}
for k, v := range topology.Segments {
if curVal, exists := node.Labels[k]; exists && curVal != v {
return nil, false, fmt.Errorf("detected topology value collision: driver reported %q:%q but existing label is %q:%q", k, v, k, curVal)
}
}
if node.Labels == nil {
node.Labels = make(map[string]string)
}
for k, v := range topology.Segments {
node.Labels[k] = v
}
return node, true, nil
}
}
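// For example (illustrative values): a driver reporting topology segments
// {"com.example.csi/zone": "zoneA"} yields the node label
// com.example.csi/zone=zoneA, while a pre-existing label with the same key
// but a different value fails the update instead of being overwritten.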
func (nim *nodeInfoManager) updateCSINodeInfo(
driverName string,
driverNodeID string,
topology *csipb.Topology) error {
csiKubeClient := nim.volumeHost.GetCSIClient()
if csiKubeClient == nil {
return fmt.Errorf("error getting CSI client")
}
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
nodeInfo, err := csiKubeClient.CsiV1alpha1().CSINodeInfos().Get(string(nim.nodeName), metav1.GetOptions{})
if errors.IsNotFound(err) {
return nim.createNodeInfoObject(driverName, driverNodeID, topology)
}
if err != nil {
return err // do not wrap error
}
return nim.updateNodeInfoObject(nodeInfo, driverName, driverNodeID, topology)
})
if retryErr != nil {
return fmt.Errorf("CSINodeInfo update failed: %v", retryErr)
}
return nil
}
func (nim *nodeInfoManager) createNodeInfoObject(
driverName string,
driverNodeID string,
topology *csipb.Topology) error {
kubeClient := nim.volumeHost.GetKubeClient()
if kubeClient == nil {
return fmt.Errorf("error getting kube client")
}
csiKubeClient := nim.volumeHost.GetCSIClient()
if csiKubeClient == nil {
return fmt.Errorf("error getting CSI client")
}
topologyKeys := []string{} // must be an empty slice instead of nil to satisfy CRD OpenAPI Schema validation
if topology != nil {
for k := range topology.Segments {
topologyKeys = append(topologyKeys, k)
}
}
node, err := kubeClient.CoreV1().Nodes().Get(string(nim.nodeName), metav1.GetOptions{})
if err != nil {
return err // do not wrap error
}
nodeInfo := &csiv1alpha1.CSINodeInfo{
ObjectMeta: metav1.ObjectMeta{
Name: string(nim.nodeName),
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: nodeKind.Version,
Kind: nodeKind.Kind,
Name: node.Name,
UID: node.UID,
},
},
},
CSIDrivers: []csiv1alpha1.CSIDriverInfo{
{
Driver: driverName,
NodeID: driverNodeID,
TopologyKeys: topologyKeys,
},
},
}
_, err = csiKubeClient.CsiV1alpha1().CSINodeInfos().Create(nodeInfo)
return err // do not wrap error
}
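// Rendered as YAML for illustration (names and IDs are assumed values), the
// created object looks roughly like:
//
//   kind: CSINodeInfo
//   metadata:
//     name: node-1
//     ownerReferences:
//     - kind: Node
//       name: node-1
//       uid: <node UID>
//   csiDrivers:
//   - driver: com.example.csi/driver1
//     nodeID: com.example.csi/csi-node1
//     topologyKeys: ["com.example.csi/zone"]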
func (nim *nodeInfoManager) updateNodeInfoObject(
nodeInfo *csiv1alpha1.CSINodeInfo,
driverName string,
driverNodeID string,
topology *csipb.Topology) error {
csiKubeClient := nim.volumeHost.GetCSIClient()
if csiKubeClient == nil {
return fmt.Errorf("error getting CSI client")
}
topologyKeys := make(sets.String)
if topology != nil {
for k := range topology.Segments {
topologyKeys.Insert(k)
}
}
// Clone driver list, omitting the driver that matches the given driverName,
// unless the driver is identical to information provided, in which case no update is necessary.
var newDriverInfos []csiv1alpha1.CSIDriverInfo
for _, driverInfo := range nodeInfo.CSIDrivers {
if driverInfo.Driver == driverName {
prevTopologyKeys := sets.NewString(driverInfo.TopologyKeys...)
if driverInfo.NodeID == driverNodeID && prevTopologyKeys.Equal(topologyKeys) {
// No update needed
return nil
}
} else {
// Omit driverInfo matching given driverName
newDriverInfos = append(newDriverInfos, driverInfo)
}
}
// Append new driver
driverInfo := csiv1alpha1.CSIDriverInfo{
Driver: driverName,
NodeID: driverNodeID,
TopologyKeys: topologyKeys.List(),
}
newDriverInfos = append(newDriverInfos, driverInfo)
nodeInfo.CSIDrivers = newDriverInfos
_, err := csiKubeClient.CsiV1alpha1().CSINodeInfos().Update(nodeInfo)
return err // do not wrap error
}
func (nim *nodeInfoManager) removeCSINodeInfo(csiDriverName string) error {
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
csiKubeClient := nim.volumeHost.GetCSIClient()
if csiKubeClient == nil {
return fmt.Errorf("error getting CSI client")
}
nodeInfoClient := csiKubeClient.CsiV1alpha1().CSINodeInfos()
nodeInfo, err := nodeInfoClient.Get(string(nim.nodeName), metav1.GetOptions{})
if errors.IsNotFound(err) {
// do nothing
return nil
}
if err != nil {
return err // do not wrap error
}
// Remove matching driver from driver list
var newDriverInfos []csiv1alpha1.CSIDriverInfo
for _, driverInfo := range nodeInfo.CSIDrivers {
if driverInfo.Driver != csiDriverName {
newDriverInfos = append(newDriverInfos, driverInfo)
}
}
if len(newDriverInfos) == len(nodeInfo.CSIDrivers) {
// No changes, don't update
return nil
}
if len(newDriverInfos) == 0 {
// No drivers left, delete CSINodeInfo object
return nodeInfoClient.Delete(string(nim.nodeName), &metav1.DeleteOptions{})
}
// TODO (verult) make sure CSINodeInfo has validation logic to prevent duplicate driver names
nodeInfo.CSIDrivers = newDriverInfos
_, updateErr := nodeInfoClient.Update(nodeInfo)
return updateErr // do not wrap error
})
if retryErr != nil {
return fmt.Errorf("CSINodeInfo update failed: %v", retryErr)
}
return nil
}
func updateMaxAttachLimit(driverName string, maxLimit int64) nodeUpdateFunc {
return func(node *v1.Node) (*v1.Node, bool, error) {
if maxLimit <= 0 {
glog.V(4).Infof("skipping adding attach limit for %s", driverName)
return node, false, nil
}
if node.Status.Capacity == nil {
node.Status.Capacity = v1.ResourceList{}
}
if node.Status.Allocatable == nil {
node.Status.Allocatable = v1.ResourceList{}
}
limitKeyName := util.GetCSIAttachLimitKey(driverName)
node.Status.Capacity[v1.ResourceName(limitKeyName)] = *resource.NewQuantity(maxLimit, resource.DecimalSI)
node.Status.Allocatable[v1.ResourceName(limitKeyName)] = *resource.NewQuantity(maxLimit, resource.DecimalSI)
return node, true, nil
}
}
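// For example (illustrative), with maxLimit=10 the node status gains a
// capacity and an allocatable entry keyed by
// util.GetCSIAttachLimitKey(driverName), which the scheduler reads to cap how
// many volumes of this driver may be attached to the node:
//
//   limitKey := v1.ResourceName(util.GetCSIAttachLimitKey("com.example.csi/driver1"))
//   qty := node.Status.Allocatable[limitKey] // resource.Quantity whose Value() is 10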
func removeMaxAttachLimit(driverName string) nodeUpdateFunc {
return func(node *v1.Node) (*v1.Node, bool, error) {
limitKey := v1.ResourceName(util.GetCSIAttachLimitKey(driverName))
capacityExists := false
if node.Status.Capacity != nil {
_, capacityExists = node.Status.Capacity[limitKey]
}
allocatableExists := false
if node.Status.Allocatable != nil {
_, allocatableExists = node.Status.Allocatable[limitKey]
}
if !capacityExists && !allocatableExists {
return node, false, nil
}
delete(node.Status.Capacity, limitKey)
if len(node.Status.Capacity) == 0 {
node.Status.Capacity = nil
}
delete(node.Status.Allocatable, limitKey)
if len(node.Status.Allocatable) == 0 {
node.Status.Allocatable = nil
}
return node, true, nil
}
}

View File

@@ -0,0 +1,809 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodeinfomanager
import (
"encoding/json"
"testing"
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/client-go/kubernetes/fake"
utiltesting "k8s.io/client-go/util/testing"
csiv1alpha1 "k8s.io/csi-api/pkg/apis/csi/v1alpha1"
csifake "k8s.io/csi-api/pkg/client/clientset/versioned/fake"
"k8s.io/kubernetes/pkg/apis/core/helper"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util"
)
type testcase struct {
name string
driverName string
existingNode *v1.Node
existingNodeInfo *csiv1alpha1.CSINodeInfo
inputNodeID string
inputTopology *csi.Topology
inputVolumeLimit int64
expectedNodeIDMap map[string]string
expectedTopologyMap map[string]sets.String
expectedLabels map[string]string
expectNoNodeInfo bool
expectedVolumeLimit int64
expectFail bool
}
type nodeIDMap map[string]string
type topologyKeyMap map[string][]string
type labelMap map[string]string
// TestAddNodeInfo tests AddNodeInfo with various existing Node and/or CSINodeInfo objects.
// The node IDs in all test cases below are the same between the Node annotation and CSINodeInfo.
func TestAddNodeInfo(t *testing.T) {
testcases := []testcase{
{
name: "empty node",
driverName: "com.example.csi/driver1",
existingNode: generateNode(nil /* nodeIDs */, nil /* labels */, nil /*capacity*/),
inputNodeID: "com.example.csi/csi-node1",
inputTopology: &csi.Topology{
Segments: map[string]string{
"com.example.csi/zone": "zoneA",
},
},
expectedNodeIDMap: map[string]string{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
expectedTopologyMap: map[string]sets.String{
"com.example.csi/driver1": sets.NewString("com.example.csi/zone"),
},
expectedLabels: map[string]string{"com.example.csi/zone": "zoneA"},
},
{
name: "pre-existing node info from the same driver",
driverName: "com.example.csi/driver1",
existingNode: generateNode(
nodeIDMap{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
labelMap{
"com.example.csi/zone": "zoneA",
},
nil /*capacity*/),
existingNodeInfo: generateNodeInfo(
nodeIDMap{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
topologyKeyMap{
"com.example.csi/driver1": {"com.example.csi/zone"},
},
),
inputNodeID: "com.example.csi/csi-node1",
inputTopology: &csi.Topology{
Segments: map[string]string{
"com.example.csi/zone": "zoneA",
},
},
expectedNodeIDMap: map[string]string{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
expectedTopologyMap: map[string]sets.String{
"com.example.csi/driver1": sets.NewString("com.example.csi/zone"),
},
expectedLabels: map[string]string{
"com.example.csi/zone": "zoneA",
},
},
{
name: "pre-existing node info from the same driver, but without topology info",
driverName: "com.example.csi/driver1",
existingNode: generateNode(
nodeIDMap{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
nil /* labels */, nil /*capacity*/),
existingNodeInfo: generateNodeInfo(
nodeIDMap{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
nil, /* topologyKeys */
),
inputNodeID: "com.example.csi/csi-node1",
inputTopology: &csi.Topology{
Segments: map[string]string{
"com.example.csi/zone": "zoneA",
},
},
expectedNodeIDMap: map[string]string{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
expectedTopologyMap: map[string]sets.String{
"com.example.csi/driver1": sets.NewString("com.example.csi/zone"),
},
expectedLabels: map[string]string{
"com.example.csi/zone": "zoneA",
},
},
{
name: "pre-existing node info from different driver",
driverName: "com.example.csi/driver1",
existingNode: generateNode(
nodeIDMap{
"net.example.storage/other-driver": "net.example.storage/test-node",
},
labelMap{
"net.example.storage/rack": "rack1",
}, nil /*capacity*/),
existingNodeInfo: generateNodeInfo(
nodeIDMap{
"net.example.storage/other-driver": "net.example.storage/test-node",
},
topologyKeyMap{
"net.example.storage/other-driver": {"net.example.storage/rack"},
},
),
inputNodeID: "com.example.csi/csi-node1",
inputTopology: &csi.Topology{
Segments: map[string]string{
"com.example.csi/zone": "zoneA",
},
},
expectedNodeIDMap: map[string]string{
"com.example.csi/driver1": "com.example.csi/csi-node1",
"net.example.storage/other-driver": "net.example.storage/test-node",
},
expectedTopologyMap: map[string]sets.String{
"com.example.csi/driver1": sets.NewString("com.example.csi/zone"),
"net.example.storage/other-driver": sets.NewString("net.example.storage/rack"),
},
expectedLabels: map[string]string{
"com.example.csi/zone": "zoneA",
"net.example.storage/rack": "rack1",
},
},
{
name: "pre-existing node info from the same driver, but different node ID and topology values; labels should conflict",
driverName: "com.example.csi/driver1",
existingNode: generateNode(
nodeIDMap{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
labelMap{
"com.example.csi/zone": "zoneA",
}, nil /*capacity*/),
existingNodeInfo: generateNodeInfo(
nodeIDMap{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
topologyKeyMap{
"com.example.csi/driver1": {"com.example.csi/zone"},
},
),
inputNodeID: "com.example.csi/csi-node1",
inputTopology: &csi.Topology{
Segments: map[string]string{
"com.example.csi/zone": "other-zone",
},
},
expectFail: true,
},
{
name: "pre-existing node info from the same driver, but different node ID and topology keys; new labels should be added",
driverName: "com.example.csi/driver1",
existingNode: generateNode(
nodeIDMap{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
labelMap{
"com.example.csi/zone": "zoneA",
}, nil /*capacity*/),
existingNodeInfo: generateNodeInfo(
nodeIDMap{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
topologyKeyMap{
"com.example.csi/driver1": {"com.example.csi/zone"},
},
),
inputNodeID: "com.example.csi/other-node",
inputTopology: &csi.Topology{
Segments: map[string]string{
"com.example.csi/rack": "rack1",
},
},
expectedNodeIDMap: map[string]string{
"com.example.csi/driver1": "com.example.csi/other-node",
},
expectedTopologyMap: map[string]sets.String{
"com.example.csi/driver1": sets.NewString("com.example.csi/rack"),
},
expectedLabels: map[string]string{
"com.example.csi/zone": "zoneA",
"com.example.csi/rack": "rack1",
},
},
{
name: "nil topology, empty node",
driverName: "com.example.csi/driver1",
existingNode: generateNode(nil /* nodeIDs */, nil /* labels */, nil /*capacity*/),
inputNodeID: "com.example.csi/csi-node1",
inputTopology: nil,
expectedNodeIDMap: map[string]string{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
expectedTopologyMap: map[string]sets.String{
"com.example.csi/driver1": nil,
},
expectedLabels: nil,
},
{
name: "nil topology, pre-existing node info from the same driver",
driverName: "com.example.csi/driver1",
existingNode: generateNode(
nodeIDMap{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
labelMap{
"com.example.csi/zone": "zoneA",
}, nil /*capacity*/),
existingNodeInfo: generateNodeInfo(
nodeIDMap{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
topologyKeyMap{
"com.example.csi/driver1": {"com.example.csi/zone"},
},
),
inputNodeID: "com.example.csi/csi-node1",
inputTopology: nil,
expectedNodeIDMap: map[string]string{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
expectedTopologyMap: map[string]sets.String{
"com.example.csi/driver1": nil,
},
expectedLabels: map[string]string{
"com.example.csi/zone": "zoneA", // old labels are not removed
},
},
{
name: "nil topology, pre-existing node info from different driver",
driverName: "com.example.csi/driver1",
existingNode: generateNode(
nodeIDMap{
"net.example.storage/other-driver": "net.example.storage/test-node",
},
labelMap{
"net.example.storage/rack": "rack1",
}, nil /*capacity*/),
existingNodeInfo: generateNodeInfo(
nodeIDMap{
"net.example.storage/other-driver": "net.example.storage/test-node",
},
topologyKeyMap{
"net.example.storage/other-driver": {"net.example.storage/rack"},
},
),
inputNodeID: "com.example.csi/csi-node1",
inputTopology: nil,
expectedNodeIDMap: map[string]string{
"com.example.csi/driver1": "com.example.csi/csi-node1",
"net.example.storage/other-driver": "net.example.storage/test-node",
},
expectedTopologyMap: map[string]sets.String{
"net.example.storage/other-driver": sets.NewString("net.example.storage/rack"),
"com.example.csi/driver1": nil,
},
expectedLabels: map[string]string{
"net.example.storage/rack": "rack1",
},
},
{
name: "empty node ID",
driverName: "com.example.csi/driver1",
existingNode: generateNode(nil /* nodeIDs */, nil /* labels */, nil /*capacity*/),
inputNodeID: "",
expectFail: true,
},
{
name: "new node with valid max limit",
driverName: "com.example.csi/driver1",
existingNode: generateNode(nil /*nodeIDs*/, nil /*labels*/, nil /*capacity*/),
inputVolumeLimit: 10,
inputTopology: nil,
inputNodeID: "com.example.csi/csi-node1",
expectedVolumeLimit: 10,
expectedNodeIDMap: map[string]string{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
expectedTopologyMap: map[string]sets.String{
"com.example.csi/driver1": nil,
},
expectedLabels: nil,
},
{
name: "node with existing valid max limit",
driverName: "com.example.csi/driver1",
existingNode: generateNode(
nil, /*nodeIDs*/
nil, /*labels*/
map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: *resource.NewScaledQuantity(4, -3),
v1.ResourceName(util.GetCSIAttachLimitKey("com.example.csi/driver1")): *resource.NewQuantity(10, resource.DecimalSI),
}),
inputVolumeLimit: 20,
inputTopology: nil,
inputNodeID: "com.example.csi/csi-node1",
expectedVolumeLimit: 20,
expectedNodeIDMap: map[string]string{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
expectedTopologyMap: map[string]sets.String{
"com.example.csi/driver1": nil,
},
expectedLabels: nil,
},
}
test(t, true /* addNodeInfo */, true /* csiNodeInfoEnabled */, testcases)
}
// TestAddNodeInfo_CSINodeInfoDisabled tests AddNodeInfo with various existing Node annotations
// and CSINodeInfo feature gate disabled.
func TestAddNodeInfo_CSINodeInfoDisabled(t *testing.T) {
testcases := []testcase{
{
name: "empty node",
driverName: "com.example.csi/driver1",
existingNode: generateNode(nil /* nodeIDs */, nil /* labels */, nil /*capacity*/),
inputNodeID: "com.example.csi/csi-node1",
expectedNodeIDMap: map[string]string{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
},
{
name: "pre-existing node info from the same driver",
driverName: "com.example.csi/driver1",
existingNode: generateNode(
nodeIDMap{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
nil /* labels */, nil /*capacity*/),
inputNodeID: "com.example.csi/csi-node1",
expectedNodeIDMap: map[string]string{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
},
{
name: "pre-existing node info from different driver",
driverName: "com.example.csi/driver1",
existingNode: generateNode(
nodeIDMap{
"net.example.storage/other-driver": "net.example.storage/test-node",
},
nil /* labels */, nil /*capacity*/),
inputNodeID: "com.example.csi/csi-node1",
expectedNodeIDMap: map[string]string{
"com.example.csi/driver1": "com.example.csi/csi-node1",
"net.example.storage/other-driver": "net.example.storage/test-node",
},
},
}
test(t, true /* addNodeInfo */, false /* csiNodeInfoEnabled */, testcases)
}
// TestRemoveNodeInfo tests RemoveNodeInfo with various existing Node and/or CSINodeInfo objects.
func TestRemoveNodeInfo(t *testing.T) {
testcases := []testcase{
{
name: "empty node and no CSINodeInfo",
driverName: "com.example.csi/driver1",
existingNode: generateNode(nil /* nodeIDs */, nil /* labels */, nil /*capacity*/),
expectedNodeIDMap: nil,
expectedLabels: nil,
expectNoNodeInfo: true,
},
{
name: "pre-existing node info from the same driver",
driverName: "com.example.csi/driver1",
existingNode: generateNode(
nodeIDMap{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
labelMap{
"com.example.csi/zone": "zoneA",
}, nil /*capacity*/),
existingNodeInfo: generateNodeInfo(
nodeIDMap{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
topologyKeyMap{
"com.example.csi/driver1": {"com.example.csi/zone"},
},
),
expectedNodeIDMap: nil,
expectedLabels: map[string]string{"com.example.csi/zone": "zoneA"},
expectNoNodeInfo: true,
},
{
name: "pre-existing node info from different driver",
driverName: "com.example.csi/driver1",
existingNode: generateNode(
nodeIDMap{
"net.example.storage/other-driver": "net.example.storage/csi-node1",
},
labelMap{
"net.example.storage/zone": "zoneA",
}, nil /*capacity*/),
existingNodeInfo: generateNodeInfo(
nodeIDMap{
"net.example.storage/other-driver": "net.example.storage/csi-node1",
},
topologyKeyMap{
"net.example.storage/other-driver": {"net.example.storage/zone"},
},
),
expectedNodeIDMap: map[string]string{
"net.example.storage/other-driver": "net.example.storage/csi-node1",
},
expectedTopologyMap: map[string]sets.String{
"net.example.storage/other-driver": sets.NewString("net.example.storage/zone"),
},
expectedLabels: map[string]string{"net.example.storage/zone": "zoneA"},
},
{
name: "pre-existing info about the same driver in node, but no CSINodeInfo",
driverName: "com.example.csi/driver1",
existingNode: generateNode(
nodeIDMap{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
nil /* labels */, nil /*capacity*/),
expectedNodeIDMap: nil,
expectedLabels: nil,
expectNoNodeInfo: true,
},
{
name: "pre-existing info about a different driver in node, but no CSINodeInfo",
existingNode: generateNode(
nodeIDMap{
"net.example.storage/other-driver": "net.example.storage/csi-node1",
},
nil /* labels */, nil /*capacity*/),
expectedNodeIDMap: map[string]string{
"net.example.storage/other-driver": "net.example.storage/csi-node1",
},
expectedLabels: nil,
expectNoNodeInfo: true,
},
{
name: "new node with valid max limit",
driverName: "com.example.csi/driver1",
existingNode: generateNode(
nil, /*nodeIDs*/
nil, /*labels*/
map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: *resource.NewScaledQuantity(4, -3),
v1.ResourceName(util.GetCSIAttachLimitKey("com.example.csi/driver1")): *resource.NewQuantity(10, resource.DecimalSI),
},
),
inputTopology: nil,
inputNodeID: "com.example.csi/csi-node1",
expectNoNodeInfo: true,
expectedVolumeLimit: 0,
},
}
test(t, false /* addNodeInfo */, true /* csiNodeInfoEnabled */, testcases)
}
// TestRemoveNodeInfo_CSINodeInfoDisabled tests RemoveNodeInfo with various existing Node
// objects and the CSINodeInfo feature disabled.
func TestRemoveNodeInfo_CSINodeInfoDisabled(t *testing.T) {
testcases := []testcase{
{
name: "empty node",
driverName: "com.example.csi/driver1",
existingNode: generateNode(nil /* nodeIDs */, nil /* labels */, nil /*capacity*/),
expectedNodeIDMap: nil,
},
{
name: "pre-existing node info from the same driver",
driverName: "com.example.csi/driver1",
existingNode: generateNode(
nodeIDMap{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
nil /* labels */, nil /*capacity*/),
expectedNodeIDMap: nil,
},
{
name: "pre-existing node info from different driver",
driverName: "com.example.csi/driver1",
existingNode: generateNode(
nodeIDMap{
"net.example.storage/other-driver": "net.example.storage/csi-node1",
},
nil /* labels */, nil /*capacity*/),
expectedNodeIDMap: map[string]string{
"net.example.storage/other-driver": "net.example.storage/csi-node1",
},
},
}
test(t, false /* addNodeInfo */, false /* csiNodeInfoEnabled */, testcases)
}
func TestAddNodeInfoExistingAnnotation(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSINodeInfo, true)()
driverName := "com.example.csi/driver1"
nodeID := "com.example.csi/some-node"
testcases := []struct {
name string
existingNode *v1.Node
}{
{
name: "pre-existing info about the same driver in node, but no CSINodeInfo",
existingNode: generateNode(
nodeIDMap{
"com.example.csi/driver1": "com.example.csi/csi-node1",
},
nil /* labels */, nil /*capacity*/),
},
{
name: "pre-existing info about a different driver in node, but no CSINodeInfo",
existingNode: generateNode(
nodeIDMap{
"net.example.storage/other-driver": "net.example.storage/test-node",
},
nil /* labels */, nil /*capacity*/),
},
}
for _, tc := range testcases {
t.Logf("test case: %q", tc.name)
// Arrange
nodeName := tc.existingNode.Name
client := fake.NewSimpleClientset(tc.existingNode)
csiClient := csifake.NewSimpleClientset()
tmpDir, err := utiltesting.MkTmpdir("nodeinfomanager-test")
if err != nil {
t.Fatalf("can't create temp dir: %v", err)
}
host := volumetest.NewFakeVolumeHostWithCSINodeName(
tmpDir,
client,
csiClient,
nil,
nodeName,
)
nim := NewNodeInfoManager(types.NodeName(nodeName), host)
// Act
err = nim.AddNodeInfo(driverName, nodeID, 0 /* maxVolumeLimit */, nil) // TODO test maxVolumeLimit
if err != nil {
t.Errorf("expected no error from AddNodeInfo call but got: %v", err)
continue
}
// Assert
nodeInfo, err := csiClient.Csi().CSINodeInfos().Get(nodeName, metav1.GetOptions{})
if err != nil {
t.Errorf("error getting CSINodeInfo: %v", err)
continue
}
if len(nodeInfo.CSIDrivers) != 1 {
t.Errorf("expected 1 CSIDriverInfo entry but got: %d", len(nodeInfo.CSIDrivers))
continue
}
driver := nodeInfo.CSIDrivers[0]
if driver.Driver != driverName || driver.NodeID != nodeID {
t.Errorf("expected Driver to be %q and NodeID to be %q, but got: %q:%q", driverName, nodeID, driver.Driver, driver.NodeID)
}
}
}
func test(t *testing.T, addNodeInfo bool, csiNodeInfoEnabled bool, testcases []testcase) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSINodeInfo, csiNodeInfoEnabled)()
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
for _, tc := range testcases {
t.Logf("test case: %q", tc.name)
// Arrange
nodeName := tc.existingNode.Name
client := fake.NewSimpleClientset(tc.existingNode)
var csiClient *csifake.Clientset
if tc.existingNodeInfo == nil {
csiClient = csifake.NewSimpleClientset()
} else {
csiClient = csifake.NewSimpleClientset(tc.existingNodeInfo)
}
tmpDir, err := utiltesting.MkTmpdir("nodeinfomanager-test")
if err != nil {
t.Fatalf("can't create temp dir: %v", err)
}
host := volumetest.NewFakeVolumeHostWithCSINodeName(
tmpDir,
client,
csiClient,
nil,
nodeName,
)
nim := NewNodeInfoManager(types.NodeName(nodeName), host)
// Act
if addNodeInfo {
err = nim.AddNodeInfo(tc.driverName, tc.inputNodeID, tc.inputVolumeLimit, tc.inputTopology)
} else {
err = nim.RemoveNodeInfo(tc.driverName)
}
// Assert
if tc.expectFail {
if err == nil {
t.Errorf("expected an error from AddNodeInfo call but got none")
}
continue
} else if err != nil {
t.Errorf("expected no error from AddNodeInfo call but got: %v", err)
continue
}
// Node validation
node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
t.Errorf("error getting node: %v", err)
continue
}
// We are testing max volume limits
attachLimit := getVolumeLimit(node, tc.driverName)
if attachLimit != tc.expectedVolumeLimit {
t.Errorf("expected volume limit to be %d got %d", tc.expectedVolumeLimit, attachLimit)
continue
}
// Node ID annotation
annNodeID, ok := node.Annotations[annotationKeyNodeID]
if ok {
if tc.expectedNodeIDMap == nil {
t.Errorf("expected annotation %q to not exist, but got: %q", annotationKeyNodeID, annNodeID)
} else {
var actualNodeIDs map[string]string
err = json.Unmarshal([]byte(annNodeID), &actualNodeIDs)
if err != nil {
t.Errorf("expected no error when parsing annotation %q, but got error: %v", annotationKeyNodeID, err)
}
if !helper.Semantic.DeepEqual(actualNodeIDs, tc.expectedNodeIDMap) {
t.Errorf("expected annotation %v; got: %v", tc.expectedNodeIDMap, actualNodeIDs)
}
}
} else {
if tc.expectedNodeIDMap != nil {
t.Errorf("expected annotation %q, but got none", annotationKeyNodeID)
}
}
if csiNodeInfoEnabled {
// Topology labels
if !helper.Semantic.DeepEqual(node.Labels, tc.expectedLabels) {
t.Errorf("expected topology labels to be %v; got: %v", tc.expectedLabels, node.Labels)
}
// CSINodeInfo validation
nodeInfo, err := csiClient.Csi().CSINodeInfos().Get(nodeName, metav1.GetOptions{})
if tc.expectNoNodeInfo && errors.IsNotFound(err) {
continue
} else if err != nil {
t.Errorf("error getting CSINodeInfo: %v", err)
continue
}
// Extract node IDs and topology keys
actualNodeIDs := make(map[string]string)
actualTopologyKeys := make(map[string]sets.String)
for _, driver := range nodeInfo.CSIDrivers {
actualNodeIDs[driver.Driver] = driver.NodeID
actualTopologyKeys[driver.Driver] = sets.NewString(driver.TopologyKeys...)
}
// Node IDs
if !helper.Semantic.DeepEqual(actualNodeIDs, tc.expectedNodeIDMap) {
t.Errorf("expected node IDs %v from CSINodeInfo; got: %v", tc.expectedNodeIDMap, actualNodeIDs)
}
// Topology keys
if !helper.Semantic.DeepEqual(actualTopologyKeys, tc.expectedTopologyMap) {
t.Errorf("expected topology keys %v from CSINodeInfo; got: %v", tc.expectedTopologyMap, actualTopologyKeys)
}
}
}
}
func getVolumeLimit(node *v1.Node, driverName string) int64 {
volumeLimits := map[v1.ResourceName]int64{}
nodeAllocatables := node.Status.Allocatable
for k, v := range nodeAllocatables {
if v1helper.IsAttachableVolumeResourceName(k) {
volumeLimits[k] = v.Value()
}
}
attachKey := v1.ResourceName(util.GetCSIAttachLimitKey(driverName))
attachLimit := volumeLimits[attachKey]
return attachLimit
}
func generateNode(nodeIDs, labels map[string]string, capacity map[v1.ResourceName]resource.Quantity) *v1.Node {
var annotations map[string]string
if len(nodeIDs) > 0 {
b, _ := json.Marshal(nodeIDs)
annotations = map[string]string{annotationKeyNodeID: string(b)}
}
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
Annotations: annotations,
Labels: labels,
},
}
if len(capacity) > 0 {
node.Status.Capacity = v1.ResourceList(capacity)
node.Status.Allocatable = v1.ResourceList(capacity)
}
return node
}
func generateNodeInfo(nodeIDs map[string]string, topologyKeys map[string][]string) *csiv1alpha1.CSINodeInfo {
var drivers []csiv1alpha1.CSIDriverInfo
for k, nodeID := range nodeIDs {
d := csiv1alpha1.CSIDriverInfo{
Driver: k,
NodeID: nodeID,
}
if top, exists := topologyKeys[k]; exists {
d.TopologyKeys = top
}
drivers = append(drivers, d)
}
return &csiv1alpha1.CSINodeInfo{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
CSIDrivers: drivers,
}
}

View File

@@ -16,10 +16,10 @@ go_library(
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
],
)
@@ -32,12 +32,12 @@ go_test(
"//pkg/volume:go_default_library",
"//pkg/volume/empty_dir:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
)

View File

@@ -20,8 +20,6 @@ import (
"fmt"
"path"
"path/filepath"
"sort"
"strings"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
@@ -187,6 +185,7 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
return err
}
setupSuccess := false
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
glog.Errorf("Unable to setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
return err
@@ -196,6 +195,21 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
return err
}
defer func() {
// Clean up directories if setup fails
if !setupSuccess {
unmounter, unmountCreateErr := b.plugin.NewUnmounter(b.volName, b.podUID)
if unmountCreateErr != nil {
glog.Errorf("error cleaning up mount %s after failure. Create unmounter failed with %v", b.volName, unmountCreateErr)
return
}
tearDownErr := unmounter.TearDown()
if tearDownErr != nil {
glog.Errorf("error tearing down volume %s with : %v", b.volName, tearDownErr)
}
}
}()
writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName)
writer, err := volumeutil.NewAtomicWriter(dir, writerContext)
if err != nil {
@@ -215,6 +229,7 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
return err
}
setupSuccess = true
return nil
}
@@ -244,7 +259,7 @@ func CollectData(items []v1.DownwardAPIVolumeFile, pod *v1.Pod, host volume.Volu
glog.Errorf("Unable to extract field %s: %s", fileInfo.FieldRef.FieldPath, err.Error())
errlist = append(errlist, err)
} else {
fileProjection.Data = []byte(sortLines(values))
fileProjection.Data = []byte(values)
}
} else if fileInfo.ResourceFieldRef != nil {
containerName := fileInfo.ResourceFieldRef.ContainerName
@@ -255,7 +270,7 @@ func CollectData(items []v1.DownwardAPIVolumeFile, pod *v1.Pod, host volume.Volu
glog.Errorf("Unable to extract field %s: %s", fileInfo.ResourceFieldRef.Resource, err.Error())
errlist = append(errlist, err)
} else {
fileProjection.Data = []byte(sortLines(values))
fileProjection.Data = []byte(values)
}
}
@@ -264,14 +279,6 @@ func CollectData(items []v1.DownwardAPIVolumeFile, pod *v1.Pod, host volume.Volu
return data, utilerrors.NewAggregate(errlist)
}
// sortLines sorts the strings generated from map based data
// (annotations and labels)
func sortLines(values string) string {
splitted := strings.Split(values, "\n")
sort.Strings(splitted)
return strings.Join(splitted, "\n")
}
func (d *downwardAPIVolume) GetPath() string {
return d.plugin.host.GetPodVolumeDir(d.podUID, utilstrings.EscapeQualifiedNameForDisk(downwardAPIPluginName), d.volName)
}

View File

@@ -82,8 +82,9 @@ func TestDownwardAPI(t *testing.T) {
"key3": "value3",
}
annotations := map[string]string{
"a1": "value1",
"a2": "value2",
"a1": "value1",
"a2": "value2",
"multiline": "c\nb\na",
}
testCases := []struct {
name string
@@ -318,8 +319,8 @@ func doVerifyLinesInFile(t *testing.T, volumePath, filename string, expected str
t.Errorf(err.Error())
return
}
actualStr := sortLines(string(data))
expectedStr := sortLines(expected)
actualStr := string(data)
expectedStr := expected
if actualStr != expectedStr {
t.Errorf("Found `%s`, expected `%s`", actualStr, expectedStr)
}

View File

@@ -11,42 +11,9 @@ go_library(
srcs = [
"doc.go",
"empty_dir.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"empty_dir_linux.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"empty_dir_unsupported.go",
],
"//conditions:default": [],
}),
"empty_dir_linux.go",
"empty_dir_unsupported.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/empty_dir",
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
@@ -54,11 +21,11 @@ go_library(
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:linux": [
"//vendor/golang.org/x/sys/unix:go_default_library",
@@ -69,12 +36,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = select({
"@io_bazel_rules_go//go/platform:linux": [
"empty_dir_test.go",
],
"//conditions:default": [],
}),
srcs = ["empty_dir_test.go"],
embed = [":go_default_library"],
deps = select({
"@io_bazel_rules_go//go/platform:linux": [
@@ -82,11 +44,11 @@ go_test(
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
"//conditions:default": [],
}),

View File

@@ -23,11 +23,11 @@ go_library(
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
],
)
@@ -43,11 +43,11 @@ go_test(
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
)

View File

@@ -40,8 +40,12 @@ type fcAttacher struct {
var _ volume.Attacher = &fcAttacher{}
var _ volume.DeviceMounter = &fcAttacher{}
var _ volume.AttachableVolumePlugin = &fcPlugin{}
var _ volume.DeviceMountableVolumePlugin = &fcPlugin{}
func (plugin *fcPlugin) NewAttacher() (volume.Attacher, error) {
return &fcAttacher{
host: plugin.host,
@@ -49,9 +53,13 @@ func (plugin *fcPlugin) NewAttacher() (volume.Attacher, error) {
}, nil
}
func (plugin *fcPlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return plugin.NewAttacher()
}
func (plugin *fcPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
return mounter.GetMountRefs(deviceMountPath)
}
func (attacher *fcAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
@@ -129,6 +137,8 @@ type fcDetacher struct {
var _ volume.Detacher = &fcDetacher{}
var _ volume.DeviceUnmounter = &fcDetacher{}
func (plugin *fcPlugin) NewDetacher() (volume.Detacher, error) {
return &fcDetacher{
mounter: plugin.host.GetMounter(plugin.GetPluginName()),
@@ -136,6 +146,10 @@ func (plugin *fcPlugin) NewDetacher() (volume.Detacher, error) {
}, nil
}
func (plugin *fcPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return plugin.NewDetacher()
}
func (detacher *fcDetacher) Detach(volumeName string, nodeName types.NodeName) error {
return nil
}

View File

@@ -237,7 +237,7 @@ func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volu
// globalPDPath : plugins/kubernetes.io/fc/50060e801049cfd1-lun-0
var globalPDPath string
mounter := plugin.host.GetMounter(plugin.GetPluginName())
paths, err := mount.GetMountRefs(mounter, mountPath)
paths, err := mounter.GetMountRefs(mountPath)
if err != nil {
return nil, err
}

View File

@@ -441,7 +441,7 @@ func Test_ConstructVolumeSpec(t *testing.T) {
"/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod2",
}
for _, path := range mountPaths {
refs, err := mount.GetMountRefs(fm, path)
refs, err := fm.GetMountRefs(path)
if err != nil {
t.Errorf("couldn't get mountrefs. err: %v", err)
}
@@ -488,7 +488,7 @@ func Test_ConstructVolumeSpecNoRefs(t *testing.T) {
"/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1",
}
for _, path := range mountPaths {
refs, _ := mount.GetMountRefs(fm, path)
refs, _ := fm.GetMountRefs(path)
var globalPDPath string
for _, ref := range refs {
if strings.Contains(ref, "kubernetes.io/fc") {

View File

@@ -365,7 +365,7 @@ func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath stri
if err.Error() != volumepathhandler.ErrDeviceNotFound {
return fmt.Errorf("fc: failed to get loopback for destination path: %v, err: %v", dstPath, err)
}
glog.Warning("fc: loopback for destination path: %s not found", dstPath)
glog.Warningf("fc: loopback for destination path: %s not found", dstPath)
}
// Detach volume from kubelet node
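
Editor's note: the one-character fix above matters because glog.Warning is Print-style: it concatenates its operands and leaves format verbs such as %s uninterpreted, which is exactly what go vet flags. A small illustration, using the standard log package as a stand-in for glog:

    package main

    import "log"

    func main() {
    	dst := "/var/lib/kubelet/plugins/kubernetes.io/fc/volumeDevices/pod1"
    	// Print-style: operands are concatenated and %s stays literal
    	// (go vet reports "Print call has possible formatting directive").
    	log.Print("fc: loopback for destination path: %s not found", dst)
    	// Printf-style, as in the fix above: the verb is interpreted.
    	log.Printf("fc: loopback for destination path: %s not found", dst)
    }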

View File

@@ -32,11 +32,11 @@ go_library(
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/github.com/fsnotify/fsnotify:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
@@ -60,12 +60,12 @@ go_test(
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//vendor/github.com/fsnotify/fsnotify:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/exec/testing:go_default_library",
],

View File

@@ -30,13 +30,13 @@ type attacherDefaults flexVolumeAttacher
// Attach is part of the volume.Attacher interface
func (a *attacherDefaults) Attach(spec *volume.Spec, hostName types.NodeName) (string, error) {
glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default Attach for volume ", spec.Name, ", host ", hostName)
glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default Attach for volume ", spec.Name(), ", host ", hostName)
return "", nil
}
// WaitForAttach is part of the volume.Attacher interface
func (a *attacherDefaults) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) {
glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default WaitForAttach for volume ", spec.Name, ", device ", devicePath)
glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default WaitForAttach for volume ", spec.Name(), ", device ", devicePath)
return devicePath, nil
}
@@ -47,7 +47,7 @@ func (a *attacherDefaults) GetDeviceMountPath(spec *volume.Spec, mountsDir strin
// MountDevice is part of the volume.Attacher interface
func (a *attacherDefaults) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, mounter mount.Interface) error {
glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default MountDevice for volume ", spec.Name, ", device ", devicePath, ", deviceMountPath ", deviceMountPath)
glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default MountDevice for volume ", spec.Name(), ", device ", devicePath, ", deviceMountPath ", deviceMountPath)
volSourceFSType, err := getFSType(spec)
if err != nil {
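
Editor's note: these defaults hunks change spec.Name to spec.Name(). Spec exposes the volume name through a method, and passing the method value without calling it makes glog print a function address instead of the name. A self-contained illustration; Spec here is a hypothetical stand-in:

    package main

    import "fmt"

    // Spec is a hypothetical stand-in exposing the volume name via a method.
    type Spec struct{ name string }

    func (s *Spec) Name() string { return s.name }

    func main() {
    	spec := &Spec{name: "vol-1"}
    	// Without the call parentheses a method value is passed, and fmt
    	// prints its address (something like 0x47e020), not the name.
    	fmt.Println("using default Attach for volume ", spec.Name)
    	// With the call, as in the fixed lines above:
    	fmt.Println("using default Attach for volume ", spec.Name()) // ... vol-1
    }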

View File

@@ -31,6 +31,8 @@ type flexVolumeAttacher struct {
var _ volume.Attacher = &flexVolumeAttacher{}
var _ volume.DeviceMounter = &flexVolumeAttacher{}
// Attach is part of the volume.Attacher interface
func (a *flexVolumeAttacher) Attach(spec *volume.Spec, hostName types.NodeName) (string, error) {

View File

@@ -32,6 +32,8 @@ type flexVolumeDetacher struct {
var _ volume.Detacher = &flexVolumeDetacher{}
var _ volume.DeviceUnmounter = &flexVolumeDetacher{}
// Detach is part of the volume.Detacher interface.
func (d *flexVolumeDetacher) Detach(volumeName string, hostName types.NodeName) error {
@@ -49,17 +51,24 @@ func (d *flexVolumeDetacher) Detach(volumeName string, hostName types.NodeName)
// UnmountDevice is part of the volume.Detacher interface.
func (d *flexVolumeDetacher) UnmountDevice(deviceMountPath string) error {
if pathExists, pathErr := util.PathExists(deviceMountPath); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
pathExists, pathErr := util.PathExists(deviceMountPath)
if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", deviceMountPath)
return nil
}
if pathErr != nil && !util.IsCorruptedMnt(pathErr) {
return fmt.Errorf("Error checking path: %v", pathErr)
}
notmnt, err := isNotMounted(d.plugin.host.GetMounter(d.plugin.GetPluginName()), deviceMountPath)
if err != nil {
return err
if util.IsCorruptedMnt(err) {
notmnt = false // A corrupted mount is still mounted; proceed to unmount it.
} else {
return err
}
}
if notmnt {
glog.Warningf("Warning: Path: %v already unmounted", deviceMountPath)
} else {
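
Editor's note: the rewritten UnmountDevice treats a "corrupted mount" error from the path check as still-mounted rather than fatal, so a FUSE mount whose daemon died can still be unmounted. Below is a sketch of that decision flow; util.IsCorruptedMnt is approximated with assumed errno semantics, and the helper names are hypothetical.

    package main

    import (
    	"fmt"
    	"os"
    	"syscall"
    )

    // isCorruptedMnt approximates util.IsCorruptedMnt: certain stat errors
    // indicate a mount whose backing daemon died rather than a missing path.
    func isCorruptedMnt(err error) bool {
    	pe, ok := err.(*os.PathError)
    	if !ok {
    		return false
    	}
    	errno, ok := pe.Err.(syscall.Errno)
    	if !ok {
    		return false
    	}
    	return errno == syscall.ENOTCONN || errno == syscall.ESTALE || errno == syscall.EIO
    }

    // shouldUnmount reproduces the flow of the rewritten UnmountDevice:
    // missing path -> skip; corrupted mount -> unmount; other errors -> fail.
    func shouldUnmount(path string) (bool, error) {
    	_, err := os.Stat(path)
    	switch {
    	case err == nil:
    		return true, nil // path exists and is healthy; check mount state next
    	case os.IsNotExist(err):
    		return false, nil // nothing mounted, nothing to do
    	case isCorruptedMnt(err):
    		return true, nil // corrupted mount: proceed with unmount
    	default:
    		return false, fmt.Errorf("error checking path: %v", err)
    	}
    }

    func main() {
    	ok, err := shouldUnmount("/nonexistent/mount")
    	fmt.Println(ok, err) // false <nil>
    }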

View File

@@ -28,6 +28,7 @@ import (
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/utils/exec"
)
const execScriptTempl1 = `#!/usr/bin/env bash
@@ -173,8 +174,9 @@ func TestCanSupport(t *testing.T) {
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
runner := exec.New()
installPluginUnderTest(t, "kubernetes.io", "fakeAttacher", tmpDir, execScriptTempl1, nil)
plugMgr.InitPlugins(nil, GetDynamicPluginProber(tmpDir), volumetest.NewFakeVolumeHost("fake", nil, nil))
plugMgr.InitPlugins(nil, GetDynamicPluginProber(tmpDir, runner), volumetest.NewFakeVolumeHost("fake", nil, nil))
plugin, err := plugMgr.FindPluginByName("flexvolume-kubernetes.io/fakeAttacher")
if err != nil {
t.Fatalf("Can't find the plugin by name")
@@ -201,8 +203,9 @@ func TestGetAccessModes(t *testing.T) {
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
runner := exec.New()
installPluginUnderTest(t, "kubernetes.io", "fakeAttacher", tmpDir, execScriptTempl1, nil)
plugMgr.InitPlugins(nil, GetDynamicPluginProber(tmpDir), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plugMgr.InitPlugins(nil, GetDynamicPluginProber(tmpDir, runner), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plugin, err := plugMgr.FindPersistentPluginByName("flexvolume-kubernetes.io/fakeAttacher")
if err != nil {

View File

@@ -58,13 +58,15 @@ type flexVolumeAttachablePlugin struct {
var _ volume.AttachableVolumePlugin = &flexVolumeAttachablePlugin{}
var _ volume.PersistentVolumePlugin = &flexVolumePlugin{}
var _ volume.DeviceMountableVolumePlugin = &flexVolumeAttachablePlugin{}
type PluginFactory interface {
NewFlexVolumePlugin(pluginDir, driverName string) (volume.VolumePlugin, error)
NewFlexVolumePlugin(pluginDir, driverName string, runner exec.Interface) (volume.VolumePlugin, error)
}
type pluginFactory struct{}
func (pluginFactory) NewFlexVolumePlugin(pluginDir, name string) (volume.VolumePlugin, error) {
func (pluginFactory) NewFlexVolumePlugin(pluginDir, name string, runner exec.Interface) (volume.VolumePlugin, error) {
execPath := path.Join(pluginDir, name)
driverName := utilstrings.UnescapePluginName(name)
@@ -72,7 +74,7 @@ func (pluginFactory) NewFlexVolumePlugin(pluginDir, name string) (volume.VolumeP
flexPlugin := &flexVolumePlugin{
driverName: driverName,
execPath: execPath,
runner: exec.New(),
runner: runner,
unsupportedCommands: []string{},
}
@@ -218,11 +220,19 @@ func (plugin *flexVolumeAttachablePlugin) NewAttacher() (volume.Attacher, error)
return &flexVolumeAttacher{plugin}, nil
}
func (plugin *flexVolumeAttachablePlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return plugin.NewAttacher()
}
// NewDetacher is part of the volume.AttachableVolumePlugin interface.
func (plugin *flexVolumeAttachablePlugin) NewDetacher() (volume.Detacher, error) {
return &flexVolumeDetacher{plugin}, nil
}
func (plugin *flexVolumeAttachablePlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return plugin.NewDetacher()
}
// ConstructVolumeSpec is part of the volume.AttachableVolumePlugin interface.
func (plugin *flexVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
flexVolume := &api.Volume{
@@ -265,7 +275,7 @@ func (plugin *flexVolumePlugin) isUnsupported(command string) bool {
func (plugin *flexVolumePlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
return mounter.GetMountRefs(deviceMountPath)
}
func (plugin *flexVolumePlugin) getDeviceMountPath(spec *volume.Spec) (string, error) {

View File

@@ -19,6 +19,7 @@ package flexvolume
import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/utils/exec"
"os"
@@ -26,16 +27,18 @@ import (
"path/filepath"
"sync"
"strings"
"github.com/fsnotify/fsnotify"
"k8s.io/apimachinery/pkg/util/errors"
utilfs "k8s.io/kubernetes/pkg/util/filesystem"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
"strings"
)
type flexVolumeProber struct {
mutex sync.Mutex
pluginDir string // Flexvolume driver directory
pluginDir string // Flexvolume driver directory
runner exec.Interface // Interface to use for execing flex calls
watcher utilfs.FSWatcher
factory PluginFactory
fs utilfs.Filesystem
@@ -43,11 +46,12 @@ type flexVolumeProber struct {
eventsMap map[string]volume.ProbeOperation // the key is the driver directory path, the value is the corresponding operation
}
func GetDynamicPluginProber(pluginDir string) volume.DynamicPluginProber {
func GetDynamicPluginProber(pluginDir string, runner exec.Interface) volume.DynamicPluginProber {
return &flexVolumeProber{
pluginDir: pluginDir,
watcher: utilfs.NewFsnotifyWatcher(),
factory: pluginFactory{},
runner: runner,
fs: &utilfs.DefaultFs{},
}
}
@@ -127,7 +131,7 @@ func (prober *flexVolumeProber) newProbeEvent(driverDirName string, op volume.Pr
Op: op,
}
if op == volume.ProbeAddOrUpdate {
plugin, pluginErr := prober.factory.NewFlexVolumePlugin(prober.pluginDir, driverDirName)
plugin, pluginErr := prober.factory.NewFlexVolumePlugin(prober.pluginDir, driverDirName, prober.runner)
if pluginErr != nil {
pluginErr = fmt.Errorf(
"Error creating Flexvolume plugin from directory %s, skipping. Error: %s",

View File

@@ -25,6 +25,7 @@ import (
"github.com/stretchr/testify/assert"
utilfs "k8s.io/kubernetes/pkg/util/filesystem"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/utils/exec"
)
const (
@@ -318,7 +319,7 @@ type fakePluginFactory struct {
var _ PluginFactory = fakePluginFactory{}
func (m fakePluginFactory) NewFlexVolumePlugin(_, driverName string) (volume.VolumePlugin, error) {
func (m fakePluginFactory) NewFlexVolumePlugin(_, driverName string, _ exec.Interface) (volume.VolumePlugin, error) {
if m.error {
return nil, fmt.Errorf("Flexvolume plugin error")
}

View File

@@ -44,13 +44,16 @@ func (f *flexVolumeUnmounter) TearDown() error {
func (f *flexVolumeUnmounter) TearDownAt(dir string) error {
if pathExists, pathErr := util.PathExists(dir); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
pathExists, pathErr := util.PathExists(dir)
if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
return nil
}
if pathErr != nil && !util.IsCorruptedMnt(pathErr) {
return fmt.Errorf("Error checking path: %v", pathErr)
}
call := f.plugin.NewDriverCall(unmountCmd)
call.Append(dir)
_, err := call.Run()

View File

@@ -21,13 +21,13 @@ go_library(
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//vendor/github.com/clusterhq/flocker-go:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
],
)
@@ -43,12 +43,12 @@ go_test(
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//vendor/github.com/clusterhq/flocker-go:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)

View File

@@ -49,7 +49,7 @@ func (util *FlockerUtil) DeleteVolume(d *flockerVolumeDeleter) error {
return d.flockerClient.DeleteDataset(datasetUUID)
}
func (util *FlockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID string, volumeSizeGB int, labels map[string]string, err error) {
func (util *FlockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID string, volumeSizeGiB int, labels map[string]string, err error) {
if c.flockerClient == nil {
c.flockerClient, err = c.plugin.newFlockerClient("")
@@ -74,7 +74,10 @@ func (util *FlockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
volumeSizeGB = int(volutil.RoundUpSize(requestBytes, 1024*1024*1024))
volumeSizeGiB, err = volutil.RoundUpToGiBInt(capacity)
if err != nil {
return
}
createOptions := &flockerapi.CreateDatasetOptions{
MaximumSize: requestBytes,

View File

@@ -21,18 +21,19 @@ go_library(
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/util/file:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
@@ -43,6 +44,7 @@ go_test(
"attacher_test.go",
"gce_pd_block_test.go",
"gce_pd_test.go",
"gce_util_test.go",
],
embed = [":go_default_library"],
deps = [
@@ -51,14 +53,14 @@ go_test(
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)

View File

@@ -29,7 +29,6 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
@@ -41,8 +40,12 @@ type gcePersistentDiskAttacher struct {
var _ volume.Attacher = &gcePersistentDiskAttacher{}
var _ volume.DeviceMounter = &gcePersistentDiskAttacher{}
var _ volume.AttachableVolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.DeviceMountableVolumePlugin = &gcePersistentDiskPlugin{}
func (plugin *gcePersistentDiskPlugin) NewAttacher() (volume.Attacher, error) {
gceCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
@@ -55,9 +58,13 @@ func (plugin *gcePersistentDiskPlugin) NewAttacher() (volume.Attacher, error) {
}, nil
}
func (plugin *gcePersistentDiskPlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return plugin.NewAttacher()
}
func (plugin *gcePersistentDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
return mounter.GetMountRefs(deviceMountPath)
}
// Attach checks with the GCE cloud provider if the specified volume is already
@@ -158,7 +165,7 @@ func (attacher *gcePersistentDiskAttacher) WaitForAttach(spec *volume.Spec, devi
select {
case <-ticker.C:
glog.V(5).Infof("Checking GCE PD %q is attached.", pdName)
path, err := verifyDevicePath(devicePaths, sdBeforeSet)
path, err := verifyDevicePath(devicePaths, sdBeforeSet, pdName)
if err != nil {
// Log error, if any, and continue checking periodically. See issue #11321
glog.Errorf("Error verifying GCE PD (%q) is attached: %v", pdName, err)
@@ -227,6 +234,8 @@ type gcePersistentDiskDetacher struct {
var _ volume.Detacher = &gcePersistentDiskDetacher{}
var _ volume.DeviceUnmounter = &gcePersistentDiskDetacher{}
func (plugin *gcePersistentDiskPlugin) NewDetacher() (volume.Detacher, error) {
gceCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
@@ -239,6 +248,10 @@ func (plugin *gcePersistentDiskPlugin) NewDetacher() (volume.Detacher, error) {
}, nil
}
func (plugin *gcePersistentDiskPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return plugin.NewDetacher()
}
// Detach checks with the GCE cloud provider if the specified volume is already
// attached to the specified node. If the volume is not attached, it succeeds
// (returns nil). If it is attached, Detach issues a call to the GCE cloud

View File

@@ -32,6 +32,7 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/features"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
@@ -303,7 +304,7 @@ func (plugin *gcePersistentDiskPlugin) ConstructVolumeSpec(volumeName, mountPath
// Abstract interface to PD operations.
type pdManager interface {
// Creates a volume
CreateVolume(provisioner *gcePersistentDiskProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error)
CreateVolume(provisioner *gcePersistentDiskProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error)
// Deletes a volume
DeleteVolume(deleter *gcePersistentDiskDeleter) error
}
@@ -471,7 +472,7 @@ func (c *gcePersistentDiskProvisioner) Provision(selectedNode *v1.Node, allowedT
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
}
volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c)
volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c, selectedNode, allowedTopologies)
if err != nil {
return nil, err
}
@@ -480,6 +481,15 @@ func (c *gcePersistentDiskProvisioner) Provision(selectedNode *v1.Node, allowedT
fstype = "ext4"
}
var volumeMode *v1.PersistentVolumeMode
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
volumeMode = c.options.PVC.Spec.VolumeMode
if volumeMode != nil && *volumeMode == v1.PersistentVolumeBlock {
// Block volumes should not have any FSType
fstype = ""
}
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: c.options.PVName,
@@ -492,8 +502,9 @@ func (c *gcePersistentDiskProvisioner) Provision(selectedNode *v1.Node, allowedT
PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
AccessModes: c.options.PVC.Spec.AccessModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dG", sizeGB)),
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
VolumeMode: volumeMode,
PersistentVolumeSource: v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: volumeID,
@@ -509,17 +520,31 @@ func (c *gcePersistentDiskProvisioner) Provision(selectedNode *v1.Node, allowedT
pv.Spec.AccessModes = c.plugin.GetAccessModes()
}
requirements := make([]v1.NodeSelectorRequirement, 0)
if len(labels) != 0 {
if pv.Labels == nil {
pv.Labels = make(map[string]string)
}
for k, v := range labels {
pv.Labels[k] = v
var values []string
if k == kubeletapis.LabelZoneFailureDomain {
values, err = util.LabelZonesToList(v)
if err != nil {
return nil, fmt.Errorf("failed to convert label string for Zone: %s to a List: %v", v, err)
}
} else {
values = []string{v}
}
requirements = append(requirements, v1.NodeSelectorRequirement{Key: k, Operator: v1.NodeSelectorOpIn, Values: values})
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
pv.Spec.VolumeMode = c.options.PVC.Spec.VolumeMode
if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) && len(requirements) > 0 {
pv.Spec.NodeAffinity = new(v1.VolumeNodeAffinity)
pv.Spec.NodeAffinity.Required = new(v1.NodeSelector)
pv.Spec.NodeAffinity.Required.NodeSelectorTerms = make([]v1.NodeSelectorTerm, 1)
pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions = requirements
}
return pv, nil
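
Editor's note: Provision now converts each PD label into a NodeSelectorRequirement, and the zone failure-domain label is special-cased because a regional disk stores several zones in one label value. The test value "zone1__zone2" suggests a "__" separator, which the sketch below assumes for util.LabelZonesToList:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // labelZonesToList sketches the assumed behavior of util.LabelZonesToList:
    // a regional disk's zone label stores its zones joined with "__".
    func labelZonesToList(v string) ([]string, error) {
    	if v == "" {
    		return nil, fmt.Errorf("empty zone label")
    	}
    	return strings.Split(v, "__"), nil
    }

    type nodeSelectorRequirement struct {
    	Key      string
    	Operator string
    	Values   []string
    }

    func main() {
    	zones, _ := labelZonesToList("zone1__zone2")
    	req := nodeSelectorRequirement{
    		Key:      "failure-domain.beta.kubernetes.io/zone",
    		Operator: "In",
    		Values:   zones,
    	}
    	fmt.Printf("%+v\n", req) // {Key:... Operator:In Values:[zone1 zone2]}
    }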

View File

@@ -20,6 +20,8 @@ import (
"fmt"
"os"
"path"
"reflect"
"sort"
"testing"
"k8s.io/api/core/v1"
@@ -27,10 +29,12 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake"
utiltesting "k8s.io/client-go/util/testing"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
func TestCanSupport(t *testing.T) {
@@ -78,9 +82,10 @@ func TestGetAccessModes(t *testing.T) {
type fakePDManager struct {
}
func (fake *fakePDManager) CreateVolume(c *gcePersistentDiskProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) {
func (fake *fakePDManager) CreateVolume(c *gcePersistentDiskProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) {
labels = make(map[string]string)
labels["fakepdmanager"] = "yes"
labels[kubeletapis.LabelZoneFailureDomain] = "zone1__zone2"
return "test-gce-volume-name", 100, labels, "", nil
}
@@ -91,6 +96,15 @@ func (fake *fakePDManager) DeleteVolume(cd *gcePersistentDiskDeleter) error {
return nil
}
func getNodeSelectorRequirementWithKey(key string, term v1.NodeSelectorTerm) (*v1.NodeSelectorRequirement, error) {
for _, r := range term.MatchExpressions {
if r.Key == key {
return &r, nil
}
}
return nil, fmt.Errorf("key %s not found", key)
}
func TestPlugin(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("gcepdTest")
if err != nil {
@@ -177,12 +191,40 @@ func TestPlugin(t *testing.T) {
}
cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
size := cap.Value()
if size != 100*util.GB {
if size != 100*util.GIB {
t.Errorf("Provision() returned unexpected volume size: %v", size)
}
if persistentSpec.Labels["fakepdmanager"] != "yes" {
t.Errorf("Provision() returned unexpected labels: %v", persistentSpec.Labels)
t.Errorf("Provision() returned unexpected value for fakepdmanager: %v", persistentSpec.Labels["fakepdmanager"])
}
if persistentSpec.Labels[kubeletapis.LabelZoneFailureDomain] != "zone1__zone2" {
t.Errorf("Provision() returned unexpected value for %s: %v", kubeletapis.LabelZoneFailureDomain, persistentSpec.Labels[kubeletapis.LabelZoneFailureDomain])
}
if persistentSpec.Spec.NodeAffinity == nil {
t.Errorf("Unexpected nil NodeAffinity found")
}
if len(persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms) != 1 {
t.Errorf("Unexpected number of NodeSelectorTerms")
}
term := persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms[0]
if len(term.MatchExpressions) != 2 {
t.Errorf("Unexpected number of NodeSelectorRequirements in volume NodeAffinity: %d", len(term.MatchExpressions))
}
r, _ := getNodeSelectorRequirementWithKey("fakepdmanager", term)
if r == nil || r.Values[0] != "yes" || r.Operator != v1.NodeSelectorOpIn {
t.Errorf("NodeSelectorRequirement fakepdmanager-in-yes not found in volume NodeAffinity")
}
zones, _ := volumeutil.ZonesToSet("zone1,zone2")
r, _ = getNodeSelectorRequirementWithKey(kubeletapis.LabelZoneFailureDomain, term)
if r == nil {
t.Errorf("NodeSelectorRequirement %s-in-%v not found in volume NodeAffinity", kubeletapis.LabelZoneFailureDomain, zones)
}
sort.Strings(r.Values)
if !reflect.DeepEqual(r.Values, zones.List()) {
t.Errorf("ZoneFailureDomain elements %v does not match zone labels %v", r.Values, zones)
}
// Test Deleter

View File

@@ -20,6 +20,7 @@ import (
"fmt"
"path"
"path/filepath"
"regexp"
"strings"
"time"
@@ -31,6 +32,7 @@ import (
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/features"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
utilfile "k8s.io/kubernetes/pkg/util/file"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/utils/exec"
@@ -50,12 +52,19 @@ const (
// Replication type constants must be lower case.
replicationTypeNone = "none"
replicationTypeRegionalPD = "regional-pd"
// scsi_id output should be in the form of:
// 0Google PersistentDisk <disk name>
scsiPattern = `^0Google\s+PersistentDisk\s+([\S]+)\s*$`
)
// These variables are modified only in unit tests and should be constant
// otherwise.
var (
// errorSleepDuration is modified only in unit tests and should be constant
// otherwise.
errorSleepDuration time.Duration = 5 * time.Second
// regex to parse scsi_id output and extract the serial
scsiRegex = regexp.MustCompile(scsiPattern)
)
type GCEDiskUtil struct{}
@@ -78,7 +87,7 @@ func (util *GCEDiskUtil) DeleteVolume(d *gcePersistentDiskDeleter) error {
// CreateVolume creates a GCE PD.
// Returns: gcePDName, volumeSizeGB, labels, fsType, error
func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (string, int, map[string]string, string, error) {
func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (string, int, map[string]string, string, error) {
cloud, err := getCloudProvider(c.gcePersistentDisk.plugin.host.GetCloudProvider())
if err != nil {
return "", 0, nil, "", err
@@ -86,8 +95,8 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
name := volumeutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 63) // GCE PD name can have up to 63 characters
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
// GCE PDs are allocated in chunks of GBs (not GiBs)
requestGB := volumeutil.RoundUpToGB(capacity)
// GCE PDs are allocated in chunks of GiBs
requestGB := volumeutil.RoundUpToGiB(capacity)
// Apply Parameters.
// Values for parameter "replication-type" are canonicalized to lower case.
@@ -95,7 +104,7 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
// to the cloud provider.
diskType := ""
configuredZone := ""
configuredZones := ""
var configuredZones sets.String
zonePresent := false
zonesPresent := false
replicationType := replicationTypeNone
@@ -109,7 +118,10 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
configuredZone = v
case "zones":
zonesPresent = true
configuredZones = v
configuredZones, err = volumeutil.ZonesToSet(v)
if err != nil {
return "", 0, nil, "", err
}
case "replication-type":
if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
return "", 0, nil, "",
@@ -124,76 +136,49 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
}
}
if zonePresent && zonesPresent {
return "", 0, nil, "", fmt.Errorf("the 'zone' and 'zones' StorageClass parameters must not be used at the same time")
}
if replicationType == replicationTypeRegionalPD && zonePresent {
// If a user accidentally types 'zone' instead of 'zones', we want to throw an error
// instead of assuming that 'zones' is empty and proceed by randomly selecting zones.
return "", 0, nil, "", fmt.Errorf("the '%s' replication type does not support the 'zone' parameter; use 'zones' instead", replicationTypeRegionalPD)
}
// TODO: implement PVC.Selector parsing
if c.options.PVC.Spec.Selector != nil {
return "", 0, nil, "", fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on GCE")
}
var activezones sets.String
activezones, err = cloud.GetAllCurrentZones()
if err != nil {
return "", 0, nil, "", err
}
switch replicationType {
case replicationTypeRegionalPD:
err = createRegionalPD(
name,
c.options.PVC.Name,
diskType,
configuredZones,
requestGB,
c.options.CloudTags,
cloud)
selectedZones, err := volumeutil.SelectZonesForVolume(zonePresent, zonesPresent, configuredZone, configuredZones, activezones, node, allowedTopologies, c.options.PVC.Name, maxRegionalPDZones)
if err != nil {
glog.V(2).Infof("Error selecting zones for regional GCE PD volume: %v", err)
return "", 0, nil, "", err
}
if err = cloud.CreateRegionalDisk(
name,
diskType,
selectedZones,
int64(requestGB),
*c.options.CloudTags); err != nil {
glog.V(2).Infof("Error creating regional GCE PD volume: %v", err)
return "", 0, nil, "", err
}
glog.V(2).Infof("Successfully created Regional GCE PD volume %s", name)
case replicationTypeNone:
var zones sets.String
if !zonePresent && !zonesPresent {
// 00 - neither "zone" or "zones" specified
// Pick a zone randomly selected from all active zones where
// Kubernetes cluster has a node.
zones, err = cloud.GetAllCurrentZones()
if err != nil {
glog.V(2).Infof("error getting zone information from GCE: %v", err)
return "", 0, nil, "", err
}
} else if !zonePresent && zonesPresent {
// 01 - "zones" specified
// Pick a zone randomly selected from specified set.
if zones, err = volumeutil.ZonesToSet(configuredZones); err != nil {
return "", 0, nil, "", err
}
} else if zonePresent && !zonesPresent {
// 10 - "zone" specified
// Use specified zone
if err := volumeutil.ValidateZone(configuredZone); err != nil {
return "", 0, nil, "", err
}
zones = make(sets.String)
zones.Insert(configuredZone)
selectedZone, err := volumeutil.SelectZoneForVolume(zonePresent, zonesPresent, configuredZone, configuredZones, activezones, node, allowedTopologies, c.options.PVC.Name)
if err != nil {
return "", 0, nil, "", err
}
zone := volumeutil.ChooseZoneForVolume(zones, c.options.PVC.Name)
if err := cloud.CreateDisk(
name,
diskType,
zone,
selectedZone,
int64(requestGB),
*c.options.CloudTags); err != nil {
glog.V(2).Infof("Error creating single-zone GCE PD volume: %v", err)
return "", 0, nil, "", err
}
glog.V(2).Infof("Successfully created single-zone GCE PD volume %s", name)
default:
@@ -209,61 +194,12 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
return name, int(requestGB), labels, fstype, nil
}
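
Editor's note: the branching the old code performed inline (the 00/01/10 cases over the zone and zones parameters, visible in the removed lines) is now centralized in SelectZonesForVolume/SelectZoneForVolume, which also consider the selected node and allowed topologies. A condensed sketch of just the candidate-set decision; the helper name is hypothetical:

    package main

    import "fmt"

    // candidateZones condenses the removed cases: neither parameter set means
    // all active zones; "zones" means the configured set; "zone" means exactly
    // that zone. Both set at once is rejected by earlier validation.
    func candidateZones(zonePresent, zonesPresent bool, zone string, zones, active []string) ([]string, error) {
    	switch {
    	case !zonePresent && !zonesPresent:
    		return active, nil
    	case zonesPresent:
    		return zones, nil
    	case zone == "":
    		return nil, fmt.Errorf("empty zone parameter")
    	default:
    		return []string{zone}, nil
    	}
    }

    func main() {
    	z, _ := candidateZones(false, true, "", []string{"us-central1-a", "us-central1-b"}, nil)
    	fmt.Println(z) // [us-central1-a us-central1-b]
    }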
// Creates a Regional PD
func createRegionalPD(
diskName string,
pvcName string,
diskType string,
zonesString string,
requestGB int64,
cloudTags *map[string]string,
cloud *gcecloud.GCECloud) error {
var replicaZones sets.String
var err error
if zonesString == "" {
// Consider all zones
replicaZones, err = cloud.GetAllCurrentZones()
if err != nil {
glog.V(2).Infof("error getting zone information from GCE: %v", err)
return err
}
} else {
replicaZones, err = volumeutil.ZonesToSet(zonesString)
if err != nil {
return err
}
}
zoneCount := replicaZones.Len()
var selectedReplicaZones sets.String
if zoneCount < maxRegionalPDZones {
return fmt.Errorf("cannot specify only %d zone(s) for Regional PDs.", zoneCount)
} else if zoneCount == maxRegionalPDZones {
selectedReplicaZones = replicaZones
} else {
// Must randomly select zones
selectedReplicaZones = volumeutil.ChooseZonesForVolume(
replicaZones, pvcName, maxRegionalPDZones)
}
if err = cloud.CreateRegionalDisk(
diskName,
diskType,
selectedReplicaZones,
int64(requestGB),
*cloudTags); err != nil {
return err
}
return nil
}
// Returns the first path that exists, or empty string if none exist.
func verifyDevicePath(devicePaths []string, sdBeforeSet sets.String) (string, error) {
func verifyDevicePath(devicePaths []string, sdBeforeSet sets.String, diskName string) (string, error) {
if err := udevadmChangeToNewDrives(sdBeforeSet); err != nil {
// udevadm errors should not block disk detachment, log and continue
// It's possible udevadm was called on other disks so it should not block this
// call. If it did fail on this disk, then the devicePath will either
// not exist or be wrong. If it's wrong, then the scsi_id check below will fail.
glog.Errorf("udevadmChangeToNewDrives failed with: %v", err)
}
@@ -271,6 +207,22 @@ func verifyDevicePath(devicePaths []string, sdBeforeSet sets.String) (string, er
if pathExists, err := volumeutil.PathExists(path); err != nil {
return "", fmt.Errorf("Error checking if path exists: %v", err)
} else if pathExists {
// validate that the path actually resolves to the correct disk
serial, err := getScsiSerial(path, diskName)
if err != nil {
return "", fmt.Errorf("failed to get scsi serial %v", err)
}
if serial != diskName {
// The device link is not pointing to the correct device
// Trigger udev on this device to try to fix the link
if udevErr := udevadmChangeToDrive(path); udevErr != nil {
glog.Errorf("udevadmChangeToDrive %q failed with: %v", path, err)
}
// Return error to retry WaitForAttach and verifyDevicePath
return "", fmt.Errorf("scsi_id serial %q for device %q doesn't match disk %q", serial, path, diskName)
}
// The device link is correct
return path, nil
}
}
@@ -278,22 +230,38 @@ func verifyDevicePath(devicePaths []string, sdBeforeSet sets.String) (string, er
return "", nil
}
// Returns the first path that exists, or empty string if none exist.
func verifyAllPathsRemoved(devicePaths []string) (bool, error) {
allPathsRemoved := true
for _, path := range devicePaths {
if err := udevadmChangeToDrive(path); err != nil {
// udevadm errors should not block disk detachment, log and continue
glog.Errorf("%v", err)
}
if exists, err := volumeutil.PathExists(path); err != nil {
return false, fmt.Errorf("Error checking if path exists: %v", err)
} else {
allPathsRemoved = allPathsRemoved && !exists
}
// Calls scsi_id on the given devicePath to get the serial number reported by that device.
func getScsiSerial(devicePath, diskName string) (string, error) {
exists, err := utilfile.FileExists("/lib/udev/scsi_id")
if err != nil {
return "", fmt.Errorf("failed to check scsi_id existence: %v", err)
}
return allPathsRemoved, nil
if !exists {
glog.V(6).Infof("scsi_id doesn't exist; skipping check for %v", devicePath)
return diskName, nil
}
out, err := exec.New().Command(
"/lib/udev/scsi_id",
"--page=0x83",
"--whitelisted",
fmt.Sprintf("--device=%v", devicePath)).CombinedOutput()
if err != nil {
return "", fmt.Errorf("scsi_id failed for device %q with %v.", devicePath, err)
}
return parseScsiSerial(string(out))
}
// Parse the output returned by scsi_id and extract the serial number
func parseScsiSerial(output string) (string, error) {
substrings := scsiRegex.FindStringSubmatch(output)
if substrings == nil {
return "", fmt.Errorf("scsi_id output cannot be parsed: %q", output)
}
return substrings[1], nil
}
// Returns list of all /dev/disk/by-id/* paths for given PD.

View File

@@ -0,0 +1,62 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gce_pd
import "testing"
func TestParseScsiSerial(t *testing.T) {
cases := []struct {
name string
output string
diskName string
expectErr bool
}{
{
name: "valid",
output: "0Google PersistentDisk test-disk",
diskName: "test-disk",
},
{
name: "valid with newline",
output: "0Google PersistentDisk test-disk\n",
diskName: "test-disk",
},
{
name: "invalid prefix",
output: "00Google PersistentDisk test-disk",
expectErr: true,
},
{
name: "invalid suffix",
output: "0Google PersistentDisk test-disk more",
expectErr: true,
},
}
for _, test := range cases {
serial, err := parseScsiSerial(test.output)
if err != nil && !test.expectErr {
t.Errorf("test %v failed: %v", test.name, err)
}
if err == nil && test.expectErr {
t.Errorf("test %q failed: got success", test.name)
}
if serial != test.diskName {
t.Errorf("test %v failed: expected serial %q, got %q", test.name, test.diskName, serial)
}
}
}

View File

@@ -17,8 +17,8 @@ go_library(
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
@@ -31,9 +31,9 @@ go_test(
"//pkg/volume:go_default_library",
"//pkg/volume/empty_dir:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/exec/testing:go_default_library",
],

View File

@@ -21,18 +21,18 @@ go_library(
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/heketi/heketi/client/api/go-client:go_default_library",
"//vendor/github.com/heketi/heketi/pkg/glusterfs/api:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
@@ -47,14 +47,14 @@ go_test(
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//vendor/github.com/heketi/heketi/pkg/glusterfs/api:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)

View File

@@ -288,23 +288,43 @@ func (c *glusterfsUnmounter) TearDownAt(dir string) error {
func (b *glusterfsMounter) setUpAtInternal(dir string) error {
var errs error
options := []string{}
hasLogFile := false
log := ""
if b.readOnly {
options = append(options, "ro")
}
p := path.Join(b.glusterfs.plugin.host.GetPluginDir(glusterfsPluginName), b.glusterfs.volName)
if err := os.MkdirAll(p, 0750); err != nil {
return fmt.Errorf("failed to create directory %v: %v", p, err)
// Check whether a log-file mount option was provided by the user; if so, use it as the log file.
for _, userOpt := range b.mountOptions {
if dstrings.HasPrefix(userOpt, "log-file") {
glog.V(4).Infof("log-file mount option has provided")
hasLogFile = true
break
}
}
// If no log file has been provided, create a driver-specific one.
if !hasLogFile {
log = ""
p := path.Join(b.glusterfs.plugin.host.GetPluginDir(glusterfsPluginName), b.glusterfs.volName)
if err := os.MkdirAll(p, 0750); err != nil {
return fmt.Errorf("failed to create directory %v: %v", p, err)
}
// adding log-level ERROR to remove noise
// and more specific log path so each pod has
// its own log based on PV + Pod
log = path.Join(p, b.pod.Name+"-glusterfs.log")
// Use derived log file in gluster fuse mount
options = append(options, "log-file="+log)
}
// adding log-level ERROR to remove noise
// and more specific log path so each pod has
// its own log based on PV + Pod
log := path.Join(p, b.pod.Name+"-glusterfs.log")
options = append(options, "log-level=ERROR")
options = append(options, "log-file="+log)
var addrlist []string
if b.hosts == nil {
@@ -322,7 +342,11 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
}
}
//Add backup-volfile-servers and auto_unmount options.
options = append(options, "backup-volfile-servers="+dstrings.Join(addrlist[:], ":"))
options = append(options, "auto_unmount")
mountOptions := volutil.JoinMountOptions(b.mountOptions, options)
// with `backup-volfile-servers` mount option in place, it is not required to
@@ -686,8 +710,6 @@ func (p *glusterfsVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTop
}
p.provisionerConfig = *cfg
glog.V(4).Infof("creating volume with configuration %+v", p.provisionerConfig)
gidTable, err := p.plugin.getGidTable(scName, cfg.gidMin, cfg.gidMax)
if err != nil {
return nil, fmt.Errorf("failed to get gidTable: %v", err)
@@ -719,6 +741,7 @@ func (p *glusterfsVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTop
if len(pv.Spec.AccessModes) == 0 {
pv.Spec.AccessModes = p.plugin.GetAccessModes()
}
pv.Spec.MountOptions = p.options.MountOptions
gidStr := strconv.FormatInt(int64(gid), 10)
@@ -728,7 +751,6 @@ func (p *glusterfsVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTop
volutil.VolumeDynamicallyCreatedByKey: heketiAnn,
glusterTypeAnn: "file",
"Description": glusterDescAnn,
v1.MountOptionAnnotation: "auto_unmount",
heketiVolIDAnn: volID,
}
@@ -744,7 +766,10 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolum
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
// GlusterFS/heketi creates volumes in units of GiB.
sz := int(volutil.RoundUpToGiB(capacity))
sz, err := volutil.RoundUpToGiBInt(capacity)
if err != nil {
return nil, 0, "", err
}
glog.V(2).Infof("create volume of size %dGiB", sz)
if p.url == "" {
@@ -1145,7 +1170,7 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res
return oldSize, err
}
glog.V(4).Infof("expanding volume: %q with configuration: %+v", volumeID, cfg)
glog.V(4).Infof("expanding volume: %q", volumeID)
//Create REST server connection
cli := gcli.NewClient(cfg.url, cfg.user, cfg.secretValue)
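
Editor's note: setUpAtInternal now scans the user's mount options for a log-file entry before synthesizing the per-PV, per-pod log path, so a user-provided log file wins. A sketch of that precedence check; the option strings come from the diff, while the real code additionally merges and dedupes via volutil.JoinMountOptions and appends backup-volfile-servers:

    package main

    import (
    	"fmt"
    	"path"
    	"strings"
    )

    // buildMountOptions mirrors the flow above: honor a user-supplied log-file
    // option, otherwise derive one log file per PV and pod under pluginDir.
    func buildMountOptions(userOpts []string, pluginDir, podName string) []string {
    	opts := []string{"log-level=ERROR", "auto_unmount"}
    	hasLogFile := false
    	for _, o := range userOpts {
    		if strings.HasPrefix(o, "log-file") {
    			hasLogFile = true
    			break
    		}
    	}
    	if !hasLogFile {
    		opts = append(opts, "log-file="+path.Join(pluginDir, podName+"-glusterfs.log"))
    	}
    	return append(opts, userOpts...)
    }

    func main() {
    	fmt.Println(buildMountOptions(nil, "/var/lib/kubelet/plugins/glusterfs/vol1", "mypod"))
    	fmt.Println(buildMountOptions([]string{"log-file=/tmp/g.log"}, "/var/lib/kubelet/plugins/glusterfs/vol1", "mypod"))
    }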

View File

@@ -19,10 +19,10 @@ go_library(
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/recyclerclient:go_default_library",
"//pkg/volume/validation:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
],
)
@@ -35,12 +35,12 @@ go_test(
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
],
)

View File

@@ -8,3 +8,4 @@ reviewers:
- rootfs
- jingxu97
- msau42
- dixudx

View File

@@ -132,7 +132,7 @@ func (plugin *hostPathPlugin) NewUnmounter(volName string, podUID types.UID) (vo
// HostPath recycling only works in single node clusters and is meant for testing purposes only.
func (plugin *hostPathPlugin) Recycle(pvName string, spec *volume.Spec, eventRecorder recyclerclient.RecycleEventRecorder) error {
if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.HostPath == nil {
return fmt.Errorf("spec.PersistentVolumeSource.HostPath is nil")
return fmt.Errorf("spec.PersistentVolume.Spec.HostPath is nil")
}
pod := plugin.config.RecyclerPodTemplate

View File

@@ -17,7 +17,6 @@ limitations under the License.
package host_path
import (
"errors"
"fmt"
"os"
"testing"
@@ -319,92 +318,6 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
}
}
type fakeFileTypeChecker struct {
desiredType string
}
func (fftc *fakeFileTypeChecker) Mount(source string, target string, fstype string, options []string) error {
return nil
}
func (fftc *fakeFileTypeChecker) Unmount(target string) error {
return nil
}
func (fftc *fakeFileTypeChecker) List() ([]utilmount.MountPoint, error) {
return nil, nil
}
func (fftc *fakeFileTypeChecker) IsMountPointMatch(mp utilmount.MountPoint, dir string) bool {
return false
}
func (fftc *fakeFileTypeChecker) IsNotMountPoint(file string) (bool, error) {
return false, nil
}
func (fftc *fakeFileTypeChecker) IsLikelyNotMountPoint(file string) (bool, error) {
return false, nil
}
func (fftc *fakeFileTypeChecker) DeviceOpened(pathname string) (bool, error) {
return false, nil
}
func (fftc *fakeFileTypeChecker) PathIsDevice(pathname string) (bool, error) {
return false, nil
}
func (fftc *fakeFileTypeChecker) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) {
return "fake", nil
}
func (fftc *fakeFileTypeChecker) MakeRShared(path string) error {
return nil
}
func (fftc *fakeFileTypeChecker) MakeFile(pathname string) error {
return nil
}
func (fftc *fakeFileTypeChecker) MakeDir(pathname string) error {
return nil
}
func (fftc *fakeFileTypeChecker) ExistsPath(pathname string) (bool, error) {
return true, nil
}
func (fftc *fakeFileTypeChecker) GetFileType(_ string) (utilmount.FileType, error) {
return utilmount.FileType(fftc.desiredType), nil
}
func (fftc *fakeFileTypeChecker) PrepareSafeSubpath(subPath utilmount.Subpath) (newHostPath string, cleanupAction func(), err error) {
return "", nil, nil
}
func (fftc *fakeFileTypeChecker) CleanSubPaths(_, _ string) error {
return nil
}
func (fftc *fakeFileTypeChecker) SafeMakeDir(_, _ string, _ os.FileMode) error {
return nil
}
func (fftc *fakeFileTypeChecker) GetMountRefs(pathname string) ([]string, error) {
return nil, errors.New("not implemented")
}
func (fftc *fakeFileTypeChecker) GetFSGroup(pathname string) (int64, error) {
return -1, errors.New("not implemented")
}
func (fftc *fakeFileTypeChecker) GetSELinuxSupport(pathname string) (bool, error) {
return false, errors.New("not implemented")
}
func (fftc *fakeFileTypeChecker) GetMode(pathname string) (os.FileMode, error) {
return 0, errors.New("not implemented")
}
func setUp() error {
err := os.MkdirAll("/tmp/ExistingFolder", os.FileMode(0755))
if err != nil {
@@ -473,7 +386,11 @@ func TestOSFileTypeChecker(t *testing.T) {
}
for i, tc := range testCases {
fakeFTC := &fakeFileTypeChecker{desiredType: tc.desiredType}
fakeFTC := &utilmount.FakeMounter{
Filesystem: map[string]utilmount.FileType{
tc.path: utilmount.FileType(tc.desiredType),
},
}
oftc := newFileTypeChecker(tc.path, fakeFTC)
path := oftc.GetPath()

View File

@@ -18,16 +18,17 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/volume/iscsi",
deps = [
"//pkg/features:go_default_library",
"//pkg/util/keymutex:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
],
)
@@ -42,11 +43,11 @@ go_test(
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
)

View File

@@ -26,30 +26,41 @@ import (
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
type iscsiAttacher struct {
host volume.VolumeHost
manager diskManager
host volume.VolumeHost
targetLocks keymutex.KeyMutex
manager diskManager
}
var _ volume.Attacher = &iscsiAttacher{}
var _ volume.DeviceMounter = &iscsiAttacher{}
var _ volume.AttachableVolumePlugin = &iscsiPlugin{}
var _ volume.DeviceMountableVolumePlugin = &iscsiPlugin{}
func (plugin *iscsiPlugin) NewAttacher() (volume.Attacher, error) {
return &iscsiAttacher{
host: plugin.host,
manager: &ISCSIUtil{},
host: plugin.host,
targetLocks: plugin.targetLocks,
manager: &ISCSIUtil{},
}, nil
}
func (plugin *iscsiPlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return plugin.NewAttacher()
}
func (plugin *iscsiPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(iscsiPluginName)
return mount.GetMountRefs(mounter, deviceMountPath)
return mounter.GetMountRefs(deviceMountPath)
}
func (attacher *iscsiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
@@ -66,7 +77,7 @@ func (attacher *iscsiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName
}
func (attacher *iscsiAttacher) WaitForAttach(spec *volume.Spec, devicePath string, pod *v1.Pod, timeout time.Duration) (string, error) {
mounter, err := volumeSpecToMounter(spec, attacher.host, pod)
mounter, err := volumeSpecToMounter(spec, attacher.host, attacher.targetLocks, pod)
if err != nil {
glog.Warningf("failed to get iscsi mounter: %v", err)
return "", err
@@ -76,7 +87,7 @@ func (attacher *iscsiAttacher) WaitForAttach(spec *volume.Spec, devicePath strin
func (attacher *iscsiAttacher) GetDeviceMountPath(
spec *volume.Spec) (string, error) {
mounter, err := volumeSpecToMounter(spec, attacher.host, nil)
mounter, err := volumeSpecToMounter(spec, attacher.host, attacher.targetLocks, nil)
if err != nil {
glog.Warningf("failed to get iscsi mounter: %v", err)
return "", err
@@ -126,24 +137,32 @@ type iscsiDetacher struct {
host volume.VolumeHost
mounter mount.Interface
manager diskManager
plugin *iscsiPlugin
}
var _ volume.Detacher = &iscsiDetacher{}
var _ volume.DeviceUnmounter = &iscsiDetacher{}
func (plugin *iscsiPlugin) NewDetacher() (volume.Detacher, error) {
return &iscsiDetacher{
host: plugin.host,
mounter: plugin.host.GetMounter(iscsiPluginName),
manager: &ISCSIUtil{},
plugin: plugin,
}, nil
}
func (plugin *iscsiPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return plugin.NewDetacher()
}
func (detacher *iscsiDetacher) Detach(volumeName string, nodeName types.NodeName) error {
return nil
}
func (detacher *iscsiDetacher) UnmountDevice(deviceMountPath string) error {
unMounter := volumeSpecToUnmounter(detacher.mounter, detacher.host)
unMounter := volumeSpecToUnmounter(detacher.mounter, detacher.host, detacher.plugin)
err := detacher.manager.DetachDisk(*unMounter, deviceMountPath)
if err != nil {
return fmt.Errorf("iscsi: failed to detach disk: %s\nError: %v", deviceMountPath, err)
@@ -157,7 +176,7 @@ func (detacher *iscsiDetacher) UnmountDevice(deviceMountPath string) error {
return nil
}
func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost, pod *v1.Pod) (*iscsiDiskMounter, error) {
func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost, targetLocks keymutex.KeyMutex, pod *v1.Pod) (*iscsiDiskMounter, error) {
var secret map[string]string
readOnly, fsType, err := getISCSIVolumeInfo(spec)
if err != nil {
@@ -165,7 +184,7 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost, pod *v1.Pod)
}
var podUID types.UID
if pod != nil {
secret, err = createSecretMap(spec, &iscsiPlugin{host: host}, pod.Namespace)
secret, err = createSecretMap(spec, &iscsiPlugin{host: host, targetLocks: targetLocks}, pod.Namespace)
if err != nil {
return nil, err
}
@@ -173,7 +192,7 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost, pod *v1.Pod)
}
iscsiDisk, err := createISCSIDisk(spec,
podUID,
&iscsiPlugin{host: host},
&iscsiPlugin{host: host, targetLocks: targetLocks},
&ISCSIUtil{},
secret,
)
@@ -208,13 +227,14 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost, pod *v1.Pod)
}, nil
}
func volumeSpecToUnmounter(mounter mount.Interface, host volume.VolumeHost) *iscsiDiskUnmounter {
func volumeSpecToUnmounter(mounter mount.Interface, host volume.VolumeHost, plugin *iscsiPlugin) *iscsiDiskUnmounter {
exec := host.GetExec(iscsiPluginName)
return &iscsiDiskUnmounter{
iscsiDisk: &iscsiDisk{
plugin: &iscsiPlugin{},
plugin: plugin,
},
mounter: mounter,
exec: exec,
mounter: mounter,
exec: exec,
deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()),
}
}
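
The attacher and detacher changes above thread a per-target lock (targetLocks, keyed by IQN) through every login and logout path. A minimal sketch of the hashed key-mutex idea behind keymutex.NewHashed — hypothetical standalone code, not the vendored implementation:

package main

import (
	"fmt"
	"hash/fnv"
	"sync"
)

// hashedKeyMutex serializes operations per key (e.g. an iSCSI target IQN)
// using a fixed pool of mutexes selected by hashing the key. Two different
// keys may share a mutex, which only over-serializes, never under-serializes.
type hashedKeyMutex struct {
	mutexes []sync.Mutex
}

func newHashedKeyMutex(n int) *hashedKeyMutex {
	return &hashedKeyMutex{mutexes: make([]sync.Mutex, n)}
}

func (km *hashedKeyMutex) LockKey(id string)   { km.mutexes[km.index(id)].Lock() }
func (km *hashedKeyMutex) UnlockKey(id string) { km.mutexes[km.index(id)].Unlock() }

func (km *hashedKeyMutex) index(id string) uint32 {
	h := fnv.New32a()
	h.Write([]byte(id))
	return h.Sum32() % uint32(len(km.mutexes))
}

func main() {
	locks := newHashedKeyMutex(8)
	iqn := "iqn.2003-01.io.k8s:e2e.volume-1"
	locks.LockKey(iqn) // held for the whole login sequence against this target
	defer locks.UnlockKey(iqn)
	fmt.Println("logged in under per-target lock")
}

Hashing the key into a fixed pool bounds memory regardless of how many distinct targets a node sees over its lifetime.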

View File

@@ -27,6 +27,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/util/mount"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
@@ -36,11 +37,12 @@ import (
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&iscsiPlugin{nil}}
return []volume.VolumePlugin{&iscsiPlugin{}}
}
type iscsiPlugin struct {
host volume.VolumeHost
host volume.VolumeHost
targetLocks keymutex.KeyMutex
}
var _ volume.VolumePlugin = &iscsiPlugin{}
@@ -53,6 +55,7 @@ const (
func (plugin *iscsiPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
plugin.targetLocks = keymutex.NewHashed(0)
return nil
}
@@ -176,8 +179,9 @@ func (plugin *iscsiPlugin) newUnmounterInternal(volName string, podUID types.UID
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(plugin.host.GetPodVolumeDir(podUID, utilstrings.EscapeQualifiedNameForDisk(iscsiPluginName), volName)),
},
mounter: mounter,
exec: exec,
mounter: mounter,
exec: exec,
deviceUtil: ioutil.NewDeviceHandler(ioutil.NewIOHandler()),
}, nil
}
@@ -203,7 +207,7 @@ func (plugin *iscsiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*v
// Find globalPDPath from pod volume directory(mountPath)
var globalPDPath string
mounter := plugin.host.GetMounter(plugin.GetPluginName())
paths, err := mount.GetMountRefs(mounter, mountPath)
paths, err := mounter.GetMountRefs(mountPath)
if err != nil {
return nil, err
}
@@ -281,7 +285,7 @@ func (iscsi *iscsiDisk) GetPath() string {
}
func (iscsi *iscsiDisk) iscsiGlobalMapPath(spec *volume.Spec) (string, error) {
mounter, err := volumeSpecToMounter(spec, iscsi.plugin.host, nil /* pod */)
mounter, err := volumeSpecToMounter(spec, iscsi.plugin.host, iscsi.plugin.targetLocks, nil /* pod */)
if err != nil {
glog.Warningf("failed to get iscsi mounter: %v", err)
return "", err
@@ -337,8 +341,9 @@ func (b *iscsiDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
type iscsiDiskUnmounter struct {
*iscsiDisk
mounter mount.Interface
exec mount.Exec
mounter mount.Interface
exec mount.Exec
deviceUtil ioutil.DeviceUtil
}
var _ volume.Unmounter = &iscsiDiskUnmounter{}

View File

@@ -23,6 +23,7 @@ import (
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
@@ -212,6 +213,59 @@ func (util *ISCSIUtil) loadISCSI(conf *iscsiDisk, mnt string) error {
return nil
}
// scanOneLun scans a single LUN on one SCSI bus
// Use this to avoid scanning the whole SCSI bus for all of the LUNs, which
// would result in the kernel on this node discovering LUNs that it shouldn't
// know about. Extraneous LUNs cause problems because they may get deleted
// without us getting notified, since we were never supposed to know about
// them. When LUNs are deleted without proper cleanup in the kernel, I/O errors
// and timeouts result, which can noticeably degrade performance of future
// operations.
func scanOneLun(hostNumber int, lunNumber int) error {
filename := fmt.Sprintf("/sys/class/scsi_host/host%d/scan", hostNumber)
fd, err := os.OpenFile(filename, os.O_WRONLY, 0)
if err != nil {
return err
}
defer fd.Close()
// Channel/Target are always 0 for iSCSI
scanCmd := fmt.Sprintf("0 0 %d", lunNumber)
if written, err := fd.WriteString(scanCmd); err != nil {
return err
} else if 0 == written {
return fmt.Errorf("No data written to file: %s", filename)
}
glog.V(3).Infof("Scanned SCSI host %d LUN %d", hostNumber, lunNumber)
return nil
}
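
The write above is the Go equivalent of the classic sysfs idiom echo "c t l" > /sys/class/scsi_host/hostN/scan, with channel and target pinned to 0. A hedged standalone sketch (the host and LUN numbers are hypothetical) showing exactly what gets written:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

func main() {
	host, lun := 2, 5 // hypothetical SCSI host and LUN numbers
	// Channel and target are always 0 for iSCSI, so the scan string is "0 0 <lun>".
	scanCmd := fmt.Sprintf("0 0 %d", lun)
	filename := fmt.Sprintf("/sys/class/scsi_host/host%d/scan", host)
	// Needs root and a real SCSI host; shown only to illustrate the write.
	if err := ioutil.WriteFile(filename, []byte(scanCmd), 0200); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}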
func waitForMultiPathToExist(devicePaths []string, maxRetries int, deviceUtil volumeutil.DeviceUtil) string {
if 0 == len(devicePaths) {
return ""
}
for i := 0; i < maxRetries; i++ {
for _, path := range devicePaths {
// There shouldn't be any empty device paths. However, add this check
// to be safe and avoid the possibility of an empty entry.
if path == "" {
continue
}
// check if the dev is using mpio, and if so return the dm-XX multipath device
if mappedDevicePath := deviceUtil.FindMultipathDeviceForDevice(path); mappedDevicePath != "" {
return mappedDevicePath
}
}
if i == maxRetries-1 {
break
}
time.Sleep(time.Second)
}
return ""
}
// AttachDisk returns devicePath of volume if attach succeeded otherwise returns error
func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) {
var devicePath string
@@ -242,12 +296,84 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) {
b.Iface = newIface
}
// Lock the target while we log in, to avoid races between two volumes that share the
// same target: both logging in at once, or one logging out while the other logs in.
b.plugin.targetLocks.LockKey(b.Iqn)
defer b.plugin.targetLocks.UnlockKey(b.Iqn)
// Build a map of SCSI hosts for each target portal. We will need this to
// issue the bus rescans.
portalHostMap, err := b.deviceUtil.GetISCSIPortalHostMapForTarget(b.Iqn)
if err != nil {
return "", err
}
glog.V(4).Infof("AttachDisk portal->host map for %s is %v", b.Iqn, portalHostMap)
for _, tp := range bkpPortal {
// Rescan sessions to discover newly mapped LUNs. Do not specify the interface when rescanning
// to avoid establishing additional sessions to the same target.
out, err := b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-R")
hostNumber, loggedIn := portalHostMap[tp]
if !loggedIn {
glog.V(4).Infof("Could not get SCSI host number for portal %s, will attempt login", tp)
// build discoverydb and discover iscsi target
b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "new")
// update discoverydb with CHAP secret
err = updateISCSIDiscoverydb(b, tp)
if err != nil {
lastErr = fmt.Errorf("iscsi: failed to update discoverydb to portal %s error: %v", tp, err)
continue
}
out, err = b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "--discover")
if err != nil {
// delete discoverydb record
b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "delete")
lastErr = fmt.Errorf("iscsi: failed to sendtargets to portal %s output: %s, err %v", tp, string(out), err)
continue
}
err = updateISCSINode(b, tp)
if err != nil {
// failure to update the node db is rare, but deleting the record would likely impact users who have already started using the volume.
lastErr = fmt.Errorf("iscsi: failed to update iscsi node to portal %s error: %v", tp, err)
continue
}
// login to iscsi target
out, err = b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-I", b.Iface, "--login")
if err != nil {
// delete the node record from database
b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-I", b.Iface, "-T", b.Iqn, "-o", "delete")
lastErr = fmt.Errorf("iscsi: failed to attach disk: Error: %s (%v)", string(out), err)
continue
}
// in case of node failure/restart, explicitly set to manual login so it doesn't hang on boot
out, err = b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-o", "update", "-n", "node.startup", "-v", "manual")
if err != nil {
// don't fail if we can't set startup mode, but log warning so there is a clue
glog.Warningf("Warning: Failed to set iSCSI login mode to manual. Error: %v", err)
}
// Rebuild the host map after logging in
portalHostMap, err := b.deviceUtil.GetISCSIPortalHostMapForTarget(b.Iqn)
if err != nil {
return "", err
}
glog.V(6).Infof("AttachDisk portal->host map for %s is %v", b.Iqn, portalHostMap)
hostNumber, loggedIn = portalHostMap[tp]
if !loggedIn {
glog.Warningf("Could not get SCSI host number for portal %s after logging in", tp)
continue
}
}
glog.V(5).Infof("AttachDisk: scanning SCSI host %d LUN %s", hostNumber, b.Lun)
lunNumber, err := strconv.Atoi(b.Lun)
if err != nil {
glog.Errorf("iscsi: failed to rescan session with error: %s (%v)", string(out), err)
return "", fmt.Errorf("AttachDisk: lun is not a number: %s\nError: %v", b.Lun, err)
}
// Scan the iSCSI bus for the LUN
err = scanOneLun(hostNumber, lunNumber)
if err != nil {
return "", err
}
if iscsiTransport == "" {
@@ -260,46 +386,6 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) {
devicePath = strings.Join([]string{"/dev/disk/by-path/pci", "*", "ip", tp, "iscsi", b.Iqn, "lun", b.Lun}, "-")
}
if exist := waitForPathToExist(&devicePath, 1, iscsiTransport); exist {
glog.V(4).Infof("iscsi: devicepath (%s) exists", devicePath)
devicePaths = append(devicePaths, devicePath)
continue
}
// build discoverydb and discover iscsi target
b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "new")
// update discoverydb with CHAP secret
err = updateISCSIDiscoverydb(b, tp)
if err != nil {
lastErr = fmt.Errorf("iscsi: failed to update discoverydb to portal %s error: %v", tp, err)
continue
}
out, err = b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "--discover")
if err != nil {
// delete discoverydb record
b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "delete")
lastErr = fmt.Errorf("iscsi: failed to sendtargets to portal %s output: %s, err %v", tp, string(out), err)
continue
}
err = updateISCSINode(b, tp)
if err != nil {
// failure to update the node db is rare, but deleting the record would likely impact users who have already started using the volume.
lastErr = fmt.Errorf("iscsi: failed to update iscsi node to portal %s error: %v", tp, err)
continue
}
// login to iscsi target
out, err = b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-I", b.Iface, "--login")
if err != nil {
// delete the node record from database
b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-I", b.Iface, "-T", b.Iqn, "-o", "delete")
lastErr = fmt.Errorf("iscsi: failed to attach disk: Error: %s (%v)", string(out), err)
continue
}
// in case of node failure/restart, explicitly set to manual login so it doesn't hang on boot
out, err = b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-o", "update", "node.startup", "-v", "manual")
if err != nil {
// don't fail if we can't set startup mode, but log warning so there is a clue
glog.Warningf("Warning: Failed to set iSCSI login mode to manual. Error: %v", err)
}
if exist := waitForPathToExist(&devicePath, 10, iscsiTransport); !exist {
glog.Errorf("Could not attach disk: Timeout after 10s")
// update last error
@@ -320,20 +406,23 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) {
glog.Errorf("iscsi: last error occurred during iscsi init:\n%v", lastErr)
}
//Make sure we use a valid devicepath to find mpio device.
devicePath = devicePaths[0]
for _, path := range devicePaths {
// There shouldn't be any empty device paths. However, add this check
// to be safe and avoid the possibility of an empty entry.
if path == "" {
continue
}
// check if the dev is using mpio, and if so use the dm-XX multipath device
if mappedDevicePath := b.deviceUtil.FindMultipathDeviceForDevice(path); mappedDevicePath != "" {
devicePath = mappedDevicePath
break
}
// Try to find a multipath device for the volume
if 1 < len(bkpPortal) {
// If the PV has 2 or more portals, wait up to 10 seconds for the multipath
// device to appear
devicePath = waitForMultiPathToExist(devicePaths, 10, b.deviceUtil)
} else {
// For PVs with a single portal, try just once to find the multipath device. This
// avoids a long pause when the multipath device will never be created, and
// matches legacy behavior.
devicePath = waitForMultiPathToExist(devicePaths, 1, b.deviceUtil)
}
// When no multipath device is found, just use the first (and presumably only) device
if devicePath == "" {
devicePath = devicePaths[0]
}
glog.V(5).Infof("iscsi: AttachDisk devicePath: %s", devicePath)
// run global mount path related operations based on volumeMode
return globalPDPathOperation(b)(b, devicePath, util)
@@ -394,6 +483,68 @@ func globalPDPathOperation(b iscsiDiskMounter) func(iscsiDiskMounter, string, *I
}
}
// Delete 1 block device of the form "sd*"
func deleteDevice(deviceName string) error {
filename := fmt.Sprintf("/sys/block/%s/device/delete", deviceName)
fd, err := os.OpenFile(filename, os.O_WRONLY, 0)
if err != nil {
// The file was not present, so just return without error
return nil
}
defer fd.Close()
if written, err := fd.WriteString("1"); err != nil {
return err
} else if 0 == written {
return fmt.Errorf("No data written to file: %s", filename)
}
glog.V(4).Infof("Deleted block device: %s", deviceName)
return nil
}
// deleteDevices tries to remove all the block devices and multipath map devices
// associated with a given iscsi device
func deleteDevices(c iscsiDiskUnmounter) error {
lunNumber, err := strconv.Atoi(c.iscsiDisk.Lun)
if err != nil {
glog.Errorf("iscsi delete devices: lun is not a number: %s\nError: %v", c.iscsiDisk.Lun, err)
return err
}
// Enumerate the devices so we can delete them
deviceNames, err := c.deviceUtil.FindDevicesForISCSILun(c.iscsiDisk.Iqn, lunNumber)
if err != nil {
glog.Errorf("iscsi delete devices: could not get devices associated with LUN %d on target %s\nError: %v",
lunNumber, c.iscsiDisk.Iqn, err)
return err
}
// Find the multipath device path(s)
mpathDevices := make(map[string]bool)
for _, deviceName := range deviceNames {
path := "/dev/" + deviceName
// check if the dev is using mpio, and if so record the dm-XX multipath device for flushing
if mappedDevicePath := c.deviceUtil.FindMultipathDeviceForDevice(path); mappedDevicePath != "" {
mpathDevices[mappedDevicePath] = true
}
}
// Flush any multipath device maps
for mpathDevice := range mpathDevices {
_, err = c.exec.Run("multipath", "-f", mpathDevice)
if err != nil {
glog.Warningf("Warning: Failed to flush multipath device map: %s\nError: %v", mpathDevice, err)
// Fall through -- keep deleting the block devices
}
glog.V(4).Infof("Flushed multipath device: %s", mpathDevice)
}
for _, deviceName := range deviceNames {
err = deleteDevice(deviceName)
if err != nil {
glog.Warningf("Warning: Failed to delete block device: %s\nError: %v", deviceName, err)
// Fall through -- keep deleting other block devices
}
}
return nil
}
// DetachDisk unmounts and detaches a volume from node
func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error {
if pathExists, pathErr := volumeutil.PathExists(mntPath); pathErr != nil {
@@ -402,19 +553,23 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", mntPath)
return nil
}
if err := c.mounter.Unmount(mntPath); err != nil {
glog.Errorf("iscsi detach disk: failed to unmount: %s\nError: %v", mntPath, err)
notMnt, err := c.mounter.IsLikelyNotMountPoint(mntPath)
if err != nil {
return err
}
if !notMnt {
if err := c.mounter.Unmount(mntPath); err != nil {
glog.Errorf("iscsi detach disk: failed to unmount: %s\nError: %v", mntPath, err)
return err
}
}
// if device is no longer used, see if need to logout the target
device, prefix, err := extractDeviceAndPrefix(mntPath)
if err != nil {
return err
}
refCount, err := getDevicePrefixRefCount(c.mounter, prefix)
if err != nil || refCount != 0 {
return nil
}
var bkpPortal []string
var volName, iqn, iface, initiatorName string
@@ -438,6 +593,23 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error {
// Logout may fail as no session may exist for the portal/IQN on the specified interface.
iface, found = extractIface(mntPath)
}
// Delete all the scsi devices and any multipath devices after unmounting
if err = deleteDevices(c); err != nil {
glog.Warningf("iscsi detach disk: failed to delete devices\nError: %v", err)
// Fall through -- even if deleting fails, a logout may fix problems
}
// Lock the target while we determine if we can safely log out or not
c.plugin.targetLocks.LockKey(iqn)
defer c.plugin.targetLocks.UnlockKey(iqn)
// if device is no longer used, see if need to logout the target
refCount, err := getDevicePrefixRefCount(c.mounter, prefix)
if err != nil || refCount != 0 {
return nil
}
portals := removeDuplicate(bkpPortal)
if len(portals) == 0 {
return fmt.Errorf("iscsi detach disk: failed to detach iscsi disk. Couldn't get connected portals from configurations")
@@ -516,7 +688,7 @@ func (util *ISCSIUtil) DetachBlockISCSIDisk(c iscsiDiskUnmapper, mapPath string)
if err.Error() != volumepathhandler.ErrDeviceNotFound {
return fmt.Errorf("failed to get loopback for device: %v, err: %v", devicePath, err)
}
glog.Warning("iscsi: loopback for device: %s not found", device)
glog.Warningf("iscsi: loopback for device: %s not found", device)
}
// Detach a volume from kubelet node
err = util.detachISCSIDisk(c.exec, portals, iqn, iface, volName, initiatorName, found)
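
Both DetachDisk paths above gate the iscsiadm logout on getDevicePrefixRefCount, whose body is not part of this diff: only when no remaining mount is backed by a device sharing the portal's /dev/disk/by-path prefix is it safe to log out of the target. A rough sketch of that counting idea, under the assumption of a simplified mountPoint type:

package refcount // hypothetical package for the sketch

import "strings"

type mountPoint struct {
	Device string // device backing the mount
	Path   string // where it is mounted
}

// countDevicePrefixRefs reports how many of the given mounts are backed by a
// device whose path starts with prefix (e.g. the /dev/disk/by-path/...-iscsi-
// prefix shared by all LUNs behind one target). A count of zero means no other
// volume still uses the target, so logging out is safe.
func countDevicePrefixRefs(mounts []mountPoint, prefix string) int {
	refs := 0
	for _, m := range mounts {
		if strings.HasPrefix(m.Device, prefix) {
			refs++
		}
	}
	return refs
}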

View File

@@ -16,54 +16,48 @@ go_library(
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/validation:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = select({
"@io_bazel_rules_go//go/platform:darwin": [
"local_test.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"local_linux_test.go",
"local_test.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"local_test.go",
],
"//conditions:default": [],
}),
srcs = [
"local_linux_test.go",
"local_test.go",
],
embed = [":go_default_library"],
deps = select({
"@io_bazel_rules_go//go/platform:darwin": [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
"@io_bazel_rules_go//go/platform:linux": [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
"@io_bazel_rules_go//go/platform:windows": [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
"//conditions:default": [],
}),

View File

@@ -38,6 +38,10 @@ import (
"k8s.io/kubernetes/pkg/volume/validation"
)
const (
defaultFSType = "ext4"
)
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&localVolumePlugin{}}
@@ -59,7 +63,7 @@ const (
func (plugin *localVolumePlugin) Init(host volume.VolumeHost) error {
plugin.host = host
plugin.volumeLocks = keymutex.NewKeyMutex()
plugin.volumeLocks = keymutex.NewHashed(0)
plugin.recorder = host.GetEventRecorder()
return nil
}
@@ -106,7 +110,12 @@ func getVolumeSource(spec *volume.Spec) (*v1.LocalVolumeSource, bool, error) {
}
func (plugin *localVolumePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
volumeSource, readOnly, err := getVolumeSource(spec)
_, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
globalLocalPath, err := plugin.getGlobalLocalPath(spec)
if err != nil {
return nil, err
}
@@ -118,8 +127,8 @@ func (plugin *localVolumePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ vo
volName: spec.Name(),
mounter: plugin.host.GetMounter(plugin.GetPluginName()),
plugin: plugin,
globalPath: volumeSource.Path,
MetricsProvider: volume.NewMetricsStatFS(volumeSource.Path),
globalPath: globalLocalPath,
MetricsProvider: volume.NewMetricsStatFS(plugin.host.GetPodVolumeDir(pod.UID, stringsutil.EscapeQualifiedNameForDisk(localVolumePluginName), spec.Name())),
},
readOnly: readOnly,
}, nil
@@ -169,6 +178,7 @@ func (plugin *localVolumePlugin) NewBlockVolumeUnmapper(volName string,
// TODO: check if no path and no topology constraints are ok
func (plugin *localVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
fs := v1.PersistentVolumeFilesystem
localVolume := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: volumeName,
@@ -179,6 +189,7 @@ func (plugin *localVolumePlugin) ConstructVolumeSpec(volumeName, mountPath strin
Path: "",
},
},
VolumeMode: &fs,
},
}
return volume.NewSpecFromPersistentVolume(localVolume, false), nil
@@ -205,6 +216,163 @@ func (plugin *localVolumePlugin) ConstructBlockVolumeSpec(podUID types.UID, volu
return volume.NewSpecFromPersistentVolume(localVolume, false), nil
}
func (plugin *localVolumePlugin) generateBlockDeviceBaseGlobalPath() string {
return filepath.Join(plugin.host.GetPluginDir(localVolumePluginName), mount.MountsInGlobalPDPath)
}
func (plugin *localVolumePlugin) getGlobalLocalPath(spec *volume.Spec) (string, error) {
if spec.PersistentVolume.Spec.Local == nil || len(spec.PersistentVolume.Spec.Local.Path) == 0 {
return "", fmt.Errorf("local volume source is nil or local path is not set")
}
fileType, err := plugin.host.GetMounter(plugin.GetPluginName()).GetFileType(spec.PersistentVolume.Spec.Local.Path)
if err != nil {
return "", err
}
switch fileType {
case mount.FileTypeDirectory:
return spec.PersistentVolume.Spec.Local.Path, nil
case mount.FileTypeBlockDev:
return filepath.Join(plugin.generateBlockDeviceBaseGlobalPath(), spec.Name()), nil
default:
return "", fmt.Errorf("only directory and block device are supported")
}
}
var _ volume.DeviceMountableVolumePlugin = &localVolumePlugin{}
type deviceMounter struct {
plugin *localVolumePlugin
mounter *mount.SafeFormatAndMount
}
var _ volume.DeviceMounter = &deviceMounter{}
func (plugin *localVolumePlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return &deviceMounter{
plugin: plugin,
mounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
}, nil
}
func (dm *deviceMounter) mountLocalBlockDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
glog.V(4).Infof("local: mounting device %s to %s", devicePath, deviceMountPath)
notMnt, err := dm.mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
return err
}
notMnt = true
} else {
return err
}
}
if !notMnt {
return nil
}
fstype, err := getVolumeSourceFSType(spec)
if err != nil {
return err
}
ro, err := getVolumeSourceReadOnly(spec)
if err != nil {
return err
}
options := []string{}
if ro {
options = append(options, "ro")
}
mountOptions := util.MountOptionFromSpec(spec, options...)
err = dm.mounter.FormatAndMount(devicePath, deviceMountPath, fstype, mountOptions)
if err != nil {
os.Remove(deviceMountPath)
return fmt.Errorf("local: failed to mount device %s at %s (fstype: %s), error %v", devicePath, deviceMountPath, fstype, err)
}
glog.V(3).Infof("local: successfully mount device %s at %s (fstype: %s)", devicePath, deviceMountPath, fstype)
return nil
}
func (dm *deviceMounter) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
if spec.PersistentVolume.Spec.Local == nil || len(spec.PersistentVolume.Spec.Local.Path) == 0 {
return fmt.Errorf("local volume source is nil or local path is not set")
}
fileType, err := dm.mounter.GetFileType(spec.PersistentVolume.Spec.Local.Path)
if err != nil {
return err
}
switch fileType {
case mount.FileTypeBlockDev:
// local volume plugin does not implement AttachableVolumePlugin interface, so set devicePath to Path in PV spec directly
devicePath = spec.PersistentVolume.Spec.Local.Path
return dm.mountLocalBlockDevice(spec, devicePath, deviceMountPath)
case mount.FileTypeDirectory:
// if the given local volume path is already a filesystem directory, return directly
return nil
default:
return fmt.Errorf("only directory and block device are supported")
}
}
func getVolumeSourceFSType(spec *volume.Spec) (string, error) {
if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.Local != nil {
if spec.PersistentVolume.Spec.Local.FSType != nil {
return *spec.PersistentVolume.Spec.Local.FSType, nil
} else {
// if the FSType is not set in local PV spec, setting it to default ("ext4")
return defaultFSType, nil
}
}
return "", fmt.Errorf("spec does not reference a Local volume type")
}
func getVolumeSourceReadOnly(spec *volume.Spec) (bool, error) {
if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.Local != nil {
// a local volume used as a PersistentVolume gets the ReadOnly flag indirectly through
// the persistent-claim volume used to mount the PV
return spec.ReadOnly, nil
}
return false, fmt.Errorf("spec does not reference a Local volume type")
}
func (dm *deviceMounter) GetDeviceMountPath(spec *volume.Spec) (string, error) {
return dm.plugin.getGlobalLocalPath(spec)
}
func (plugin *localVolumePlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return &deviceMounter{
plugin: plugin,
mounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
}, nil
}
func (plugin *localVolumePlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mounter.GetMountRefs(deviceMountPath)
}
var _ volume.DeviceUnmounter = &deviceMounter{}
func (dm *deviceMounter) UnmountDevice(deviceMountPath string) error {
// If the local PV is a block device, the deviceMountPath is generated in a
// format like /var/lib/kubelet/plugins/kubernetes.io/local-volume/mounts/localpv.spec.Name;
// if it is a filesystem directory, the deviceMountPath is set directly to pvSpec.Local.Path.
// We only need to unmount a block device here, so check whether the deviceMountPath
// passed in falls under the base mount path /var/lib/kubelet/plugins/kubernetes.io/local-volume/mounts.
basemountPath := dm.plugin.generateBlockDeviceBaseGlobalPath()
if mount.PathWithinBase(deviceMountPath, basemountPath) {
return util.UnmountPath(deviceMountPath, dm.mounter)
}
return nil
}
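
mount.PathWithinBase is the guard that separates the two cases; conceptually it asks whether the relative path from the base to the target escapes via "..". A minimal sketch of that check, as an assumption about its behavior rather than the vendored code:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// pathWithinBase reports whether fullPath lives under basePath.
func pathWithinBase(fullPath, basePath string) bool {
	rel, err := filepath.Rel(basePath, fullPath)
	if err != nil {
		return false
	}
	// A relative path that starts with ".." escapes the base directory.
	return rel != ".." && !strings.HasPrefix(rel, ".."+string(filepath.Separator))
}

func main() {
	base := "/var/lib/kubelet/plugins/kubernetes.io/local-volume/mounts"
	fmt.Println(pathWithinBase(base+"/pvA", base))       // true  -> block-device mount, unmount it
	fmt.Println(pathWithinBase("/mnt/disks/ssd0", base)) // false -> plain directory, nothing to do
}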
// Local volumes represent a local directory on a node.
// The directory at the globalPath will be bind-mounted to the pod's directory
type localVolume struct {

View File

@@ -31,16 +31,18 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
const (
testPVName = "pvA"
testMountPath = "pods/poduid/volumes/kubernetes.io~local-volume/pvA"
testGlobalPath = "plugins/kubernetes.io~local-volume/volumeDevices/pvA"
testPodPath = "pods/poduid/volumeDevices/kubernetes.io~local-volume"
testNodeName = "fakeNodeName"
testPVName = "pvA"
testMountPath = "pods/poduid/volumes/kubernetes.io~local-volume/pvA"
testGlobalPath = "plugins/kubernetes.io~local-volume/volumeDevices/pvA"
testPodPath = "pods/poduid/volumeDevices/kubernetes.io~local-volume"
testNodeName = "fakeNodeName"
testBlockFormattingToFSGlobalPath = "plugins/kubernetes.io/local-volume/mounts/pvA"
)
func getPlugin(t *testing.T) (string, volume.VolumePlugin) {
@@ -102,6 +104,33 @@ func getPersistentPlugin(t *testing.T) (string, volume.PersistentVolumePlugin) {
return tmpDir, plug
}
func getDeviceMountablePluginWithBlockPath(t *testing.T, isBlockDevice bool) (string, volume.DeviceMountableVolumePlugin) {
tmpDir, err := utiltesting.MkTmpdir("localVolumeTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
plugMgr := volume.VolumePluginMgr{}
var pathToFSType map[string]mount.FileType
if isBlockDevice {
pathToFSType = map[string]mount.FileType{
tmpDir: mount.FileTypeBlockDev,
}
}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHostWithMounterFSType(tmpDir, nil, nil, pathToFSType))
plug, err := plugMgr.FindDeviceMountablePluginByName(localVolumePluginName)
if err != nil {
os.RemoveAll(tmpDir)
t.Fatalf("Can't find the plugin by name")
}
if plug.GetPluginName() != localVolumePluginName {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
return tmpDir, plug
}
func getTestVolume(readOnly bool, path string, isBlock bool) *volume.Spec {
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
@@ -179,6 +208,87 @@ func TestInvalidLocalPath(t *testing.T) {
}
}
func TestBlockDeviceGlobalPathAndMountDevice(t *testing.T) {
// Block device global mount path testing
tmpBlockDir, plug := getDeviceMountablePluginWithBlockPath(t, true)
defer os.RemoveAll(tmpBlockDir)
dm, err := plug.NewDeviceMounter()
if err != nil {
t.Errorf("Failed to make a new device mounter: %v", err)
}
pvSpec := getTestVolume(false, tmpBlockDir, false)
expectedGlobalPath := filepath.Join(tmpBlockDir, testBlockFormattingToFSGlobalPath)
actualPath, err := dm.GetDeviceMountPath(pvSpec)
if err != nil {
t.Errorf("Failed to get device mount path: %v", err)
}
if expectedGlobalPath != actualPath {
t.Fatalf("Expected device mount global path:%s, got: %s", expectedGlobalPath, actualPath)
}
fmt.Println("expected global path is:", expectedGlobalPath)
err = dm.MountDevice(pvSpec, tmpBlockDir, expectedGlobalPath)
if err != nil {
t.Fatal(err)
}
if _, err := os.Stat(actualPath); err != nil {
if os.IsNotExist(err) {
t.Errorf("DeviceMounter.MountDevice() failed, device mount path not created: %s", actualPath)
} else {
t.Errorf("DeviceMounter.MountDevice() failed: %v", err)
}
}
du, err := plug.NewDeviceUnmounter()
if err != nil {
t.Fatalf("Create device unmounter error: %v", err)
}
err = du.UnmountDevice(actualPath)
if err != nil {
t.Fatalf("Unmount device error: %v", err)
}
}
func TestFSGlobalPathAndMountDevice(t *testing.T) {
// FS global path testing
tmpFSDir, plug := getDeviceMountablePluginWithBlockPath(t, false)
defer os.RemoveAll(tmpFSDir)
dm, err := plug.NewDeviceMounter()
if err != nil {
t.Errorf("Failed to make a new device mounter: %v", err)
}
pvSpec := getTestVolume(false, tmpFSDir, false)
expectedGlobalPath := tmpFSDir
actualPath, err := dm.GetDeviceMountPath(pvSpec)
if err != nil {
t.Errorf("Failed to get device mount path: %v", err)
}
if expectedGlobalPath != actualPath {
t.Fatalf("Expected device mount global path:%s, got: %s", expectedGlobalPath, actualPath)
}
// MountDevice does nothing when the local path is already a filesystem directory
err = dm.MountDevice(pvSpec, tmpFSDir, expectedGlobalPath)
if err != nil {
t.Fatal(err)
}
if _, err := os.Stat(expectedGlobalPath); err != nil {
if os.IsNotExist(err) {
t.Errorf("DeviceMounter.MountDevice() failed, device mount path not created: %s", expectedGlobalPath)
} else {
t.Errorf("DeviceMounter.MountDevice() failed: %v", err)
}
}
}
func TestMountUnmount(t *testing.T) {
tmpDir, plug := getPlugin(t)
defer os.RemoveAll(tmpDir)
@@ -336,6 +446,14 @@ func TestConstructVolumeSpec(t *testing.T) {
t.Fatalf("PersistentVolume object nil")
}
if spec.PersistentVolume.Spec.VolumeMode == nil {
t.Fatalf("Volume mode has not been set.")
}
if *spec.PersistentVolume.Spec.VolumeMode != v1.PersistentVolumeFilesystem {
t.Errorf("Unexpected volume mode %q", *spec.PersistentVolume.Spec.VolumeMode)
}
ls := pv.Spec.PersistentVolumeSource.Local
if ls == nil {
t.Fatalf("LocalVolumeSource object nil")

View File

@@ -19,10 +19,10 @@ go_library(
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/recyclerclient:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)
@@ -34,11 +34,11 @@ go_test(
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
)

View File

@@ -21,11 +21,11 @@ go_library(
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)
@@ -41,10 +41,10 @@ go_test(
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)

View File

@@ -40,8 +40,13 @@ type photonPersistentDiskAttacher struct {
}
var _ volume.Attacher = &photonPersistentDiskAttacher{}
var _ volume.DeviceMounter = &photonPersistentDiskAttacher{}
var _ volume.AttachableVolumePlugin = &photonPersistentDiskPlugin{}
var _ volume.DeviceMountableVolumePlugin = &photonPersistentDiskPlugin{}
func (plugin *photonPersistentDiskPlugin) NewAttacher() (volume.Attacher, error) {
photonCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
@@ -55,6 +60,10 @@ func (plugin *photonPersistentDiskPlugin) NewAttacher() (volume.Attacher, error)
}, nil
}
func (plugin *photonPersistentDiskPlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return plugin.NewAttacher()
}
// Attaches the volume specified by the given spec to the given host.
// On success, returns the device path where the device was attached on the
// node.
@@ -182,7 +191,7 @@ func (attacher *photonPersistentDiskAttacher) GetDeviceMountPath(spec *volume.Sp
// by deviceMountPath; returns a list of paths.
func (plugin *photonPersistentDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
return mounter.GetMountRefs(deviceMountPath)
}
// MountDevice mounts device to global mount point.
@@ -229,6 +238,8 @@ type photonPersistentDiskDetacher struct {
var _ volume.Detacher = &photonPersistentDiskDetacher{}
var _ volume.DeviceUnmounter = &photonPersistentDiskDetacher{}
func (plugin *photonPersistentDiskPlugin) NewDetacher() (volume.Detacher, error) {
photonCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
@@ -242,6 +253,10 @@ func (plugin *photonPersistentDiskPlugin) NewDetacher() (volume.Detacher, error)
}, nil
}
func (plugin *photonPersistentDiskPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return plugin.NewDetacher()
}
// Detach the given device from the given host.
func (detacher *photonPersistentDiskDetacher) Detach(volumeName string, nodeName types.NodeName) error {

View File

@@ -88,9 +88,11 @@ func (util *PhotonDiskUtil) CreateVolume(p *photonPersistentDiskProvisioner) (pd
}
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volSizeBytes := capacity.Value()
// PhotonController works with GB, convert to GB with rounding up
volSizeGB := int(volumeutil.RoundUpSize(volSizeBytes, 1024*1024*1024))
// PhotonController works with GiB, convert to GiB with rounding up
volSizeGB, err := volumeutil.RoundUpToGiBInt(capacity)
if err != nil {
return "", 0, "", err
}
name := volumeutil.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 255)
volumeOptions := &photon.VolumeOptions{
CapacityGB: volSizeGB,
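
RoundUpToGiBInt replaces the hand-rolled RoundUpSize call and adds an error path for quantities it cannot represent (hence the new error check above). The core arithmetic is plain ceiling division; a self-contained sketch:

package main

import "fmt"

const GiB = int64(1024 * 1024 * 1024)

// roundUpToGiB returns the smallest whole number of GiB that holds sizeBytes.
func roundUpToGiB(sizeBytes int64) int64 {
	return (sizeBytes + GiB - 1) / GiB
}

func main() {
	fmt.Println(roundUpToGiB(1))       // 1
	fmt.Println(roundUpToGiB(GiB))     // 1
	fmt.Println(roundUpToGiB(GiB + 1)) // 2
	fmt.Println(roundUpToGiB(5 * GiB)) // 5
}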

View File

@@ -32,8 +32,8 @@ import (
"k8s.io/apimachinery/pkg/util/validation"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
)
@@ -203,9 +203,17 @@ type ProvisionableVolumePlugin interface {
// AttachableVolumePlugin is an extended interface of VolumePlugin and is used for volumes that require attachment
// to a node before mounting.
type AttachableVolumePlugin interface {
VolumePlugin
DeviceMountableVolumePlugin
NewAttacher() (Attacher, error)
NewDetacher() (Detacher, error)
}
// DeviceMountableVolumePlugin is an extended interface of VolumePlugin and is used
// for volumes that require mounting a device on the node before the volume can be bound to a pod.
type DeviceMountableVolumePlugin interface {
VolumePlugin
NewDeviceMounter() (DeviceMounter, error)
NewDeviceUnmounter() (DeviceUnmounter, error)
GetDeviceMountRefs(deviceMountPath string) ([]string, error)
}
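
The refactor makes every attachable plugin a device-mountable plugin by embedding one interface in the other; any type implementing the combined method set satisfies both. A toy sketch of the pattern with hypothetical, simplified names:

package main

import "fmt"

type DeviceMounterIface interface {
	NewDeviceMounter() (string, error) // return types simplified for the sketch
}

type AttacherIface interface {
	DeviceMounterIface // embedding: every attacher is also a device mounter
	NewAttacher() (string, error)
}

type fakePlugin struct{}

func (p *fakePlugin) NewAttacher() (string, error)      { return "attacher", nil }
func (p *fakePlugin) NewDeviceMounter() (string, error) { return p.NewAttacher() }

func main() {
	var a AttacherIface = &fakePlugin{}
	var d DeviceMounterIface = a // implicit: AttacherIface includes DeviceMounterIface
	s, _ := d.NewDeviceMounter()
	fmt.Println(s) // prints "attacher"
}

This mirrors how the iSCSI and Photon plugins in this commit implement NewDeviceMounter by delegating to NewAttacher.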
@@ -302,6 +310,9 @@ type VolumeHost interface {
// GetKubeClient returns a client interface
GetKubeClient() clientset.Interface
// GetCSIClient returns a client interface to csi.storage.k8s.io
GetCSIClient() csiclientset.Interface
// NewWrapperMounter finds an appropriate plugin with which to handle
// the provided spec. This is used to implement volume plugins which
// "wrap" other plugins. For example, the "secret" volume is
@@ -319,9 +330,6 @@ type VolumeHost interface {
// Get mounter interface.
GetMounter(pluginName string) mount.Interface
// Get writer interface for writing data to disk.
GetWriter() io.Writer
// Returns the hostname of the host kubelet is running on
GetHostName() string
@@ -605,7 +613,8 @@ func (pm *VolumePluginMgr) refreshProbedPlugins() {
}
pm.probedPlugins[event.Plugin.GetPluginName()] = event.Plugin
} else if event.Op == ProbeRemove {
delete(pm.probedPlugins, event.Plugin.GetPluginName())
// Plugin is not available on ProbeRemove event, only PluginName
delete(pm.probedPlugins, event.PluginName)
} else {
glog.Errorf("Unknown Operation on PluginName: %s.",
event.Plugin.GetPluginName())
@@ -760,6 +769,30 @@ func (pm *VolumePluginMgr) FindAttachablePluginByName(name string) (AttachableVo
return nil, nil
}
// FindDeviceMountablePluginBySpec fetches a device-mountable volume plugin by spec.
func (pm *VolumePluginMgr) FindDeviceMountablePluginBySpec(spec *Spec) (DeviceMountableVolumePlugin, error) {
volumePlugin, err := pm.FindPluginBySpec(spec)
if err != nil {
return nil, err
}
if deviceMountableVolumePlugin, ok := volumePlugin.(DeviceMountableVolumePlugin); ok {
return deviceMountableVolumePlugin, nil
}
return nil, nil
}
// FindDeviceMountablePluginByName fetches a devicemountable volume plugin by name.
func (pm *VolumePluginMgr) FindDeviceMountablePluginByName(name string) (DeviceMountableVolumePlugin, error) {
volumePlugin, err := pm.FindPluginByName(name)
if err != nil {
return nil, err
}
if deviceMountableVolumePlugin, ok := volumePlugin.(DeviceMountableVolumePlugin); ok {
return deviceMountableVolumePlugin, nil
}
return nil, nil
}
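
Both finders return (nil, nil) when the name or spec resolves to a plugin that simply is not device-mountable, so callers must treat a nil plugin as "no device mount step needed" rather than an error. A hedged usage sketch with a hypothetical helper:

package volumedemo // hypothetical caller for the sketch

import "k8s.io/kubernetes/pkg/volume"

// findAndMount shows the expected nil handling; plugMgr would come from kubelet setup.
func findAndMount(plugMgr *volume.VolumePluginMgr) error {
	plugin, err := plugMgr.FindDeviceMountablePluginByName("kubernetes.io/iscsi")
	if err != nil {
		return err // the name did not resolve to any registered plugin
	}
	if plugin == nil {
		return nil // plugin exists but needs no device mount step
	}
	dm, err := plugin.NewDeviceMounter()
	if err != nil {
		return err
	}
	_ = dm // proceed with GetDeviceMountPath / MountDevice
	return nil
}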
// FindExpandablePluginBySpec fetches a persistent volume plugin by spec.
func (pm *VolumePluginMgr) FindExpandablePluginBySpec(spec *Spec) (ExpandableVolumePlugin, error) {
volumePlugin, err := pm.FindPluginBySpec(spec)

View File

@@ -14,10 +14,10 @@ go_test(
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
)
@@ -35,16 +35,16 @@ go_library(
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/libopenstorage/openstorage/api:go_default_library",
"//vendor/github.com/libopenstorage/openstorage/api/client:go_default_library",
"//vendor/github.com/libopenstorage/openstorage/api/client/volume:go_default_library",
"//vendor/github.com/libopenstorage/openstorage/api/spec:go_default_library",
"//vendor/github.com/libopenstorage/openstorage/volume:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)

View File

@@ -269,10 +269,9 @@ var _ volume.Mounter = &portworxVolumeMounter{}
func (b *portworxVolumeMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: b.readOnly,
Managed: !b.readOnly,
// true ?
SupportsSELinux: true,
ReadOnly: b.readOnly,
Managed: !b.readOnly,
SupportsSELinux: false,
}
}

View File

@@ -71,8 +71,13 @@ func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (stri
spec = specHandler.DefaultSpec()
}
// Pass all parameters as volume labels for Portworx server-side processing.
spec.VolumeLabels = p.options.Parameters
// Pass all parameters as volume labels for Portworx server-side processing
if len(p.options.Parameters) > 0 {
spec.VolumeLabels = p.options.Parameters
} else {
spec.VolumeLabels = make(map[string]string, 0)
}
// Update the requested size in the spec
spec.Size = uint64(requestGiB * volutil.GIB)
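
The new branch matters because p.options.Parameters may be nil, and any later write into spec.VolumeLabels on the Portworx side would panic on a nil map (lookups are safe, writes are not). A short illustration of the Go rule being defended against:

package main

func main() {
	var labels map[string]string // nil map: lookups are fine, writes panic
	_ = labels["pvc"]            // ok: returns the zero value ""
	labels = make(map[string]string)
	labels["pvc"] = "demo" // safe only after make initializes the map
}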
@@ -206,7 +211,7 @@ func (util *PortworxVolumeUtil) ResizeVolume(spec *volume.Spec, newSize resource
newSizeInBytes := uint64(volutil.RoundUpToGiB(newSize) * volutil.GIB)
if vol.Spec.Size >= newSizeInBytes {
glog.Infof("Portworx volume: %s already at size: %d greater than or equal to new "+
"requested size: %d. Skipping resize.", vol.Spec.Size, newSizeInBytes)
"requested size: %d. Skipping resize.", spec.Name(), vol.Spec.Size, newSizeInBytes)
return nil
}

Some files were not shown because too many files have changed in this diff.