Add generated file
This PR adds generated files under the pkg/client and vendor folders.
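For context on how the vendored attacher code below is driven, here is a minimal sketch of the attach-and-stage call sequence a kubelet-side caller would follow against the volume.Attacher implementation added in csi_attacher.go. It is not part of this PR; the function name attachSketch and the two-minute timeout are illustrative assumptions, while the method names and signatures mirror the vendored code.

// attach_flow_sketch.go: illustrative only, not part of this PR.
package csi

import (
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/volume"
)

// attachSketch walks one volume through attach -> wait -> device staging.
func attachSketch(attacher volume.Attacher, spec *volume.Spec, node types.NodeName, pod *v1.Pod) error {
	// Create (or reuse) the VolumeAttachment object and get its name back.
	attachID, err := attacher.Attach(spec, node)
	if err != nil {
		return err
	}
	// Block until the external attacher marks the attachment as attached.
	devicePath, err := attacher.WaitForAttach(spec, attachID, pod, 2*time.Minute)
	if err != nil {
		return err
	}
	// Resolve the per-PV global mount path and stage the device there
	// (this is what triggers NodeStageVolume in the vendored code).
	deviceMountPath, err := attacher.GetDeviceMountPath(spec)
	if err != nil {
		return err
	}
	return attacher.MountDevice(spec, devicePath, deviceMountPath)
}

Note that for this CSI attacher, WaitForAttach returns the VolumeAttachment name rather than a real device path, which is why MountDevice looks the attachment up again by name.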
vendor/k8s.io/kubernetes/pkg/volume/csi/BUILD (83 lines, generated, vendored, normal file)
@@ -0,0 +1,83 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "csi_attacher.go",
        "csi_block.go",
        "csi_client.go",
        "csi_mounter.go",
        "csi_plugin.go",
        "csi_util.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/csi",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/features:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/csi/labelmanager:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/google.golang.org/grpc:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/storage/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "csi_attacher_test.go",
        "csi_block_test.go",
        "csi_client_test.go",
        "csi_mounter_test.go",
        "csi_plugin_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//pkg/volume:go_default_library",
        "//pkg/volume/csi/fake:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/storage/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/testing:go_default_library",
        "//vendor/k8s.io/client-go/util/testing:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/volume/csi/fake:all-srcs",
        "//pkg/volume/csi/labelmanager:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
vendor/k8s.io/kubernetes/pkg/volume/csi/OWNERS (4 lines, generated, vendored, normal file)
@@ -0,0 +1,4 @@
approvers:
- jsafrane
- saad-ali
- vladimirvivien
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go (567 lines, generated, vendored, normal file)
@@ -0,0 +1,567 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package csi

import (
	"context"
	"crypto/sha256"
	"errors"
	"fmt"
	"os"
	"path"
	"path/filepath"
	"strings"
	"time"

	"github.com/golang/glog"

	csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
	"k8s.io/api/core/v1"
	storage "k8s.io/api/storage/v1beta1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/volume"
)

const (
|
||||
persistentVolumeInGlobalPath = "pv"
|
||||
globalMountInGlobalPath = "globalmount"
|
||||
)
|
||||
|
||||
type csiAttacher struct {
|
||||
plugin *csiPlugin
|
||||
k8s kubernetes.Interface
|
||||
waitSleepTime time.Duration
|
||||
|
||||
csiClient csiClient
|
||||
}
|
||||
|
||||
// volume.Attacher methods
|
||||
var _ volume.Attacher = &csiAttacher{}
|
||||
|
||||
func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
|
||||
if spec == nil {
|
||||
glog.Error(log("attacher.Attach missing volume.Spec"))
|
||||
return "", errors.New("missing spec")
|
||||
}
|
||||
|
||||
csiSource, err := getCSISourceFromSpec(spec)
|
||||
if err != nil {
|
||||
glog.Error(log("attacher.Attach failed to get CSI persistent source: %v", err))
|
||||
return "", err
|
||||
}
|
||||
|
||||
node := string(nodeName)
|
||||
pvName := spec.PersistentVolume.GetName()
|
||||
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, node)
|
||||
|
||||
attachment := &storage.VolumeAttachment{
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Name: attachID,
|
||||
},
|
||||
Spec: storage.VolumeAttachmentSpec{
|
||||
NodeName: node,
|
||||
Attacher: csiSource.Driver,
|
||||
Source: storage.VolumeAttachmentSource{
|
||||
PersistentVolumeName: &pvName,
|
||||
},
|
||||
},
|
||||
Status: storage.VolumeAttachmentStatus{Attached: false},
|
||||
}
|
||||
|
||||
_, err = c.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
|
||||
alreadyExist := false
|
||||
if err != nil {
|
||||
if !apierrs.IsAlreadyExists(err) {
|
||||
glog.Error(log("attacher.Attach failed: %v", err))
|
||||
return "", err
|
||||
}
|
||||
alreadyExist = true
|
||||
}
|
||||
|
||||
if alreadyExist {
|
||||
glog.V(4).Info(log("attachment [%v] for volume [%v] already exists (will not be recreated)", attachID, csiSource.VolumeHandle))
|
||||
} else {
|
||||
glog.V(4).Info(log("attachment [%v] for volume [%v] created successfully", attachID, csiSource.VolumeHandle))
|
||||
}
|
||||
|
||||
if _, err := c.waitForVolumeAttachment(csiSource.VolumeHandle, attachID, csiTimeout); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
glog.V(4).Info(log("attacher.Attach finished OK with VolumeAttachment object [%s]", attachID))
|
||||
|
||||
return attachID, nil
|
||||
}
|
||||
|
||||
func (c *csiAttacher) WaitForAttach(spec *volume.Spec, attachID string, pod *v1.Pod, timeout time.Duration) (string, error) {
|
||||
source, err := getCSISourceFromSpec(spec)
|
||||
if err != nil {
|
||||
glog.Error(log("attacher.WaitForAttach failed to extract CSI volume source: %v", err))
|
||||
return "", err
|
||||
}
|
||||
|
||||
return c.waitForVolumeAttachment(source.VolumeHandle, attachID, timeout)
|
||||
}
|
||||
|
||||
func (c *csiAttacher) waitForVolumeAttachment(volumeHandle, attachID string, timeout time.Duration) (string, error) {
|
||||
glog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID))
|
||||
|
||||
timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable
|
||||
defer timer.Stop()
|
||||
|
||||
return c.waitForVolumeAttachmentInternal(volumeHandle, attachID, timer, timeout)
|
||||
}
|
||||
|
||||
func (c *csiAttacher) waitForVolumeAttachmentInternal(volumeHandle, attachID string, timer *time.Timer, timeout time.Duration) (string, error) {
|
||||
glog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
|
||||
attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
|
||||
if err != nil {
|
||||
glog.Error(log("attacher.WaitForAttach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
|
||||
return "", err
|
||||
}
|
||||
// if being deleted, fail fast
|
||||
if attach.GetDeletionTimestamp() != nil {
|
||||
glog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachID))
|
||||
return "", errors.New("volume attachment is being deleted")
|
||||
}
|
||||
// attachment OK
|
||||
if attach.Status.Attached {
|
||||
return attachID, nil
|
||||
}
|
||||
// driver reports attach error
|
||||
attachErr := attach.Status.AttachError
|
||||
if attachErr != nil {
|
||||
glog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message))
|
||||
return "", errors.New(attachErr.Message)
|
||||
}
|
||||
|
||||
watcher, err := c.k8s.StorageV1beta1().VolumeAttachments().Watch(meta.SingleObject(meta.ObjectMeta{Name: attachID, ResourceVersion: attach.ResourceVersion}))
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("watch error:%v for volume %v", err, volumeHandle)
|
||||
}
|
||||
|
||||
ch := watcher.ResultChan()
|
||||
defer watcher.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case event, ok := <-ch:
|
||||
if !ok {
|
||||
glog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID)
|
||||
return "", errors.New("volume attachment watch channel had been closed")
|
||||
}
|
||||
|
||||
switch event.Type {
|
||||
case watch.Added, watch.Modified:
|
||||
attach, _ := event.Object.(*storage.VolumeAttachment)
|
||||
// if being deleted, fail fast
|
||||
if attach.GetDeletionTimestamp() != nil {
|
||||
glog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachID))
|
||||
return "", errors.New("volume attachment is being deleted")
|
||||
}
|
||||
// attachment OK
|
||||
if attach.Status.Attached {
|
||||
return attachID, nil
|
||||
}
|
||||
// driver reports attach error
|
||||
attachErr := attach.Status.AttachError
|
||||
if attachErr != nil {
|
||||
glog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message))
|
||||
return "", errors.New(attachErr.Message)
|
||||
}
|
||||
case watch.Deleted:
|
||||
// if deleted, fail fast
|
||||
glog.Error(log("VolumeAttachment [%s] has been deleted, will not continue to wait for attachment", attachID))
|
||||
return "", errors.New("volume attachment has been deleted")
|
||||
|
||||
case watch.Error:
|
||||
// start another cycle
|
||||
c.waitForVolumeAttachmentInternal(volumeHandle, attachID, timer, timeout)
|
||||
}
|
||||
|
||||
case <-timer.C:
|
||||
glog.Error(log("attacher.WaitForAttach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID))
|
||||
return "", fmt.Errorf("attachment timeout for volume %v", volumeHandle)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
|
||||
glog.V(4).Info(log("probing attachment status for %d volume(s) ", len(specs)))
|
||||
|
||||
attached := make(map[*volume.Spec]bool)
|
||||
|
||||
for _, spec := range specs {
|
||||
if spec == nil {
|
||||
glog.Error(log("attacher.VolumesAreAttached missing volume.Spec"))
|
||||
return nil, errors.New("missing spec")
|
||||
}
|
||||
source, err := getCSISourceFromSpec(spec)
|
||||
if err != nil {
|
||||
glog.Error(log("attacher.VolumesAreAttached failed: %v", err))
|
||||
continue
|
||||
}
|
||||
|
||||
attachID := getAttachmentName(source.VolumeHandle, source.Driver, string(nodeName))
|
||||
glog.V(4).Info(log("probing attachment status for VolumeAttachment %v", attachID))
|
||||
attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
|
||||
if err != nil {
|
||||
glog.Error(log("attacher.VolumesAreAttached failed for attach.ID=%v: %v", attachID, err))
|
||||
continue
|
||||
}
|
||||
glog.V(4).Info(log("attacher.VolumesAreAttached attachment [%v] has status.attached=%t", attachID, attach.Status.Attached))
|
||||
attached[spec] = attach.Status.Attached
|
||||
}
|
||||
|
||||
return attached, nil
|
||||
}
|
||||
|
||||
func (c *csiAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
|
||||
glog.V(4).Info(log("attacher.GetDeviceMountPath(%v)", spec))
|
||||
deviceMountPath, err := makeDeviceMountPath(c.plugin, spec)
|
||||
if err != nil {
|
||||
glog.Error(log("attacher.GetDeviceMountPath failed to make device mount path: %v", err))
|
||||
return "", err
|
||||
}
|
||||
glog.V(4).Infof("attacher.GetDeviceMountPath succeeded, deviceMountPath: %s", deviceMountPath)
|
||||
return deviceMountPath, nil
|
||||
}
|
||||
|
||||
func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
|
||||
glog.V(4).Infof(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath))
|
||||
|
||||
mounted, err := isDirMounted(c.plugin, deviceMountPath)
|
||||
if err != nil {
|
||||
glog.Error(log("attacher.MountDevice failed while checking mount status for dir [%s]", deviceMountPath))
|
||||
return err
|
||||
}
|
||||
|
||||
if mounted {
|
||||
glog.V(4).Info(log("attacher.MountDevice skipping mount, dir already mounted [%s]", deviceMountPath))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Setup
|
||||
if spec == nil {
|
||||
return fmt.Errorf("attacher.MountDevice failed, spec is nil")
|
||||
}
|
||||
csiSource, err := getCSISourceFromSpec(spec)
|
||||
if err != nil {
|
||||
glog.Error(log("attacher.MountDevice failed to get CSI persistent source: %v", err))
|
||||
return err
|
||||
}
|
||||
|
||||
if c.csiClient == nil {
|
||||
c.csiClient = newCsiDriverClient(csiSource.Driver)
|
||||
}
|
||||
csi := c.csiClient
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
|
||||
defer cancel()
|
||||
// Check whether "STAGE_UNSTAGE_VOLUME" is set
|
||||
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
|
||||
if err != nil {
|
||||
glog.Error(log("attacher.MountDevice failed to check STAGE_UNSTAGE_VOLUME: %v", err))
|
||||
return err
|
||||
}
|
||||
if !stageUnstageSet {
|
||||
glog.Infof(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start MountDevice
|
||||
if deviceMountPath == "" {
|
||||
return fmt.Errorf("attacher.MountDevice failed, deviceMountPath is empty")
|
||||
}
|
||||
|
||||
nodeName := string(c.plugin.host.GetNodeName())
|
||||
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
|
||||
|
||||
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
|
||||
attachment, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
|
||||
if err != nil {
|
||||
glog.Error(log("attacher.MountDevice failed while getting volume attachment [id=%v]: %v", attachID, err))
|
||||
return err
|
||||
}
|
||||
|
||||
if attachment == nil {
|
||||
glog.Error(log("unable to find VolumeAttachment [id=%s]", attachID))
|
||||
return errors.New("no existing VolumeAttachment found")
|
||||
}
|
||||
publishVolumeInfo := attachment.Status.AttachmentMetadata
|
||||
|
||||
nodeStageSecrets := map[string]string{}
|
||||
if csiSource.NodeStageSecretRef != nil {
|
||||
nodeStageSecrets, err = getCredentialsFromSecret(c.k8s, csiSource.NodeStageSecretRef)
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetching NodeStageSecretRef %s/%s failed: %v",
|
||||
csiSource.NodeStageSecretRef.Namespace, csiSource.NodeStageSecretRef.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// create target_dir before call to NodeStageVolume
|
||||
if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
|
||||
glog.Error(log("attacher.MountDevice failed to create dir %#v: %v", deviceMountPath, err))
|
||||
return err
|
||||
}
|
||||
glog.V(4).Info(log("created target path successfully [%s]", deviceMountPath))
|
||||
|
||||
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
|
||||
accessMode := v1.ReadWriteOnce
|
||||
if spec.PersistentVolume.Spec.AccessModes != nil {
|
||||
accessMode = spec.PersistentVolume.Spec.AccessModes[0]
|
||||
}
|
||||
|
||||
fsType := csiSource.FSType
|
||||
if len(fsType) == 0 {
|
||||
fsType = defaultFSType
|
||||
}
|
||||
|
||||
err = csi.NodeStageVolume(ctx,
|
||||
csiSource.VolumeHandle,
|
||||
publishVolumeInfo,
|
||||
deviceMountPath,
|
||||
fsType,
|
||||
accessMode,
|
||||
nodeStageSecrets,
|
||||
csiSource.VolumeAttributes)
|
||||
|
||||
if err != nil {
|
||||
glog.Errorf(log("attacher.MountDevice failed: %v", err))
|
||||
if removeMountDirErr := removeMountDir(c.plugin, deviceMountPath); removeMountDirErr != nil {
|
||||
glog.Error(log("attacher.MountDevice failed to remove mount dir after a NodeStageVolume() error [%s]: %v", deviceMountPath, removeMountDirErr))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(4).Infof(log("attacher.MountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ volume.Detacher = &csiAttacher{}
|
||||
|
||||
func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error {
|
||||
// volumeName in format driverName<SEP>volumeHandle generated by plugin.GetVolumeName()
|
||||
if volumeName == "" {
|
||||
glog.Error(log("detacher.Detach missing value for parameter volumeName"))
|
||||
return errors.New("missing expected parameter volumeName")
|
||||
}
|
||||
parts := strings.Split(volumeName, volNameSep)
|
||||
if len(parts) != 2 {
|
||||
glog.Error(log("detacher.Detach insufficient info encoded in volumeName"))
|
||||
return errors.New("volumeName missing expected data")
|
||||
}
|
||||
|
||||
driverName := parts[0]
|
||||
volID := parts[1]
|
||||
attachID := getAttachmentName(volID, driverName, string(nodeName))
|
||||
if err := c.k8s.StorageV1beta1().VolumeAttachments().Delete(attachID, nil); err != nil {
|
||||
if apierrs.IsNotFound(err) {
|
||||
// object deleted or never existed, done
|
||||
glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volID))
|
||||
return nil
|
||||
}
|
||||
glog.Error(log("detacher.Detach failed to delete VolumeAttachment [%s]: %v", attachID, err))
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(4).Info(log("detacher deleted ok VolumeAttachment.ID=%s", attachID))
|
||||
return c.waitForVolumeDetachment(volID, attachID)
|
||||
}
|
||||
|
||||
func (c *csiAttacher) waitForVolumeDetachment(volumeHandle, attachID string) error {
|
||||
glog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID))
|
||||
|
||||
timeout := c.waitSleepTime * 10
|
||||
timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable
|
||||
defer timer.Stop()
|
||||
|
||||
return c.waitForVolumeDetachmentInternal(volumeHandle, attachID, timer, timeout)
|
||||
}
|
||||
|
||||
func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID string, timer *time.Timer, timeout time.Duration) error {
|
||||
glog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
|
||||
attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
|
||||
if err != nil {
|
||||
if apierrs.IsNotFound(err) {
|
||||
//object deleted or never existed, done
|
||||
glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volumeHandle))
|
||||
return nil
|
||||
}
|
||||
glog.Error(log("detacher.WaitForDetach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
|
||||
return err
|
||||
}
|
||||
// driver reports attach error
|
||||
detachErr := attach.Status.DetachError
|
||||
if detachErr != nil {
|
||||
glog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message))
|
||||
return errors.New(detachErr.Message)
|
||||
}
|
||||
|
||||
watcher, err := c.k8s.StorageV1beta1().VolumeAttachments().Watch(meta.SingleObject(meta.ObjectMeta{Name: attachID, ResourceVersion: attach.ResourceVersion}))
|
||||
if err != nil {
|
||||
return fmt.Errorf("watch error:%v for volume %v", err, volumeHandle)
|
||||
}
|
||||
ch := watcher.ResultChan()
|
||||
defer watcher.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case event, ok := <-ch:
|
||||
if !ok {
|
||||
glog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID)
|
||||
return errors.New("volume attachment watch channel had been closed")
|
||||
}
|
||||
|
||||
switch event.Type {
|
||||
case watch.Added, watch.Modified:
|
||||
attach, _ := event.Object.(*storage.VolumeAttachment)
|
||||
// driver reports attach error
|
||||
detachErr := attach.Status.DetachError
|
||||
if detachErr != nil {
|
||||
glog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message))
|
||||
return errors.New(detachErr.Message)
|
||||
}
|
||||
case watch.Deleted:
|
||||
//object deleted
|
||||
glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] has been deleted", attachID, volumeHandle))
|
||||
return nil
|
||||
|
||||
case watch.Error:
|
||||
// start another cycle
|
||||
c.waitForVolumeDetachmentInternal(volumeHandle, attachID, timer, timeout)
|
||||
}
|
||||
|
||||
case <-timer.C:
|
||||
glog.Error(log("detacher.WaitForDetach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID))
|
||||
return fmt.Errorf("detachment timeout for volume %v", volumeHandle)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
|
||||
glog.V(4).Info(log("attacher.UnmountDevice(%s)", deviceMountPath))
|
||||
|
||||
// Setup
|
||||
driverName, volID, err := getDriverAndVolNameFromDeviceMountPath(c.k8s, deviceMountPath)
|
||||
if err != nil {
|
||||
glog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
|
||||
return err
|
||||
}
|
||||
|
||||
if c.csiClient == nil {
|
||||
c.csiClient = newCsiDriverClient(driverName)
|
||||
}
|
||||
csi := c.csiClient
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
|
||||
defer cancel()
|
||||
// Check whether "STAGE_UNSTAGE_VOLUME" is set
|
||||
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
|
||||
if err != nil {
|
||||
glog.Errorf(log("attacher.UnmountDevice failed to check whether STAGE_UNSTAGE_VOLUME set: %v", err))
|
||||
return err
|
||||
}
|
||||
if !stageUnstageSet {
|
||||
glog.Infof(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice..."))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start UnmountDevice
|
||||
err = csi.NodeUnstageVolume(ctx,
|
||||
volID,
|
||||
deviceMountPath)
|
||||
|
||||
if err != nil {
|
||||
glog.Errorf(log("attacher.UnmountDevice failed: %v", err))
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(4).Infof(log("attacher.UnmountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
|
||||
return nil
|
||||
}
|
||||
|
||||
func hasStageUnstageCapability(ctx context.Context, csi csiClient) (bool, error) {
|
||||
capabilities, err := csi.NodeGetCapabilities(ctx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
stageUnstageSet := false
|
||||
if capabilities == nil {
|
||||
return false, nil
|
||||
}
|
||||
for _, capability := range capabilities {
|
||||
if capability.GetRpc().GetType() == csipb.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
|
||||
stageUnstageSet = true
|
||||
}
|
||||
}
|
||||
return stageUnstageSet, nil
|
||||
}
|
||||
|
||||
// getAttachmentName returns csi-<sha256(volName,csiDriverName,nodeName)>
|
||||
func getAttachmentName(volName, csiDriverName, nodeName string) string {
|
||||
result := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", volName, csiDriverName, nodeName)))
|
||||
return fmt.Sprintf("csi-%x", result)
|
||||
}
|
||||
|
||||
func makeDeviceMountPath(plugin *csiPlugin, spec *volume.Spec) (string, error) {
|
||||
if spec == nil {
|
||||
return "", fmt.Errorf("makeDeviceMountPath failed, spec is nil")
|
||||
}
|
||||
|
||||
pvName := spec.PersistentVolume.Name
|
||||
if pvName == "" {
|
||||
return "", fmt.Errorf("makeDeviceMountPath failed, pv name empty")
|
||||
}
|
||||
|
||||
return path.Join(plugin.host.GetPluginDir(plugin.GetPluginName()), persistentVolumeInGlobalPath, pvName, globalMountInGlobalPath), nil
|
||||
}
|
||||
|
||||
func getDriverAndVolNameFromDeviceMountPath(k8s kubernetes.Interface, deviceMountPath string) (string, string, error) {
|
||||
// deviceMountPath structure: /var/lib/kubelet/plugins/kubernetes.io/csi/pv/{pvname}/globalmount
|
||||
dir := filepath.Dir(deviceMountPath)
|
||||
if file := filepath.Base(deviceMountPath); file != globalMountInGlobalPath {
|
||||
return "", "", fmt.Errorf("getDriverAndVolNameFromDeviceMountPath failed, path did not end in %s", globalMountInGlobalPath)
|
||||
}
|
||||
// dir is now /var/lib/kubelet/plugins/kubernetes.io/csi/pv/{pvname}
|
||||
pvName := filepath.Base(dir)
|
||||
|
||||
// Get PV and check for errors
|
||||
pv, err := k8s.CoreV1().PersistentVolumes().Get(pvName, meta.GetOptions{})
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
if pv == nil || pv.Spec.CSI == nil {
|
||||
return "", "", fmt.Errorf("getDriverAndVolNameFromDeviceMountPath could not find CSI Persistent Volume Source for pv: %s", pvName)
|
||||
}
|
||||
|
||||
// Get VolumeHandle and PluginName from pv
|
||||
csiSource := pv.Spec.CSI
|
||||
if csiSource.Driver == "" {
|
||||
return "", "", fmt.Errorf("getDriverAndVolNameFromDeviceMountPath failed, driver name empty")
|
||||
}
|
||||
if csiSource.VolumeHandle == "" {
|
||||
return "", "", fmt.Errorf("getDriverAndVolNameFromDeviceMountPath failed, VolumeHandle empty")
|
||||
}
|
||||
|
||||
return csiSource.Driver, csiSource.VolumeHandle, nil
|
||||
}
|
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher_test.go (747 lines, generated, vendored, normal file)
@@ -0,0 +1,747 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package csi

import (
	"fmt"
	"os"
	"path/filepath"
	"testing"
	"time"

	storage "k8s.io/api/storage/v1beta1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/watch"
	fakeclient "k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
	utiltesting "k8s.io/client-go/util/testing"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

func makeTestAttachment(attachID, nodeName, pvName string) *storage.VolumeAttachment {
|
||||
return &storage.VolumeAttachment{
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Name: attachID,
|
||||
},
|
||||
Spec: storage.VolumeAttachmentSpec{
|
||||
NodeName: nodeName,
|
||||
Attacher: "mock",
|
||||
Source: storage.VolumeAttachmentSource{
|
||||
PersistentVolumeName: &pvName,
|
||||
},
|
||||
},
|
||||
Status: storage.VolumeAttachmentStatus{
|
||||
Attached: false,
|
||||
AttachError: nil,
|
||||
DetachError: nil,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttacherAttach(t *testing.T) {
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
nodeName string
|
||||
driverName string
|
||||
volumeName string
|
||||
attachID string
|
||||
injectAttacherError bool
|
||||
shouldFail bool
|
||||
}{
|
||||
{
|
||||
name: "test ok 1",
|
||||
nodeName: "testnode-01",
|
||||
driverName: "testdriver-01",
|
||||
volumeName: "testvol-01",
|
||||
attachID: getAttachmentName("testvol-01", "testdriver-01", "testnode-01"),
|
||||
},
|
||||
{
|
||||
name: "test ok 2",
|
||||
nodeName: "node02",
|
||||
driverName: "driver02",
|
||||
volumeName: "vol02",
|
||||
attachID: getAttachmentName("vol02", "driver02", "node02"),
|
||||
},
|
||||
{
|
||||
name: "mismatch vol",
|
||||
nodeName: "node02",
|
||||
driverName: "driver02",
|
||||
volumeName: "vol01",
|
||||
attachID: getAttachmentName("vol02", "driver02", "node02"),
|
||||
shouldFail: true,
|
||||
},
|
||||
{
|
||||
name: "mismatch driver",
|
||||
nodeName: "node02",
|
||||
driverName: "driver000",
|
||||
volumeName: "vol02",
|
||||
attachID: getAttachmentName("vol02", "driver02", "node02"),
|
||||
shouldFail: true,
|
||||
},
|
||||
{
|
||||
name: "mismatch node",
|
||||
nodeName: "node000",
|
||||
driverName: "driver000",
|
||||
volumeName: "vol02",
|
||||
attachID: getAttachmentName("vol02", "driver02", "node02"),
|
||||
shouldFail: true,
|
||||
},
|
||||
{
|
||||
name: "attacher error",
|
||||
nodeName: "node02",
|
||||
driverName: "driver02",
|
||||
volumeName: "vol02",
|
||||
attachID: getAttachmentName("vol02", "driver02", "node02"),
|
||||
injectAttacherError: true,
|
||||
shouldFail: true,
|
||||
},
|
||||
}
|
||||
|
||||
// attacher loop
|
||||
for i, tc := range testCases {
|
||||
t.Logf("test case: %s", tc.name)
|
||||
|
||||
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
attacher, err := plug.NewAttacher()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create new attacher: %v", err)
|
||||
}
|
||||
|
||||
csiAttacher := attacher.(*csiAttacher)
|
||||
|
||||
spec := volume.NewSpecFromPersistentVolume(makeTestPV(fmt.Sprintf("test-pv%d", i), 10, tc.driverName, tc.volumeName), false)
|
||||
|
||||
go func(id, nodename string, fail bool) {
|
||||
attachID, err := csiAttacher.Attach(spec, types.NodeName(nodename))
|
||||
if !fail && err != nil {
|
||||
t.Errorf("expecting no failure, but got err: %v", err)
|
||||
}
|
||||
if fail && err == nil {
|
||||
t.Errorf("expecting failure, but got no err")
|
||||
}
|
||||
if attachID != id && !fail {
|
||||
t.Errorf("expecting attachID %v, got %v", id, attachID)
|
||||
}
|
||||
}(tc.attachID, tc.nodeName, tc.shouldFail)
|
||||
|
||||
// update attachment to avoid long waitForAttachment
|
||||
ticker := time.NewTicker(10 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
// wait for attachment to be saved
|
||||
var attach *storage.VolumeAttachment
|
||||
for i := 0; i < 100; i++ {
|
||||
attach, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Get(tc.attachID, meta.GetOptions{})
|
||||
if err != nil {
|
||||
if apierrs.IsNotFound(err) {
|
||||
<-ticker.C
|
||||
continue
|
||||
}
|
||||
t.Error(err)
|
||||
}
|
||||
if attach != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if attach == nil {
|
||||
t.Logf("attachment not found for id:%v", tc.attachID)
|
||||
} else {
|
||||
if tc.injectAttacherError {
|
||||
attach.Status.Attached = false
|
||||
attach.Status.AttachError = &storage.VolumeError{
|
||||
Message: "attacher error",
|
||||
}
|
||||
} else {
|
||||
attach.Status.Attached = true
|
||||
}
|
||||
_, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Update(attach)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
fakeWatcher.Modify(attach)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttacherWaitForVolumeAttachment(t *testing.T) {
|
||||
nodeName := "test-node"
|
||||
testCases := []struct {
|
||||
name string
|
||||
initAttached bool
|
||||
finalAttached bool
|
||||
trigerWatchEventTime time.Duration
|
||||
initAttachErr *storage.VolumeError
|
||||
finalAttachErr *storage.VolumeError
|
||||
timeout time.Duration
|
||||
shouldFail bool
|
||||
}{
|
||||
{
|
||||
name: "attach success at get",
|
||||
initAttached: true,
|
||||
timeout: 50 * time.Millisecond,
|
||||
shouldFail: false,
|
||||
},
|
||||
{
|
||||
name: "attachment error at get",
|
||||
initAttachErr: &storage.VolumeError{Message: "missing volume"},
|
||||
timeout: 30 * time.Millisecond,
|
||||
shouldFail: true,
|
||||
},
|
||||
{
|
||||
name: "attach success at watch",
|
||||
initAttached: false,
|
||||
finalAttached: true,
|
||||
trigerWatchEventTime: 5 * time.Millisecond,
|
||||
timeout: 50 * time.Millisecond,
|
||||
shouldFail: false,
|
||||
},
|
||||
{
|
||||
name: "attachment error at watch",
|
||||
initAttached: false,
|
||||
finalAttached: false,
|
||||
finalAttachErr: &storage.VolumeError{Message: "missing volume"},
|
||||
trigerWatchEventTime: 5 * time.Millisecond,
|
||||
timeout: 30 * time.Millisecond,
|
||||
shouldFail: true,
|
||||
},
|
||||
{
|
||||
name: "time ran out",
|
||||
initAttached: false,
|
||||
finalAttached: true,
|
||||
trigerWatchEventTime: 100 * time.Millisecond,
|
||||
timeout: 50 * time.Millisecond,
|
||||
shouldFail: true,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
attacher, err := plug.NewAttacher()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create new attacher: %v", err)
|
||||
}
|
||||
csiAttacher := attacher.(*csiAttacher)
|
||||
t.Logf("running test: %v", tc.name)
|
||||
pvName := fmt.Sprintf("test-pv-%d", i)
|
||||
volID := fmt.Sprintf("test-vol-%d", i)
|
||||
attachID := getAttachmentName(volID, testDriver, nodeName)
|
||||
attachment := makeTestAttachment(attachID, nodeName, pvName)
|
||||
attachment.Status.Attached = tc.initAttached
|
||||
attachment.Status.AttachError = tc.initAttachErr
|
||||
_, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to attach: %v", err)
|
||||
}
|
||||
|
||||
trigerWatchEventTime := tc.trigerWatchEventTime
|
||||
finalAttached := tc.finalAttached
|
||||
finalAttachErr := tc.finalAttachErr
|
||||
// after timeout, fakeWatcher will be closed by csiAttacher.waitForVolumeAttachment
|
||||
if tc.trigerWatchEventTime > 0 && tc.trigerWatchEventTime < tc.timeout {
|
||||
go func() {
|
||||
time.Sleep(trigerWatchEventTime)
|
||||
attachment := makeTestAttachment(attachID, nodeName, pvName)
|
||||
attachment.Status.Attached = finalAttached
|
||||
attachment.Status.AttachError = finalAttachErr
|
||||
fakeWatcher.Modify(attachment)
|
||||
}()
|
||||
}
|
||||
|
||||
retID, err := csiAttacher.waitForVolumeAttachment(volID, attachID, tc.timeout)
|
||||
if tc.shouldFail && err == nil {
|
||||
t.Error("expecting failure, but err is nil")
|
||||
}
|
||||
if tc.initAttachErr != nil {
|
||||
if tc.initAttachErr.Message != err.Error() {
|
||||
t.Errorf("expecting error [%v], got [%v]", tc.initAttachErr.Message, err.Error())
|
||||
}
|
||||
}
|
||||
if err == nil && retID != attachID {
|
||||
t.Errorf("attacher.WaitForAttach not returning attachment ID")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttacherVolumesAreAttached(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
attacher, err := plug.NewAttacher()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create new attacher: %v", err)
|
||||
}
|
||||
csiAttacher := attacher.(*csiAttacher)
|
||||
nodeName := "test-node"
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
attachedStats map[string]bool
|
||||
}{
|
||||
{"attach + detach", map[string]bool{"vol-01": true, "vol-02": true, "vol-03": false, "vol-04": false, "vol-05": true}},
|
||||
{"all detached", map[string]bool{"vol-11": false, "vol-12": false, "vol-13": false, "vol-14": false, "vol-15": false}},
|
||||
{"all attached", map[string]bool{"vol-21": true, "vol-22": true, "vol-23": true, "vol-24": true, "vol-25": true}},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
var specs []*volume.Spec
|
||||
// create and save volume attachments
|
||||
for volName, stat := range tc.attachedStats {
|
||||
pv := makeTestPV("test-pv", 10, testDriver, volName)
|
||||
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
|
||||
specs = append(specs, spec)
|
||||
attachID := getAttachmentName(volName, testDriver, nodeName)
|
||||
attachment := makeTestAttachment(attachID, nodeName, pv.GetName())
|
||||
attachment.Status.Attached = stat
|
||||
_, err := csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to attach: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// retrieve attached status
|
||||
stats, err := csiAttacher.VolumesAreAttached(specs, types.NodeName(nodeName))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(tc.attachedStats) != len(stats) {
|
||||
t.Errorf("expecting %d attachment status, got %d", len(tc.attachedStats), len(stats))
|
||||
}
|
||||
|
||||
// compare attachment status for each spec
|
||||
for spec, stat := range stats {
|
||||
source, err := getCSISourceFromSpec(spec)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if stat != tc.attachedStats[source.VolumeHandle] {
|
||||
t.Errorf("expecting volume attachment %t, got %t", tc.attachedStats[source.VolumeHandle], stat)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttacherDetach(t *testing.T) {
|
||||
|
||||
nodeName := "test-node"
|
||||
testCases := []struct {
|
||||
name string
|
||||
volID string
|
||||
attachID string
|
||||
shouldFail bool
|
||||
reactor func(action core.Action) (handled bool, ret runtime.Object, err error)
|
||||
}{
|
||||
{name: "normal test", volID: "vol-001", attachID: getAttachmentName("vol-001", testDriver, nodeName)},
|
||||
{name: "normal test 2", volID: "vol-002", attachID: getAttachmentName("vol-002", testDriver, nodeName)},
|
||||
{name: "object not found", volID: "vol-non-existing", attachID: getAttachmentName("vol-003", testDriver, nodeName)},
|
||||
{
|
||||
name: "API error",
|
||||
volID: "vol-004",
|
||||
attachID: getAttachmentName("vol-004", testDriver, nodeName),
|
||||
shouldFail: true, // All other API errors should be propagated to caller
|
||||
reactor: func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
// return Forbidden to all DELETE requests
|
||||
if action.Matches("delete", "volumeattachments") {
|
||||
return true, nil, apierrs.NewForbidden(action.GetResource().GroupResource(), action.GetNamespace(), fmt.Errorf("mock error"))
|
||||
}
|
||||
return false, nil, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("running test: %v", tc.name)
|
||||
plug, fakeWatcher, tmpDir, client := newTestWatchPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
if tc.reactor != nil {
|
||||
client.PrependReactor("*", "*", tc.reactor)
|
||||
}
|
||||
|
||||
attacher, err0 := plug.NewAttacher()
|
||||
if err0 != nil {
|
||||
t.Fatalf("failed to create new attacher: %v", err0)
|
||||
}
|
||||
csiAttacher := attacher.(*csiAttacher)
|
||||
|
||||
pv := makeTestPV("test-pv", 10, testDriver, tc.volID)
|
||||
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
|
||||
attachment := makeTestAttachment(tc.attachID, nodeName, "test-pv")
|
||||
_, err := csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to attach: %v", err)
|
||||
}
|
||||
volumeName, err := plug.GetVolumeName(spec)
|
||||
if err != nil {
|
||||
t.Errorf("test case %s failed: %v", tc.name, err)
|
||||
}
|
||||
go func() {
|
||||
fakeWatcher.Delete(attachment)
|
||||
}()
|
||||
err = csiAttacher.Detach(volumeName, types.NodeName(nodeName))
|
||||
if tc.shouldFail && err == nil {
|
||||
t.Fatal("expecting failure, but err = nil")
|
||||
}
|
||||
if !tc.shouldFail && err != nil {
|
||||
t.Fatalf("unexpected err: %v", err)
|
||||
}
|
||||
attach, err := csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Get(tc.attachID, meta.GetOptions{})
|
||||
if err != nil {
|
||||
if !apierrs.IsNotFound(err) {
|
||||
t.Fatalf("unexpected err: %v", err)
|
||||
}
|
||||
} else {
|
||||
if attach == nil {
|
||||
t.Errorf("expecting attachment not to be nil, but it is")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttacherGetDeviceMountPath(t *testing.T) {
|
||||
// Setup
|
||||
// Create a new attacher
|
||||
plug, _, tmpDir, _ := newTestWatchPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
attacher, err0 := plug.NewAttacher()
|
||||
if err0 != nil {
|
||||
t.Fatalf("failed to create new attacher: %v", err0)
|
||||
}
|
||||
csiAttacher := attacher.(*csiAttacher)
|
||||
|
||||
pluginDir := csiAttacher.plugin.host.GetPluginDir(plug.GetPluginName())
|
||||
|
||||
testCases := []struct {
|
||||
testName string
|
||||
pvName string
|
||||
expectedMountPath string
|
||||
shouldFail bool
|
||||
}{
|
||||
{
|
||||
testName: "normal test",
|
||||
pvName: "test-pv1",
|
||||
expectedMountPath: pluginDir + "/pv/test-pv1/globalmount",
|
||||
},
|
||||
{
|
||||
testName: "no pv name",
|
||||
pvName: "",
|
||||
expectedMountPath: pluginDir + "/pv/test-pv1/globalmount",
|
||||
shouldFail: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("Running test case: %s", tc.testName)
|
||||
var spec *volume.Spec
|
||||
|
||||
// Create spec
|
||||
pv := makeTestPV(tc.pvName, 10, testDriver, "testvol")
|
||||
spec = volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
|
||||
|
||||
// Run
|
||||
mountPath, err := csiAttacher.GetDeviceMountPath(spec)
|
||||
|
||||
// Verify
|
||||
if err != nil && !tc.shouldFail {
|
||||
t.Errorf("test should not fail, but error occurred: %v", err)
|
||||
} else if err == nil {
|
||||
if tc.shouldFail {
|
||||
t.Errorf("test should fail, but no error occurred")
|
||||
} else if mountPath != tc.expectedMountPath {
|
||||
t.Errorf("mountPath does not equal expectedMountPath. Got: %s. Expected: %s", mountPath, tc.expectedMountPath)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttacherMountDevice(t *testing.T) {
|
||||
testCases := []struct {
|
||||
testName string
|
||||
volName string
|
||||
devicePath string
|
||||
deviceMountPath string
|
||||
stageUnstageSet bool
|
||||
shouldFail bool
|
||||
}{
|
||||
{
|
||||
testName: "normal",
|
||||
volName: "test-vol1",
|
||||
devicePath: "path1",
|
||||
deviceMountPath: "path2",
|
||||
stageUnstageSet: true,
|
||||
},
|
||||
{
|
||||
testName: "no vol name",
|
||||
volName: "",
|
||||
devicePath: "path1",
|
||||
deviceMountPath: "path2",
|
||||
stageUnstageSet: true,
|
||||
shouldFail: true,
|
||||
},
|
||||
{
|
||||
testName: "no device path",
|
||||
volName: "test-vol1",
|
||||
devicePath: "",
|
||||
deviceMountPath: "path2",
|
||||
stageUnstageSet: true,
|
||||
shouldFail: false,
|
||||
},
|
||||
{
|
||||
testName: "no device mount path",
|
||||
volName: "test-vol1",
|
||||
devicePath: "path1",
|
||||
deviceMountPath: "",
|
||||
stageUnstageSet: true,
|
||||
shouldFail: true,
|
||||
},
|
||||
{
|
||||
testName: "stage_unstage cap not set",
|
||||
volName: "test-vol1",
|
||||
devicePath: "path1",
|
||||
deviceMountPath: "path2",
|
||||
stageUnstageSet: false,
|
||||
},
|
||||
{
|
||||
testName: "stage_unstage not set no vars should not fail",
|
||||
stageUnstageSet: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("Running test case: %s", tc.testName)
|
||||
var spec *volume.Spec
|
||||
pvName := "test-pv"
|
||||
|
||||
// Setup
|
||||
// Create a new attacher
|
||||
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
attacher, err0 := plug.NewAttacher()
|
||||
if err0 != nil {
|
||||
t.Fatalf("failed to create new attacher: %v", err0)
|
||||
}
|
||||
csiAttacher := attacher.(*csiAttacher)
|
||||
csiAttacher.csiClient = setupClient(t, tc.stageUnstageSet)
|
||||
|
||||
nodeName := string(csiAttacher.plugin.host.GetNodeName())
|
||||
|
||||
// Create spec
|
||||
pv := makeTestPV(pvName, 10, testDriver, tc.volName)
|
||||
spec = volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
|
||||
|
||||
attachID := getAttachmentName(tc.volName, testDriver, nodeName)
|
||||
|
||||
// Set up volume attachment
|
||||
attachment := makeTestAttachment(attachID, nodeName, pvName)
|
||||
_, err := csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to attach: %v", err)
|
||||
}
|
||||
go func() {
|
||||
fakeWatcher.Delete(attachment)
|
||||
}()
|
||||
|
||||
// Run
|
||||
err = csiAttacher.MountDevice(spec, tc.devicePath, tc.deviceMountPath)
|
||||
|
||||
// Verify
|
||||
if err != nil {
|
||||
if !tc.shouldFail {
|
||||
t.Errorf("test should not fail, but error occurred: %v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err == nil && tc.shouldFail {
|
||||
t.Errorf("test should fail, but no error occurred")
|
||||
}
|
||||
|
||||
// Verify call goes through all the way
|
||||
numStaged := 1
|
||||
if !tc.stageUnstageSet {
|
||||
numStaged = 0
|
||||
}
|
||||
|
||||
cdc := csiAttacher.csiClient.(*fakeCsiDriverClient)
|
||||
staged := cdc.nodeClient.GetNodeStagedVolumes()
|
||||
if len(staged) != numStaged {
|
||||
t.Errorf("got wrong number of staged volumes, expecting %v got: %v", numStaged, len(staged))
|
||||
}
|
||||
if tc.stageUnstageSet {
|
||||
gotPath, ok := staged[tc.volName]
|
||||
if !ok {
|
||||
t.Errorf("could not find staged volume: %s", tc.volName)
|
||||
}
|
||||
if gotPath != tc.deviceMountPath {
|
||||
t.Errorf("expected mount path: %s. got: %s", tc.deviceMountPath, gotPath)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttacherUnmountDevice(t *testing.T) {
|
||||
testCases := []struct {
|
||||
testName string
|
||||
volID string
|
||||
deviceMountPath string
|
||||
stageUnstageSet bool
|
||||
shouldFail bool
|
||||
}{
|
||||
{
|
||||
testName: "normal",
|
||||
volID: "project/zone/test-vol1",
|
||||
deviceMountPath: "/tmp/csi-test049507108/plugins/csi/pv/test-pv-name/globalmount",
|
||||
stageUnstageSet: true,
|
||||
},
|
||||
{
|
||||
testName: "no volID",
|
||||
volID: "",
|
||||
deviceMountPath: "/tmp/csi-test049507108/plugins/csi/pv/test-pv-name/globalmount",
|
||||
stageUnstageSet: true,
|
||||
shouldFail: true,
|
||||
},
|
||||
{
|
||||
testName: "no device mount path",
|
||||
volID: "project/zone/test-vol1",
|
||||
deviceMountPath: "",
|
||||
stageUnstageSet: true,
|
||||
shouldFail: true,
|
||||
},
|
||||
{
|
||||
testName: "missing part of device mount path",
|
||||
volID: "project/zone/test-vol1",
|
||||
deviceMountPath: "/tmp/csi-test049507108/plugins/csi/pv/test-pv-name/globalmount",
|
||||
stageUnstageSet: true,
|
||||
shouldFail: true,
|
||||
},
|
||||
{
|
||||
testName: "test volume name mismatch",
|
||||
volID: "project/zone/test-vol1",
|
||||
deviceMountPath: "/tmp/csi-test049507108/plugins/csi/pv/test-pv-name/globalmount",
|
||||
stageUnstageSet: true,
|
||||
shouldFail: true,
|
||||
},
|
||||
{
|
||||
testName: "stage_unstage not set",
|
||||
volID: "project/zone/test-vol1",
|
||||
deviceMountPath: "/tmp/csi-test049507108/plugins/csi/pv/test-pv-name/globalmount",
|
||||
stageUnstageSet: false,
|
||||
},
|
||||
{
|
||||
testName: "stage_unstage not set no vars should not fail",
|
||||
stageUnstageSet: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("Running test case: %s", tc.testName)
|
||||
// Setup
|
||||
// Create a new attacher
|
||||
plug, _, tmpDir, _ := newTestWatchPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
attacher, err0 := plug.NewAttacher()
|
||||
if err0 != nil {
|
||||
t.Fatalf("failed to create new attacher: %v", err0)
|
||||
}
|
||||
csiAttacher := attacher.(*csiAttacher)
|
||||
csiAttacher.csiClient = setupClient(t, tc.stageUnstageSet)
|
||||
|
||||
// Add the volume to NodeStagedVolumes
|
||||
cdc := csiAttacher.csiClient.(*fakeCsiDriverClient)
|
||||
cdc.nodeClient.AddNodeStagedVolume(tc.volID, tc.deviceMountPath)
|
||||
|
||||
// Make the PV for this object
|
||||
dir := filepath.Dir(tc.deviceMountPath)
|
||||
// dir is now /var/lib/kubelet/plugins/kubernetes.io/csi/pv/{pvname}
|
||||
pvName := filepath.Base(dir)
|
||||
pv := makeTestPV(pvName, 5, "csi", tc.volID)
|
||||
_, err := csiAttacher.k8s.CoreV1().PersistentVolumes().Create(pv)
|
||||
if err != nil && !tc.shouldFail {
|
||||
t.Fatalf("Failed to create PV: %v", err)
|
||||
}
|
||||
|
||||
// Run
|
||||
err = csiAttacher.UnmountDevice(tc.deviceMountPath)
|
||||
|
||||
// Verify
|
||||
if err != nil {
|
||||
if !tc.shouldFail {
|
||||
t.Errorf("test should not fail, but error occurred: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
if err == nil && tc.shouldFail {
|
||||
t.Errorf("test should fail, but no error occurred")
|
||||
}
|
||||
|
||||
// Verify call goes through all the way
|
||||
expectedSet := 0
|
||||
if !tc.stageUnstageSet {
|
||||
expectedSet = 1
|
||||
}
|
||||
staged := cdc.nodeClient.GetNodeStagedVolumes()
|
||||
if len(staged) != expectedSet {
|
||||
t.Errorf("got wrong number of staged volumes, expecting %v got: %v", expectedSet, len(staged))
|
||||
}
|
||||
|
||||
_, ok := staged[tc.volID]
|
||||
if ok && tc.stageUnstageSet {
|
||||
t.Errorf("found unexpected staged volume: %s", tc.volID)
|
||||
} else if !ok && !tc.stageUnstageSet {
|
||||
t.Errorf("could not find expected staged volume: %s", tc.volID)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// create a plugin mgr to load plugins and setup a fake client
|
||||
func newTestWatchPlugin(t *testing.T) (*csiPlugin, *watch.RaceFreeFakeWatcher, string, *fakeclient.Clientset) {
|
||||
tmpDir, err := utiltesting.MkTmpdir("csi-test")
|
||||
if err != nil {
|
||||
t.Fatalf("can't create temp dir: %v", err)
|
||||
}
|
||||
|
||||
fakeClient := fakeclient.NewSimpleClientset()
|
||||
fakeWatcher := watch.NewRaceFreeFake()
|
||||
fakeClient.Fake.PrependWatchReactor("*", core.DefaultWatchReactor(fakeWatcher, nil))
|
||||
fakeClient.Fake.WatchReactionChain = fakeClient.Fake.WatchReactionChain[:1]
|
||||
host := volumetest.NewFakeVolumeHost(
|
||||
tmpDir,
|
||||
fakeClient,
|
||||
nil,
|
||||
)
|
||||
plugMgr := &volume.VolumePluginMgr{}
|
||||
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
|
||||
|
||||
plug, err := plugMgr.FindPluginByName(csiPluginName)
|
||||
if err != nil {
|
||||
t.Fatalf("can't find plugin %v", csiPluginName)
|
||||
}
|
||||
|
||||
csiPlug, ok := plug.(*csiPlugin)
|
||||
if !ok {
|
||||
t.Fatalf("cannot assert plugin to be type csiPlugin")
|
||||
}
|
||||
|
||||
return csiPlug, fakeWatcher, tmpDir, fakeClient
|
||||
}
|
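The test file above wires a RaceFreeFakeWatcher into the fake clientset so the watch-based wait loops in the attacher can be driven deterministically. A condensed sketch of that wiring follows; the helper name newFakeClientWithWatch is assumed for illustration, while the calls mirror newTestWatchPlugin above.

// watch_fake_sketch.go: illustrative only, not part of this PR.
package csi

import (
	"k8s.io/apimachinery/pkg/watch"
	fakeclient "k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

// newFakeClientWithWatch returns a fake clientset whose Watch calls are
// served by a RaceFreeFakeWatcher, letting a test inject events by hand.
func newFakeClientWithWatch() (*fakeclient.Clientset, *watch.RaceFreeFakeWatcher) {
	client := fakeclient.NewSimpleClientset()
	watcher := watch.NewRaceFreeFake()
	// Route every Watch request to the fake watcher, then trim the reactor
	// chain so the default watch reactor cannot shadow it.
	client.Fake.PrependWatchReactor("*", core.DefaultWatchReactor(watcher, nil))
	client.Fake.WatchReactionChain = client.Fake.WatchReactionChain[:1]
	return client, watcher
}

A test can then call watcher.Modify(attachment) or watcher.Delete(attachment) to unblock waitForVolumeAttachment and waitForVolumeDetachment, which is exactly what TestAttacherAttach and TestAttacherDetach do above.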
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go (283 lines, generated, vendored, normal file)
@@ -0,0 +1,283 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package csi

import (
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"

	"github.com/golang/glog"

	"k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/volume"
)

type csiBlockMapper struct {
|
||||
k8s kubernetes.Interface
|
||||
csiClient csiClient
|
||||
plugin *csiPlugin
|
||||
driverName string
|
||||
specName string
|
||||
volumeID string
|
||||
readOnly bool
|
||||
spec *volume.Spec
|
||||
podUID types.UID
|
||||
volumeInfo map[string]string
|
||||
}
|
||||
|
||||
var _ volume.BlockVolumeMapper = &csiBlockMapper{}
|
||||
|
||||
// GetGlobalMapPath returns a path (on the node) where the devicePath will be symlinked to
|
||||
// Example: plugins/kubernetes.io/csi/volumeDevices/{volumeID}
|
||||
func (m *csiBlockMapper) GetGlobalMapPath(spec *volume.Spec) (string, error) {
|
||||
dir := getVolumeDevicePluginDir(spec.Name(), m.plugin.host)
|
||||
glog.V(4).Infof(log("blockMapper.GetGlobalMapPath = %s", dir))
|
||||
return dir, nil
|
||||
}
|
||||
|
||||
// GetPodDeviceMapPath returns pod's device map path and volume name
|
||||
// path: pods/{podUid}/volumeDevices/kubernetes.io~csi/, {volumeID}
|
||||
func (m *csiBlockMapper) GetPodDeviceMapPath() (string, string) {
|
||||
path, specName := m.plugin.host.GetPodVolumeDeviceDir(m.podUID, csiPluginName), m.specName
|
||||
glog.V(4).Infof(log("blockMapper.GetPodDeviceMapPath = %s", path))
|
||||
return path, specName
|
||||
}
|
||||
|
||||
// SetUpDevice ensures the device is attached and returns the path where the device is located.
|
||||
func (m *csiBlockMapper) SetUpDevice() (string, error) {
|
||||
if !m.plugin.blockEnabled {
|
||||
return "", errors.New("CSIBlockVolume feature not enabled")
|
||||
}
|
||||
|
||||
glog.V(4).Infof(log("blockMapper.SetupDevice called"))
|
||||
|
||||
if m.spec == nil {
|
||||
glog.Error(log("blockMapper.Map spec is nil"))
|
||||
return "", fmt.Errorf("spec is nil")
|
||||
}
|
||||
csiSource, err := getCSISourceFromSpec(m.spec)
|
||||
if err != nil {
|
||||
glog.Error(log("blockMapper.SetupDevice failed to get CSI persistent source: %v", err))
|
||||
return "", err
|
||||
}
|
||||
|
||||
globalMapPath, err := m.GetGlobalMapPath(m.spec)
|
||||
if err != nil {
|
||||
glog.Error(log("blockMapper.SetupDevice failed to get global map path: %v", err))
|
||||
return "", err
|
||||
}
|
||||
|
||||
csi := m.csiClient
|
||||
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Check whether "STAGE_UNSTAGE_VOLUME" is set
|
||||
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
|
||||
if err != nil {
|
||||
glog.Error(log("blockMapper.SetupDevice failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
|
||||
return "", err
|
||||
}
|
||||
if !stageUnstageSet {
|
||||
glog.Infof(log("blockMapper.SetupDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Start MountDevice
|
||||
nodeName := string(m.plugin.host.GetNodeName())
|
||||
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
|
||||
|
||||
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
|
||||
attachment, err := m.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
|
||||
if err != nil {
|
||||
glog.Error(log("blockMapper.SetupDevice failed to get volume attachment [id=%v]: %v", attachID, err))
|
||||
return "", err
|
||||
}
|
||||
|
||||
if attachment == nil {
|
||||
glog.Error(log("blockMapper.SetupDevice unable to find VolumeAttachment [id=%s]", attachID))
|
||||
return "", errors.New("no existing VolumeAttachment found")
|
||||
}
|
||||
publishVolumeInfo := attachment.Status.AttachmentMetadata
|
||||
|
||||
nodeStageSecrets := map[string]string{}
|
||||
if csiSource.NodeStageSecretRef != nil {
|
||||
nodeStageSecrets, err = getCredentialsFromSecret(m.k8s, csiSource.NodeStageSecretRef)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get NodeStageSecretRef %s/%s: %v",
|
||||
csiSource.NodeStageSecretRef.Namespace, csiSource.NodeStageSecretRef.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// create globalMapPath before call to NodeStageVolume
|
||||
if err := os.MkdirAll(globalMapPath, 0750); err != nil {
|
||||
glog.Error(log("blockMapper.SetupDevice failed to create dir %s: %v", globalMapPath, err))
|
||||
return "", err
|
||||
}
|
||||
glog.V(4).Info(log("blockMapper.SetupDevice created global device map path successfully [%s]", globalMapPath))
|
||||
|
||||
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
|
||||
accessMode := v1.ReadWriteOnce
|
||||
if m.spec.PersistentVolume.Spec.AccessModes != nil {
|
||||
accessMode = m.spec.PersistentVolume.Spec.AccessModes[0]
|
||||
}
|
||||
|
||||
err = csi.NodeStageVolume(ctx,
|
||||
csiSource.VolumeHandle,
|
||||
publishVolumeInfo,
|
||||
globalMapPath,
|
||||
fsTypeBlockName,
|
||||
accessMode,
|
||||
nodeStageSecrets,
|
||||
csiSource.VolumeAttributes)
|
||||
|
||||
if err != nil {
|
||||
glog.Error(log("blockMapper.SetupDevice failed: %v", err))
|
||||
if err := os.RemoveAll(globalMapPath); err != nil {
|
||||
glog.Error(log("blockMapper.SetupDevice failed to remove dir after a NodeStageVolume() error [%s]: %v", globalMapPath, err))
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
glog.V(4).Infof(log("blockMapper.SetupDevice successfully requested NodeStageVolume [%s]", globalMapPath))
|
||||
return globalMapPath, nil
|
||||
}
|
||||
|
||||
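// MapDevice publishes the staged volume to the pod's device map path
// (volumeMapPath/volumeMapName) by issuing a NodePublishVolume call.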
func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
|
||||
if !m.plugin.blockEnabled {
|
||||
return errors.New("CSIBlockVolume feature not enabled")
|
||||
}
|
||||
|
||||
glog.V(4).Infof(log("blockMapper.MapDevice mapping block device %s", devicePath))
|
||||
|
||||
if m.spec == nil {
|
||||
glog.Error(log("blockMapper.MapDevice spec is nil"))
|
||||
return fmt.Errorf("spec is nil")
|
||||
}
|
||||
|
||||
csiSource, err := getCSISourceFromSpec(m.spec)
|
||||
if err != nil {
|
||||
glog.Error(log("blockMapper.Map failed to get CSI persistent source: %v", err))
|
||||
return err
|
||||
}
|
||||
|
||||
dir := filepath.Join(volumeMapPath, volumeMapName)
|
||||
csi := m.csiClient
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
|
||||
defer cancel()
|
||||
|
||||
nodeName := string(m.plugin.host.GetNodeName())
|
||||
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
|
||||
|
||||
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
|
||||
attachment, err := m.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
|
||||
if err != nil {
|
||||
glog.Error(log("blockMapper.MapDevice failed to get volume attachment [id=%v]: %v", attachID, err))
|
||||
return err
|
||||
}
|
||||
|
||||
if attachment == nil {
|
||||
glog.Error(log("blockMapper.MapDevice unable to find VolumeAttachment [id=%s]", attachID))
|
||||
return errors.New("no existing VolumeAttachment found")
|
||||
}
|
||||
publishVolumeInfo := attachment.Status.AttachmentMetadata
|
||||
|
||||
nodePublishSecrets := map[string]string{}
|
||||
if csiSource.NodePublishSecretRef != nil {
|
||||
nodePublishSecrets, err = getCredentialsFromSecret(m.k8s, csiSource.NodePublishSecretRef)
|
||||
if err != nil {
|
||||
glog.Errorf("blockMapper.MapDevice failed to get NodePublishSecretRef %s/%s: %v",
|
||||
csiSource.NodePublishSecretRef.Namespace, csiSource.NodePublishSecretRef.Name, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(dir, 0750); err != nil {
|
||||
glog.Error(log("blockMapper.MapDevice failed to create dir %#v: %v", dir, err))
|
||||
return err
|
||||
}
|
||||
glog.V(4).Info(log("blockMapper.MapDevice created NodePublish path [%s]", dir))
|
||||
|
||||
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
|
||||
accessMode := v1.ReadWriteOnce
|
||||
if m.spec.PersistentVolume.Spec.AccessModes != nil {
|
||||
accessMode = m.spec.PersistentVolume.Spec.AccessModes[0]
|
||||
}
|
||||
|
||||
err = csi.NodePublishVolume(
|
||||
ctx,
|
||||
m.volumeID,
|
||||
m.readOnly,
|
||||
globalMapPath,
|
||||
dir,
|
||||
accessMode,
|
||||
publishVolumeInfo,
|
||||
csiSource.VolumeAttributes,
|
||||
nodePublishSecrets,
|
||||
fsTypeBlockName,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
glog.Errorf(log("blockMapper.MapDevice failed: %v", err))
|
||||
if err := os.RemoveAll(dir); err != nil {
|
||||
glog.Error(log("blockMapper.MapDevice failed to remove mount dir after a NodePublish() error [%s]: %v", dir, err))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ volume.BlockVolumeUnmapper = &csiBlockMapper{}
|
||||
|
||||
// TearDownDevice removes traces of the SetUpDevice.
|
||||
func (m *csiBlockMapper) TearDownDevice(globalMapPath, devicePath string) error {
|
||||
if !m.plugin.blockEnabled {
|
||||
return errors.New("CSIBlockVolume feature not enabled")
|
||||
}
|
||||
|
||||
glog.V(4).Infof(log("unmapper.TearDownDevice(globalMapPath=%s; devicePath=%s)", globalMapPath, devicePath))
|
||||
|
||||
csi := m.csiClient
|
||||
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
|
||||
defer cancel()
|
||||
|
||||
// unmap global device map path
|
||||
if err := csi.NodeUnstageVolume(ctx, m.volumeID, globalMapPath); err != nil {
|
||||
glog.Errorf(log("blockMapper.TearDownDevice failed: %v", err))
|
||||
return err
|
||||
}
|
||||
glog.V(4).Infof(log("blockMapper.TearDownDevice NodeUnstageVolume successfully [%s]", globalMapPath))
|
||||
|
||||
// request to remove pod volume map path also
|
||||
podVolumePath, volumeName := m.GetPodDeviceMapPath()
|
||||
podVolumeMapPath := filepath.Join(podVolumePath, volumeName)
|
||||
if err := csi.NodeUnpublishVolume(ctx, m.volumeID, podVolumeMapPath); err != nil {
|
||||
glog.Error(log("blockMapper.TearDownDevice failed: %v", err))
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(4).Infof(log("blockMapper.TearDownDevice NodeUnpublished successfully [%s]", podVolumeMapPath))
|
||||
|
||||
return nil
|
||||
}
|
264
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block_test.go
generated
vendored
Normal file
@@ -0,0 +1,264 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package csi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
api "k8s.io/api/core/v1"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
fakeclient "k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||
)
|
||||
|
||||
func TestBlockMapperGetGlobalMapPath(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
// TODO (vladimirvivien) specName with slashes will not work
|
||||
testCases := []struct {
|
||||
name string
|
||||
specVolumeName string
|
||||
path string
|
||||
}{
|
||||
{
|
||||
name: "simple specName",
|
||||
specVolumeName: "spec-0",
|
||||
path: path.Join(tmpDir, fmt.Sprintf("plugins/kubernetes.io/csi/volumeDevices/%s/%s", "spec-0", "dev")),
|
||||
},
|
||||
{
|
||||
name: "specName with dots",
|
||||
specVolumeName: "test.spec.1",
|
||||
path: path.Join(tmpDir, fmt.Sprintf("plugins/kubernetes.io/csi/volumeDevices/%s/%s", "test.spec.1", "dev")),
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Logf("test case: %s", tc.name)
|
||||
pv := makeTestPV(tc.specVolumeName, 10, testDriver, testVol)
|
||||
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
|
||||
mapper, err := plug.NewBlockVolumeMapper(
|
||||
spec,
|
||||
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
|
||||
volume.VolumeOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to make a new Mapper: %v", err)
|
||||
}
|
||||
csiMapper := mapper.(*csiBlockMapper)
|
||||
|
||||
path, err := csiMapper.GetGlobalMapPath(spec)
|
||||
if err != nil {
|
||||
t.Errorf("mapper GetGlobalMapPath failed: %v", err)
|
||||
}
|
||||
|
||||
if tc.path != path {
|
||||
t.Errorf("expecting path %s, got %s", tc.path, path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockMapperSetupDevice(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
fakeClient := fakeclient.NewSimpleClientset()
|
||||
host := volumetest.NewFakeVolumeHostWithNodeName(
|
||||
tmpDir,
|
||||
fakeClient,
|
||||
nil,
|
||||
"fakeNode",
|
||||
)
|
||||
plug.host = host
|
||||
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
||||
pvName := pv.GetName()
|
||||
nodeName := string(plug.host.GetNodeName())
|
||||
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
|
||||
|
||||
// MapDevice
|
||||
mapper, err := plug.NewBlockVolumeMapper(
|
||||
spec,
|
||||
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
|
||||
volume.VolumeOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create new mapper: %v", err)
|
||||
}
|
||||
csiMapper := mapper.(*csiBlockMapper)
|
||||
csiMapper.csiClient = setupClient(t, true)
|
||||
|
||||
attachID := getAttachmentName(csiMapper.volumeID, csiMapper.driverName, string(nodeName))
|
||||
attachment := makeTestAttachment(attachID, nodeName, pvName)
|
||||
attachment.Status.Attached = true
|
||||
_, err = csiMapper.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to setup VolumeAttachment: %v", err)
|
||||
}
|
||||
t.Log("created attachement ", attachID)
|
||||
|
||||
devicePath, err := csiMapper.SetUpDevice()
|
||||
if err != nil {
|
||||
t.Fatalf("mapper failed to SetupDevice: %v", err)
|
||||
}
|
||||
|
||||
globalMapPath, err := csiMapper.GetGlobalMapPath(spec)
|
||||
if err != nil {
|
||||
t.Fatalf("mapper failed to GetGlobalMapPath: %v", err)
|
||||
}
|
||||
|
||||
if devicePath != globalMapPath {
|
||||
t.Fatalf("mapper.SetupDevice returned unexpected path %s instead of %v", devicePath, globalMapPath)
|
||||
}
|
||||
|
||||
vols := csiMapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodeStagedVolumes()
|
||||
if vols[csiMapper.volumeID] != devicePath {
|
||||
t.Error("csi server may not have received NodePublishVolume call")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockMapperMapDevice(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
fakeClient := fakeclient.NewSimpleClientset()
|
||||
host := volumetest.NewFakeVolumeHostWithNodeName(
|
||||
tmpDir,
|
||||
fakeClient,
|
||||
nil,
|
||||
"fakeNode",
|
||||
)
|
||||
plug.host = host
|
||||
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
||||
pvName := pv.GetName()
|
||||
nodeName := string(plug.host.GetNodeName())
|
||||
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
|
||||
|
||||
// MapDevice
|
||||
mapper, err := plug.NewBlockVolumeMapper(
|
||||
spec,
|
||||
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
|
||||
volume.VolumeOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create new mapper: %v", err)
|
||||
}
|
||||
csiMapper := mapper.(*csiBlockMapper)
|
||||
csiMapper.csiClient = setupClient(t, true)
|
||||
|
||||
attachID := getAttachmentName(csiMapper.volumeID, csiMapper.driverName, string(nodeName))
|
||||
attachment := makeTestAttachment(attachID, nodeName, pvName)
|
||||
attachment.Status.Attached = true
|
||||
_, err = csiMapper.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to setup VolumeAttachment: %v", err)
|
||||
}
|
||||
t.Log("created attachement ", attachID)
|
||||
|
||||
devicePath, err := csiMapper.SetUpDevice()
|
||||
if err != nil {
|
||||
t.Fatalf("mapper failed to SetupDevice: %v", err)
|
||||
}
|
||||
globalMapPath, err := csiMapper.GetGlobalMapPath(csiMapper.spec)
|
||||
if err != nil {
|
||||
t.Fatalf("mapper failed to GetGlobalMapPath: %v", err)
|
||||
}
|
||||
|
||||
// Map device to global and pod device map path
|
||||
volumeMapPath, volName := csiMapper.GetPodDeviceMapPath()
|
||||
err = csiMapper.MapDevice(devicePath, globalMapPath, volumeMapPath, volName, csiMapper.podUID)
|
||||
if err != nil {
|
||||
t.Fatalf("mapper failed to GetGlobalMapPath: %v", err)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(filepath.Join(volumeMapPath, volName)); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
t.Errorf("mapper.MapDevice failed, volume path not created: %s", volumeMapPath)
|
||||
} else {
|
||||
t.Errorf("mapper.MapDevice failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
pubs := csiMapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
|
||||
if pubs[csiMapper.volumeID] != volumeMapPath {
|
||||
t.Error("csi server may not have received NodePublishVolume call")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockMapperTearDownDevice(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
fakeClient := fakeclient.NewSimpleClientset()
|
||||
host := volumetest.NewFakeVolumeHostWithNodeName(
|
||||
tmpDir,
|
||||
fakeClient,
|
||||
nil,
|
||||
"fakeNode",
|
||||
)
|
||||
plug.host = host
|
||||
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
||||
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
|
||||
|
||||
// save volume data
|
||||
dir := getVolumeDeviceDataDir(pv.ObjectMeta.Name, plug.host)
|
||||
if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
|
||||
t.Errorf("failed to create dir [%s]: %v", dir, err)
|
||||
}
|
||||
|
||||
if err := saveVolumeData(
|
||||
dir,
|
||||
volDataFileName,
|
||||
map[string]string{
|
||||
volDataKey.specVolID: pv.ObjectMeta.Name,
|
||||
volDataKey.driverName: testDriver,
|
||||
volDataKey.volHandle: testVol,
|
||||
},
|
||||
); err != nil {
|
||||
t.Fatalf("failed to save volume data: %v", err)
|
||||
}
|
||||
|
||||
unmapper, err := plug.NewBlockVolumeUnmapper(pv.ObjectMeta.Name, testPodUID)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to make a new Unmapper: %v", err)
|
||||
}
|
||||
|
||||
csiUnmapper := unmapper.(*csiBlockMapper)
|
||||
csiUnmapper.csiClient = setupClient(t, true)
|
||||
|
||||
globalMapPath, err := csiUnmapper.GetGlobalMapPath(spec)
|
||||
if err != nil {
|
||||
t.Fatalf("unmapper failed to GetGlobalMapPath: %v", err)
|
||||
}
|
||||
|
||||
err = csiUnmapper.TearDownDevice(globalMapPath, "/dev/test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// ensure csi client call and node unstaged
|
||||
vols := csiUnmapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodeStagedVolumes()
|
||||
if _, ok := vols[csiUnmapper.volumeID]; ok {
|
||||
t.Error("csi server may not have received NodeUnstageVolume call")
|
||||
}
|
||||
|
||||
// ensure csi client call and node unpublished
|
||||
pubs := csiUnmapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
|
||||
if _, ok := pubs[csiUnmapper.volumeID]; ok {
|
||||
t.Error("csi server may not have received NodeUnpublishVolume call")
|
||||
}
|
||||
}
|
293
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client.go
generated
vendored
Normal file
@@ -0,0 +1,293 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package csi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
|
||||
"github.com/golang/glog"
|
||||
"google.golang.org/grpc"
|
||||
api "k8s.io/api/core/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
)
|
||||
|
||||
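// csiClient abstracts the CSI Node service RPCs this plugin issues during
// volume setup and teardown: NodeStageVolume/NodeUnstageVolume for the global
// staging path, NodePublishVolume/NodeUnpublishVolume for the per-pod target
// path, and NodeGetCapabilities for capability discovery.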
type csiClient interface {
|
||||
NodePublishVolume(
|
||||
ctx context.Context,
|
||||
volumeid string,
|
||||
readOnly bool,
|
||||
stagingTargetPath string,
|
||||
targetPath string,
|
||||
accessMode api.PersistentVolumeAccessMode,
|
||||
volumeInfo map[string]string,
|
||||
volumeAttribs map[string]string,
|
||||
nodePublishSecrets map[string]string,
|
||||
fsType string,
|
||||
) error
|
||||
NodeUnpublishVolume(
|
||||
ctx context.Context,
|
||||
volID string,
|
||||
targetPath string,
|
||||
) error
|
||||
NodeStageVolume(ctx context.Context,
|
||||
volID string,
|
||||
publishVolumeInfo map[string]string,
|
||||
stagingTargetPath string,
|
||||
fsType string,
|
||||
accessMode api.PersistentVolumeAccessMode,
|
||||
nodeStageSecrets map[string]string,
|
||||
volumeAttribs map[string]string,
|
||||
) error
|
||||
NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error
|
||||
NodeGetCapabilities(ctx context.Context) ([]*csipb.NodeServiceCapability, error)
|
||||
}
|
||||
|
||||
// csiDriverClient encapsulates all csi-plugin methods
|
||||
type csiDriverClient struct {
|
||||
driverName string
|
||||
nodeClient csipb.NodeClient
|
||||
}
|
||||
|
||||
var _ csiClient = &csiDriverClient{}
|
||||
|
||||
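// newCsiDriverClient returns a csiDriverClient bound to the named driver;
// connections to the driver are established per RPC via newGrpcConn.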
func newCsiDriverClient(driverName string) *csiDriverClient {
|
||||
c := &csiDriverClient{driverName: driverName}
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *csiDriverClient) NodePublishVolume(
|
||||
ctx context.Context,
|
||||
volID string,
|
||||
readOnly bool,
|
||||
stagingTargetPath string,
|
||||
targetPath string,
|
||||
accessMode api.PersistentVolumeAccessMode,
|
||||
volumeInfo map[string]string,
|
||||
volumeAttribs map[string]string,
|
||||
nodePublishSecrets map[string]string,
|
||||
fsType string,
|
||||
) error {
|
||||
glog.V(4).Info(log("calling NodePublishVolume rpc [volid=%s,target_path=%s]", volID, targetPath))
|
||||
if volID == "" {
|
||||
return errors.New("missing volume id")
|
||||
}
|
||||
if targetPath == "" {
|
||||
return errors.New("missing target path")
|
||||
}
|
||||
|
||||
conn, err := newGrpcConn(c.driverName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
nodeClient := csipb.NewNodeClient(conn)
|
||||
|
||||
req := &csipb.NodePublishVolumeRequest{
|
||||
VolumeId: volID,
|
||||
TargetPath: targetPath,
|
||||
Readonly: readOnly,
|
||||
PublishInfo: volumeInfo,
|
||||
VolumeAttributes: volumeAttribs,
|
||||
NodePublishSecrets: nodePublishSecrets,
|
||||
VolumeCapability: &csipb.VolumeCapability{
|
||||
AccessMode: &csipb.VolumeCapability_AccessMode{
|
||||
Mode: asCSIAccessMode(accessMode),
|
||||
},
|
||||
},
|
||||
}
|
||||
if stagingTargetPath != "" {
|
||||
req.StagingTargetPath = stagingTargetPath
|
||||
}
|
||||
|
||||
if fsType == fsTypeBlockName {
|
||||
req.VolumeCapability.AccessType = &csipb.VolumeCapability_Block{
|
||||
Block: &csipb.VolumeCapability_BlockVolume{},
|
||||
}
|
||||
} else {
|
||||
req.VolumeCapability.AccessType = &csipb.VolumeCapability_Mount{
|
||||
Mount: &csipb.VolumeCapability_MountVolume{
|
||||
FsType: fsType,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
_, err = nodeClient.NodePublishVolume(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *csiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string, targetPath string) error {
|
||||
glog.V(4).Info(log("calling NodeUnpublishVolume rpc: [volid=%s, target_path=%s", volID, targetPath))
|
||||
if volID == "" {
|
||||
return errors.New("missing volume id")
|
||||
}
|
||||
if targetPath == "" {
|
||||
return errors.New("missing target path")
|
||||
}
|
||||
|
||||
conn, err := newGrpcConn(c.driverName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
nodeClient := csipb.NewNodeClient(conn)
|
||||
|
||||
req := &csipb.NodeUnpublishVolumeRequest{
|
||||
VolumeId: volID,
|
||||
TargetPath: targetPath,
|
||||
}
|
||||
|
||||
_, err = nodeClient.NodeUnpublishVolume(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *csiDriverClient) NodeStageVolume(ctx context.Context,
|
||||
volID string,
|
||||
publishInfo map[string]string,
|
||||
stagingTargetPath string,
|
||||
fsType string,
|
||||
accessMode api.PersistentVolumeAccessMode,
|
||||
nodeStageSecrets map[string]string,
|
||||
volumeAttribs map[string]string,
|
||||
) error {
|
||||
glog.V(4).Info(log("calling NodeStageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath))
|
||||
if volID == "" {
|
||||
return errors.New("missing volume id")
|
||||
}
|
||||
if stagingTargetPath == "" {
|
||||
return errors.New("missing staging target path")
|
||||
}
|
||||
|
||||
conn, err := newGrpcConn(c.driverName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
nodeClient := csipb.NewNodeClient(conn)
|
||||
|
||||
req := &csipb.NodeStageVolumeRequest{
|
||||
VolumeId: volID,
|
||||
PublishInfo: publishInfo,
|
||||
StagingTargetPath: stagingTargetPath,
|
||||
VolumeCapability: &csipb.VolumeCapability{
|
||||
AccessMode: &csipb.VolumeCapability_AccessMode{
|
||||
Mode: asCSIAccessMode(accessMode),
|
||||
},
|
||||
},
|
||||
NodeStageSecrets: nodeStageSecrets,
|
||||
VolumeAttributes: volumeAttribs,
|
||||
}
|
||||
|
||||
if fsType == fsTypeBlockName {
|
||||
req.VolumeCapability.AccessType = &csipb.VolumeCapability_Block{
|
||||
Block: &csipb.VolumeCapability_BlockVolume{},
|
||||
}
|
||||
} else {
|
||||
req.VolumeCapability.AccessType = &csipb.VolumeCapability_Mount{
|
||||
Mount: &csipb.VolumeCapability_MountVolume{
|
||||
FsType: fsType,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
_, err = nodeClient.NodeStageVolume(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *csiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error {
|
||||
glog.V(4).Info(log("calling NodeUnstageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath))
|
||||
if volID == "" {
|
||||
return errors.New("missing volume id")
|
||||
}
|
||||
if stagingTargetPath == "" {
|
||||
return errors.New("missing staging target path")
|
||||
}
|
||||
|
||||
conn, err := newGrpcConn(c.driverName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
nodeClient := csipb.NewNodeClient(conn)
|
||||
|
||||
req := &csipb.NodeUnstageVolumeRequest{
|
||||
VolumeId: volID,
|
||||
StagingTargetPath: stagingTargetPath,
|
||||
}
|
||||
_, err = nodeClient.NodeUnstageVolume(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *csiDriverClient) NodeGetCapabilities(ctx context.Context) ([]*csipb.NodeServiceCapability, error) {
|
||||
glog.V(4).Info(log("calling NodeGetCapabilities rpc"))
|
||||
|
||||
conn, err := newGrpcConn(c.driverName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer conn.Close()
|
||||
nodeClient := csipb.NewNodeClient(conn)
|
||||
|
||||
req := &csipb.NodeGetCapabilitiesRequest{}
|
||||
resp, err := nodeClient.NodeGetCapabilities(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.GetCapabilities(), nil
|
||||
}
|
||||
|
||||
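// asCSIAccessMode maps a Kubernetes PersistentVolumeAccessMode to the
// corresponding CSI access mode, defaulting to UNKNOWN for unrecognized values.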
func asCSIAccessMode(am api.PersistentVolumeAccessMode) csipb.VolumeCapability_AccessMode_Mode {
|
||||
switch am {
|
||||
case api.ReadWriteOnce:
|
||||
return csipb.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
|
||||
case api.ReadOnlyMany:
|
||||
return csipb.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
|
||||
case api.ReadWriteMany:
|
||||
return csipb.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
|
||||
}
|
||||
return csipb.VolumeCapability_AccessMode_UNKNOWN
|
||||
}
|
||||
|
||||
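// newGrpcConn dials the driver's unix domain socket (taken from the registered
// driver endpoint when the plugin watcher feature is enabled, otherwise from
// csiAddrTemplate) and returns the gRPC client connection.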
func newGrpcConn(driverName string) (*grpc.ClientConn, error) {
|
||||
if driverName == "" {
|
||||
return nil, fmt.Errorf("driver name is empty")
|
||||
}
|
||||
addr := fmt.Sprintf(csiAddrTemplate, driverName)
|
||||
// TODO once KubeletPluginsWatcher graduates to beta, remove FeatureGate check
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPluginsWatcher) {
|
||||
driver, ok := csiDrivers.driversMap[driverName]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driverName)
|
||||
}
|
||||
addr = driver.driverEndpoint
|
||||
}
|
||||
network := "unix"
|
||||
glog.V(4).Infof(log("creating new gRPC connection for [%s://%s]", network, addr))
|
||||
|
||||
return grpc.Dial(
|
||||
addr,
|
||||
grpc.WithInsecure(),
|
||||
grpc.WithDialer(func(target string, timeout time.Duration) (net.Conn, error) {
|
||||
return net.Dial(network, target)
|
||||
}),
|
||||
)
|
||||
}
|
276
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client_test.go
generated
vendored
Normal file
@@ -0,0 +1,276 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package csi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
|
||||
api "k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/pkg/volume/csi/fake"
|
||||
)
|
||||
|
||||
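// fakeCsiDriverClient is a test double for csiClient that forwards calls to an
// in-memory fake node client so tests can inspect staged and published volumes.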
type fakeCsiDriverClient struct {
|
||||
t *testing.T
|
||||
nodeClient *fake.NodeClient
|
||||
}
|
||||
|
||||
func newFakeCsiDriverClient(t *testing.T, stagingCapable bool) *fakeCsiDriverClient {
|
||||
return &fakeCsiDriverClient{
|
||||
t: t,
|
||||
nodeClient: fake.NewNodeClient(stagingCapable),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *fakeCsiDriverClient) NodePublishVolume(
|
||||
ctx context.Context,
|
||||
volID string,
|
||||
readOnly bool,
|
||||
stagingTargetPath string,
|
||||
targetPath string,
|
||||
accessMode api.PersistentVolumeAccessMode,
|
||||
volumeInfo map[string]string,
|
||||
volumeAttribs map[string]string,
|
||||
nodePublishSecrets map[string]string,
|
||||
fsType string,
|
||||
) error {
|
||||
c.t.Log("calling fake.NodePublishVolume...")
|
||||
req := &csipb.NodePublishVolumeRequest{
|
||||
VolumeId: volID,
|
||||
TargetPath: targetPath,
|
||||
Readonly: readOnly,
|
||||
PublishInfo: volumeInfo,
|
||||
VolumeAttributes: volumeAttribs,
|
||||
NodePublishSecrets: nodePublishSecrets,
|
||||
VolumeCapability: &csipb.VolumeCapability{
|
||||
AccessMode: &csipb.VolumeCapability_AccessMode{
|
||||
Mode: asCSIAccessMode(accessMode),
|
||||
},
|
||||
AccessType: &csipb.VolumeCapability_Mount{
|
||||
Mount: &csipb.VolumeCapability_MountVolume{
|
||||
FsType: fsType,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := c.nodeClient.NodePublishVolume(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *fakeCsiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string, targetPath string) error {
|
||||
c.t.Log("calling fake.NodeUnpublishVolume...")
|
||||
req := &csipb.NodeUnpublishVolumeRequest{
|
||||
VolumeId: volID,
|
||||
TargetPath: targetPath,
|
||||
}
|
||||
|
||||
_, err := c.nodeClient.NodeUnpublishVolume(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *fakeCsiDriverClient) NodeStageVolume(ctx context.Context,
|
||||
volID string,
|
||||
publishInfo map[string]string,
|
||||
stagingTargetPath string,
|
||||
fsType string,
|
||||
accessMode api.PersistentVolumeAccessMode,
|
||||
nodeStageSecrets map[string]string,
|
||||
volumeAttribs map[string]string,
|
||||
) error {
|
||||
c.t.Log("calling fake.NodeStageVolume...")
|
||||
req := &csipb.NodeStageVolumeRequest{
|
||||
VolumeId: volID,
|
||||
PublishInfo: publishInfo,
|
||||
StagingTargetPath: stagingTargetPath,
|
||||
VolumeCapability: &csipb.VolumeCapability{
|
||||
AccessMode: &csipb.VolumeCapability_AccessMode{
|
||||
Mode: asCSIAccessMode(accessMode),
|
||||
},
|
||||
AccessType: &csipb.VolumeCapability_Mount{
|
||||
Mount: &csipb.VolumeCapability_MountVolume{
|
||||
FsType: fsType,
|
||||
},
|
||||
},
|
||||
},
|
||||
NodeStageSecrets: nodeStageSecrets,
|
||||
VolumeAttributes: volumeAttribs,
|
||||
}
|
||||
|
||||
_, err := c.nodeClient.NodeStageVolume(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *fakeCsiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error {
|
||||
c.t.Log("calling fake.NodeUnstageVolume...")
|
||||
req := &csipb.NodeUnstageVolumeRequest{
|
||||
VolumeId: volID,
|
||||
StagingTargetPath: stagingTargetPath,
|
||||
}
|
||||
_, err := c.nodeClient.NodeUnstageVolume(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *fakeCsiDriverClient) NodeGetCapabilities(ctx context.Context) ([]*csipb.NodeServiceCapability, error) {
|
||||
c.t.Log("calling fake.NodeGetCapabilities...")
|
||||
req := &csipb.NodeGetCapabilitiesRequest{}
|
||||
resp, err := c.nodeClient.NodeGetCapabilities(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.GetCapabilities(), nil
|
||||
}
|
||||
|
||||
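// setupClient returns a fake csiClient for tests; stageUnstageSet controls
// whether the fake node advertises the STAGE_UNSTAGE_VOLUME capability.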
func setupClient(t *testing.T, stageUnstageSet bool) csiClient {
|
||||
return newFakeCsiDriverClient(t, stageUnstageSet)
|
||||
}
|
||||
|
||||
func TestClientNodePublishVolume(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
volID string
|
||||
targetPath string
|
||||
fsType string
|
||||
mustFail bool
|
||||
err error
|
||||
}{
|
||||
{name: "test ok", volID: "vol-test", targetPath: "/test/path"},
|
||||
{name: "missing volID", targetPath: "/test/path", mustFail: true},
|
||||
{name: "missing target path", volID: "vol-test", mustFail: true},
|
||||
{name: "bad fs", volID: "vol-test", targetPath: "/test/path", fsType: "badfs", mustFail: true},
|
||||
{name: "grpc error", volID: "vol-test", targetPath: "/test/path", mustFail: true, err: errors.New("grpc error")},
|
||||
}
|
||||
|
||||
client := setupClient(t, false)
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("test case: %s", tc.name)
|
||||
client.(*fakeCsiDriverClient).nodeClient.SetNextError(tc.err)
|
||||
err := client.NodePublishVolume(
|
||||
context.Background(),
|
||||
tc.volID,
|
||||
false,
|
||||
"",
|
||||
tc.targetPath,
|
||||
api.ReadWriteOnce,
|
||||
map[string]string{"device": "/dev/null"},
|
||||
map[string]string{"attr0": "val0"},
|
||||
map[string]string{},
|
||||
tc.fsType,
|
||||
)
|
||||
|
||||
if tc.mustFail && err == nil {
|
||||
t.Error("test must fail, but err is nil")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientNodeUnpublishVolume(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
volID string
|
||||
targetPath string
|
||||
mustFail bool
|
||||
err error
|
||||
}{
|
||||
{name: "test ok", volID: "vol-test", targetPath: "/test/path"},
|
||||
{name: "missing volID", targetPath: "/test/path", mustFail: true},
|
||||
{name: "missing target path", volID: "vol-test", mustFail: true},
|
||||
{name: "grpc error", volID: "vol-test", targetPath: "/test/path", mustFail: true, err: errors.New("grpc error")},
|
||||
}
|
||||
|
||||
client := setupClient(t, false)
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("test case: %s", tc.name)
|
||||
client.(*fakeCsiDriverClient).nodeClient.SetNextError(tc.err)
|
||||
err := client.NodeUnpublishVolume(context.Background(), tc.volID, tc.targetPath)
|
||||
if tc.mustFail && err == nil {
|
||||
t.Error("test must fail, but err is nil")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientNodeStageVolume(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
volID string
|
||||
stagingTargetPath string
|
||||
fsType string
|
||||
secret map[string]string
|
||||
mustFail bool
|
||||
err error
|
||||
}{
|
||||
{name: "test ok", volID: "vol-test", stagingTargetPath: "/test/path", fsType: "ext4"},
|
||||
{name: "missing volID", stagingTargetPath: "/test/path", mustFail: true},
|
||||
{name: "missing target path", volID: "vol-test", mustFail: true},
|
||||
{name: "bad fs", volID: "vol-test", stagingTargetPath: "/test/path", fsType: "badfs", mustFail: true},
|
||||
{name: "grpc error", volID: "vol-test", stagingTargetPath: "/test/path", mustFail: true, err: errors.New("grpc error")},
|
||||
}
|
||||
|
||||
client := setupClient(t, false)
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("Running test case: %s", tc.name)
|
||||
client.(*fakeCsiDriverClient).nodeClient.SetNextError(tc.err)
|
||||
err := client.NodeStageVolume(
|
||||
context.Background(),
|
||||
tc.volID,
|
||||
map[string]string{"device": "/dev/null"},
|
||||
tc.stagingTargetPath,
|
||||
tc.fsType,
|
||||
api.ReadWriteOnce,
|
||||
tc.secret,
|
||||
map[string]string{"attr0": "val0"},
|
||||
)
|
||||
|
||||
if tc.mustFail && err == nil {
|
||||
t.Error("test must fail, but err is nil")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientNodeUnstageVolume(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
volID string
|
||||
stagingTargetPath string
|
||||
mustFail bool
|
||||
err error
|
||||
}{
|
||||
{name: "test ok", volID: "vol-test", stagingTargetPath: "/test/path"},
|
||||
{name: "missing volID", stagingTargetPath: "/test/path", mustFail: true},
|
||||
{name: "missing target path", volID: "vol-test", mustFail: true},
|
||||
{name: "grpc error", volID: "vol-test", stagingTargetPath: "/test/path", mustFail: true, err: errors.New("grpc error")},
|
||||
}
|
||||
|
||||
client := setupClient(t, false)
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("Running test case: %s", tc.name)
|
||||
client.(*fakeCsiDriverClient).nodeClient.SetNextError(tc.err)
|
||||
err := client.NodeUnstageVolume(
|
||||
context.Background(),
|
||||
tc.volID, tc.stagingTargetPath,
|
||||
)
|
||||
if tc.mustFail && err == nil {
|
||||
t.Error("test must fail, but err is nil")
|
||||
}
|
||||
}
|
||||
}
|
346
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go
generated
vendored
Normal file
@@ -0,0 +1,346 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package csi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
api "k8s.io/api/core/v1"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
kstrings "k8s.io/kubernetes/pkg/util/strings"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
)
|
||||
|
||||
const defaultFSType = "ext4"
|
||||
|
||||
//TODO (vladimirvivien) move this in a central loc later
|
||||
var (
|
||||
volDataKey = struct {
|
||||
specVolID,
|
||||
volHandle,
|
||||
driverName,
|
||||
nodeName,
|
||||
attachmentID string
|
||||
}{
|
||||
"specVolID",
|
||||
"volumeHandle",
|
||||
"driverName",
|
||||
"nodeName",
|
||||
"attachmentID",
|
||||
}
|
||||
)
|
||||
|
||||
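// csiMountMgr mounts and unmounts a CSI-backed volume for a single pod,
// implementing volume.Mounter and volume.Unmounter.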
type csiMountMgr struct {
|
||||
csiClient csiClient
|
||||
k8s kubernetes.Interface
|
||||
plugin *csiPlugin
|
||||
driverName string
|
||||
volumeID string
|
||||
specVolumeID string
|
||||
readOnly bool
|
||||
spec *volume.Spec
|
||||
pod *api.Pod
|
||||
podUID types.UID
|
||||
options volume.VolumeOptions
|
||||
volumeInfo map[string]string
|
||||
volume.MetricsNil
|
||||
}
|
||||
|
||||
// volume.Volume methods
|
||||
var _ volume.Volume = &csiMountMgr{}
|
||||
|
||||
func (c *csiMountMgr) GetPath() string {
|
||||
dir := path.Join(getTargetPath(c.podUID, c.specVolumeID, c.plugin.host), "/mount")
|
||||
glog.V(4).Info(log("mounter.GetPath generated [%s]", dir))
|
||||
return dir
|
||||
}
|
||||
|
||||
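// getTargetPath returns the per-pod volume directory for the given spec
// volume ID, escaping names so they are safe to use on disk.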
func getTargetPath(uid types.UID, specVolumeID string, host volume.VolumeHost) string {
|
||||
specVolID := kstrings.EscapeQualifiedNameForDisk(specVolumeID)
|
||||
return host.GetPodVolumeDir(uid, kstrings.EscapeQualifiedNameForDisk(csiPluginName), specVolID)
|
||||
}
|
||||
|
||||
// volume.Mounter methods
|
||||
var _ volume.Mounter = &csiMountMgr{}
|
||||
|
||||
func (c *csiMountMgr) CanMount() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *csiMountMgr) SetUp(fsGroup *int64) error {
|
||||
return c.SetUpAt(c.GetPath(), fsGroup)
|
||||
}
|
||||
|
||||
func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
|
||||
glog.V(4).Infof(log("Mounter.SetUpAt(%s)", dir))
|
||||
|
||||
mounted, err := isDirMounted(c.plugin, dir)
|
||||
if err != nil {
|
||||
glog.Error(log("mounter.SetUpAt failed while checking mount status for dir [%s]", dir))
|
||||
return err
|
||||
}
|
||||
|
||||
if mounted {
|
||||
glog.V(4).Info(log("mounter.SetUpAt skipping mount, dir already mounted [%s]", dir))
|
||||
return nil
|
||||
}
|
||||
|
||||
csiSource, err := getCSISourceFromSpec(c.spec)
|
||||
if err != nil {
|
||||
glog.Error(log("mounter.SetupAt failed to get CSI persistent source: %v", err))
|
||||
return err
|
||||
}
|
||||
|
||||
csi := c.csiClient
|
||||
nodeName := string(c.plugin.host.GetNodeName())
|
||||
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Check for STAGE_UNSTAGE_VOLUME set and populate deviceMountPath if so
|
||||
deviceMountPath := ""
|
||||
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
|
||||
if err != nil {
|
||||
glog.Error(log("mounter.SetUpAt failed to check for STAGE_UNSTAGE_VOLUME capabilty: %v", err))
|
||||
return err
|
||||
}
|
||||
|
||||
if stageUnstageSet {
|
||||
deviceMountPath, err = makeDeviceMountPath(c.plugin, c.spec)
|
||||
if err != nil {
|
||||
glog.Error(log("mounter.SetUpAt failed to make device mount path: %v", err))
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
|
||||
if c.volumeInfo == nil {
|
||||
attachment, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
|
||||
if err != nil {
|
||||
glog.Error(log("mounter.SetupAt failed while getting volume attachment [id=%v]: %v", attachID, err))
|
||||
return err
|
||||
}
|
||||
|
||||
if attachment == nil {
|
||||
glog.Error(log("unable to find VolumeAttachment [id=%s]", attachID))
|
||||
return errors.New("no existing VolumeAttachment found")
|
||||
}
|
||||
c.volumeInfo = attachment.Status.AttachmentMetadata
|
||||
}
|
||||
|
||||
attribs := csiSource.VolumeAttributes
|
||||
|
||||
nodePublishSecrets := map[string]string{}
|
||||
if csiSource.NodePublishSecretRef != nil {
|
||||
nodePublishSecrets, err = getCredentialsFromSecret(c.k8s, csiSource.NodePublishSecretRef)
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetching NodePublishSecretRef %s/%s failed: %v",
|
||||
csiSource.NodePublishSecretRef.Namespace, csiSource.NodePublishSecretRef.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// create target_dir before call to NodePublish
|
||||
if err := os.MkdirAll(dir, 0750); err != nil {
|
||||
glog.Error(log("mouter.SetUpAt failed to create dir %#v: %v", dir, err))
|
||||
return err
|
||||
}
|
||||
glog.V(4).Info(log("created target path successfully [%s]", dir))
|
||||
|
||||
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
|
||||
accessMode := api.ReadWriteOnce
|
||||
if c.spec.PersistentVolume.Spec.AccessModes != nil {
|
||||
accessMode = c.spec.PersistentVolume.Spec.AccessModes[0]
|
||||
}
|
||||
|
||||
fsType := csiSource.FSType
|
||||
if len(fsType) == 0 {
|
||||
fsType = defaultFSType
|
||||
}
|
||||
err = csi.NodePublishVolume(
|
||||
ctx,
|
||||
c.volumeID,
|
||||
c.readOnly,
|
||||
deviceMountPath,
|
||||
dir,
|
||||
accessMode,
|
||||
c.volumeInfo,
|
||||
attribs,
|
||||
nodePublishSecrets,
|
||||
fsType,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
glog.Errorf(log("mounter.SetupAt failed: %v", err))
|
||||
if removeMountDirErr := removeMountDir(c.plugin, dir); removeMountDirErr != nil {
|
||||
glog.Error(log("mounter.SetupAt failed to remove mount dir after a NodePublish() error [%s]: %v", dir, removeMountDirErr))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// apply volume ownership
|
||||
if !c.readOnly && fsGroup != nil {
|
||||
err := volume.SetVolumeOwnership(c, fsGroup)
|
||||
if err != nil {
|
||||
// attempt to rollback mount.
|
||||
glog.Error(log("mounter.SetupAt failed to set fsgroup volume ownership for [%s]: %v", c.volumeID, err))
|
||||
glog.V(4).Info(log("mounter.SetupAt attempting to unpublish volume %s due to previous error", c.volumeID))
|
||||
if unpubErr := csi.NodeUnpublishVolume(ctx, c.volumeID, dir); unpubErr != nil {
|
||||
glog.Error(log(
|
||||
"mounter.SetupAt failed to unpublish volume [%s]: %v (caused by previous NodePublish error: %v)",
|
||||
c.volumeID, unpubErr, err,
|
||||
))
|
||||
return fmt.Errorf("%v (caused by %v)", unpubErr, err)
|
||||
}
|
||||
|
||||
if unmountErr := removeMountDir(c.plugin, dir); unmountErr != nil {
|
||||
glog.Error(log(
|
||||
"mounter.SetupAt failed to clean mount dir [%s]: %v (caused by previous NodePublish error: %v)",
|
||||
dir, unmountErr, err,
|
||||
))
|
||||
return fmt.Errorf("%v (caused by %v)", unmountErr, err)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
glog.V(4).Info(log("mounter.SetupAt sets fsGroup to [%d] for %s", *fsGroup, c.volumeID))
|
||||
}
|
||||
|
||||
glog.V(4).Infof(log("mounter.SetUp successfully requested NodePublish [%s]", dir))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *csiMountMgr) GetAttributes() volume.Attributes {
|
||||
mounter := c.plugin.host.GetMounter(c.plugin.GetPluginName())
|
||||
path := c.GetPath()
|
||||
supportSelinux, err := mounter.GetSELinuxSupport(path)
|
||||
if err != nil {
|
||||
glog.V(2).Info(log("error checking for SELinux support: %s", err))
|
||||
// Best guess
|
||||
supportSelinux = false
|
||||
}
|
||||
return volume.Attributes{
|
||||
ReadOnly: c.readOnly,
|
||||
Managed: !c.readOnly,
|
||||
SupportsSELinux: supportSelinux,
|
||||
}
|
||||
}
|
||||
|
||||
// volume.Unmounter methods
|
||||
var _ volume.Unmounter = &csiMountMgr{}
|
||||
|
||||
func (c *csiMountMgr) TearDown() error {
|
||||
return c.TearDownAt(c.GetPath())
|
||||
}
|
||||
func (c *csiMountMgr) TearDownAt(dir string) error {
|
||||
glog.V(4).Infof(log("Unmounter.TearDown(%s)", dir))
|
||||
|
||||
// is dir even mounted ?
|
||||
// TODO (vladimirvivien) this check may not work for an emptyDir or local storage
|
||||
// see https://github.com/kubernetes/kubernetes/pull/56836#discussion_r155834524
|
||||
mounted, err := isDirMounted(c.plugin, dir)
|
||||
if err != nil {
|
||||
glog.Error(log("unmounter.Teardown failed while checking mount status for dir [%s]: %v", dir, err))
|
||||
return err
|
||||
}
|
||||
|
||||
if !mounted {
|
||||
glog.V(4).Info(log("unmounter.Teardown skipping unmount, dir not mounted [%s]", dir))
|
||||
return nil
|
||||
}
|
||||
|
||||
volID := c.volumeID
|
||||
csi := c.csiClient
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
|
||||
defer cancel()
|
||||
|
||||
if err := csi.NodeUnpublishVolume(ctx, volID, dir); err != nil {
|
||||
glog.Errorf(log("mounter.TearDownAt failed: %v", err))
|
||||
return err
|
||||
}
|
||||
|
||||
// clean mount point dir
|
||||
if err := removeMountDir(c.plugin, dir); err != nil {
|
||||
glog.Error(log("mounter.TearDownAt failed to clean mount dir [%s]: %v", dir, err))
|
||||
return err
|
||||
}
|
||||
glog.V(4).Infof(log("mounte.TearDownAt successfully unmounted dir [%s]", dir))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// isDirMounted returns the !notMounted result from IsLikelyNotMountPoint check
|
||||
func isDirMounted(plug *csiPlugin, dir string) (bool, error) {
|
||||
mounter := plug.host.GetMounter(plug.GetPluginName())
|
||||
notMnt, err := mounter.IsLikelyNotMountPoint(dir)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
glog.Error(log("isDirMounted IsLikelyNotMountPoint test failed for dir [%v]", dir))
|
||||
return false, err
|
||||
}
|
||||
return !notMnt, nil
|
||||
}
|
||||
|
||||
// removeMountDir cleans the mount dir when it is not mounted and removes the volume data file in dir
|
||||
func removeMountDir(plug *csiPlugin, mountPath string) error {
|
||||
glog.V(4).Info(log("removing mount path [%s]", mountPath))
|
||||
if pathExists, pathErr := util.PathExists(mountPath); pathErr != nil {
|
||||
glog.Error(log("failed while checking mount path stat [%s]", pathErr))
|
||||
return pathErr
|
||||
} else if !pathExists {
|
||||
glog.Warning(log("skipping mount dir removal, path does not exist [%v]", mountPath))
|
||||
return nil
|
||||
}
|
||||
|
||||
mounter := plug.host.GetMounter(plug.GetPluginName())
|
||||
notMnt, err := mounter.IsLikelyNotMountPoint(mountPath)
|
||||
if err != nil {
|
||||
glog.Error(log("mount dir removal failed [%s]: %v", mountPath, err))
|
||||
return err
|
||||
}
|
||||
if notMnt {
|
||||
glog.V(4).Info(log("dir not mounted, deleting it [%s]", mountPath))
|
||||
if err := os.Remove(mountPath); err != nil && !os.IsNotExist(err) {
|
||||
glog.Error(log("failed to remove dir [%s]: %v", mountPath, err))
|
||||
return err
|
||||
}
|
||||
// remove volume data file as well
|
||||
volPath := path.Dir(mountPath)
|
||||
dataFile := path.Join(volPath, volDataFileName)
|
||||
glog.V(4).Info(log("also deleting volume info data file [%s]", dataFile))
|
||||
if err := os.Remove(dataFile); err != nil && !os.IsNotExist(err) {
|
||||
glog.Error(log("failed to delete volume data file [%s]: %v", dataFile, err))
|
||||
return err
|
||||
}
|
||||
// remove volume path
|
||||
glog.V(4).Info(log("deleting volume path [%s]", volPath))
|
||||
if err := os.Remove(volPath); err != nil && !os.IsNotExist(err) {
|
||||
glog.Error(log("failed to delete volume path [%s]: %v", volPath, err))
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
258
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter_test.go
generated
vendored
Normal file
@@ -0,0 +1,258 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package csi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
api "k8s.io/api/core/v1"
|
||||
storage "k8s.io/api/storage/v1beta1"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
fakeclient "k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
)
|
||||
|
||||
var (
|
||||
testDriver = "test-driver"
|
||||
testVol = "vol-123"
|
||||
testns = "test-ns"
|
||||
testPodUID = types.UID("test-pod")
|
||||
)
|
||||
|
||||
func TestMounterGetPath(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
// TODO (vladimirvivien) specName with slashes will not work
|
||||
testCases := []struct {
|
||||
name string
|
||||
specVolumeName string
|
||||
path string
|
||||
}{
|
||||
{
|
||||
name: "simple specName",
|
||||
specVolumeName: "spec-0",
|
||||
path: path.Join(tmpDir, fmt.Sprintf("pods/%s/volumes/kubernetes.io~csi/%s/%s", testPodUID, "spec-0", "/mount")),
|
||||
},
|
||||
{
|
||||
name: "specName with dots",
|
||||
specVolumeName: "test.spec.1",
|
||||
path: path.Join(tmpDir, fmt.Sprintf("pods/%s/volumes/kubernetes.io~csi/%s/%s", testPodUID, "test.spec.1", "/mount")),
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Logf("test case: %s", tc.name)
|
||||
pv := makeTestPV(tc.specVolumeName, 10, testDriver, testVol)
|
||||
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
|
||||
mounter, err := plug.NewMounter(
|
||||
spec,
|
||||
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
|
||||
volume.VolumeOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to make a new Mounter: %v", err)
|
||||
}
|
||||
csiMounter := mounter.(*csiMountMgr)
|
||||
|
||||
path := csiMounter.GetPath()
|
||||
|
||||
if tc.path != path {
|
||||
t.Errorf("expecting path %s, got %s", tc.path, path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMounterSetUp(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
fakeClient := fakeclient.NewSimpleClientset()
|
||||
host := volumetest.NewFakeVolumeHostWithNodeName(
|
||||
tmpDir,
|
||||
fakeClient,
|
||||
nil,
|
||||
"fakeNode",
|
||||
)
|
||||
plug.host = host
|
||||
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
||||
pvName := pv.GetName()
|
||||
|
||||
mounter, err := plug.NewMounter(
|
||||
volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
|
||||
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
|
||||
volume.VolumeOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to make a new Mounter: %v", err)
|
||||
}
|
||||
|
||||
if mounter == nil {
|
||||
t.Fatal("failed to create CSI mounter")
|
||||
}
|
||||
|
||||
csiMounter := mounter.(*csiMountMgr)
|
||||
csiMounter.csiClient = setupClient(t, true)
|
||||
|
||||
attachID := getAttachmentName(csiMounter.volumeID, csiMounter.driverName, string(plug.host.GetNodeName()))
|
||||
|
||||
attachment := &storage.VolumeAttachment{
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Name: attachID,
|
||||
},
|
||||
Spec: storage.VolumeAttachmentSpec{
|
||||
NodeName: "test-node",
|
||||
Attacher: csiPluginName,
|
||||
Source: storage.VolumeAttachmentSource{
|
||||
PersistentVolumeName: &pvName,
|
||||
},
|
||||
},
|
||||
Status: storage.VolumeAttachmentStatus{
|
||||
Attached: false,
|
||||
AttachError: nil,
|
||||
DetachError: nil,
|
||||
},
|
||||
}
|
||||
_, err = csiMounter.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to setup VolumeAttachment: %v", err)
|
||||
}
|
||||
|
||||
// Mounter.SetUp()
|
||||
fsGroup := int64(2000)
|
||||
if err := csiMounter.SetUp(&fsGroup); err != nil {
|
||||
t.Fatalf("mounter.Setup failed: %v", err)
|
||||
}
|
||||
path := csiMounter.GetPath()
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
t.Errorf("SetUp() failed, volume path not created: %s", path)
|
||||
} else {
|
||||
t.Errorf("SetUp() failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// ensure call went all the way
|
||||
pubs := csiMounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
|
||||
if pubs[csiMounter.volumeID] != csiMounter.GetPath() {
|
||||
t.Error("csi server may not have received NodePublishVolume call")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmounterTeardown(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
||||
|
||||
// save the data file prior to unmount
|
||||
dir := path.Join(getTargetPath(testPodUID, pv.ObjectMeta.Name, plug.host), "/mount")
|
||||
if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
|
||||
t.Errorf("failed to create dir [%s]: %v", dir, err)
|
||||
}
|
||||
|
||||
// do a fake local mount
|
||||
diskMounter := util.NewSafeFormatAndMountFromHost(plug.GetPluginName(), plug.host)
|
||||
if err := diskMounter.FormatAndMount("/fake/device", dir, "testfs", nil); err != nil {
|
||||
t.Errorf("failed to mount dir [%s]: %v", dir, err)
|
||||
}
|
||||
|
||||
if err := saveVolumeData(
|
||||
path.Dir(dir),
|
||||
volDataFileName,
|
||||
map[string]string{
|
||||
volDataKey.specVolID: pv.ObjectMeta.Name,
|
||||
volDataKey.driverName: testDriver,
|
||||
volDataKey.volHandle: testVol,
|
||||
},
|
||||
); err != nil {
|
||||
t.Fatalf("failed to save volume data: %v", err)
|
||||
}
|
||||
|
||||
unmounter, err := plug.NewUnmounter(pv.ObjectMeta.Name, testPodUID)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to make a new Unmounter: %v", err)
|
||||
}
|
||||
|
||||
csiUnmounter := unmounter.(*csiMountMgr)
|
||||
csiUnmounter.csiClient = setupClient(t, true)
|
||||
err = csiUnmounter.TearDownAt(dir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// ensure csi client call
|
||||
pubs := csiUnmounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
|
||||
if _, ok := pubs[csiUnmounter.volumeID]; ok {
|
||||
t.Error("csi server may not have received NodeUnpublishVolume call")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestSaveVolumeData(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
testCases := []struct {
|
||||
name string
|
||||
data map[string]string
|
||||
shouldFail bool
|
||||
}{
|
||||
{name: "test with data ok", data: map[string]string{"key0": "val0", "_key1": "val1", "key2": "val2"}},
|
||||
{name: "test with data ok 2 ", data: map[string]string{"_key0_": "val0", "&key1": "val1", "key2": "val2"}},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
t.Logf("test case: %s", tc.name)
|
||||
specVolID := fmt.Sprintf("spec-volid-%d", i)
|
||||
mountDir := path.Join(getTargetPath(testPodUID, specVolID, plug.host), "/mount")
|
||||
if err := os.MkdirAll(mountDir, 0755); err != nil && !os.IsNotExist(err) {
|
||||
t.Errorf("failed to create dir [%s]: %v", mountDir, err)
|
||||
}
|
||||
|
||||
err := saveVolumeData(path.Dir(mountDir), volDataFileName, tc.data)
|
||||
|
||||
if !tc.shouldFail && err != nil {
|
||||
t.Errorf("unexpected failure: %v", err)
|
||||
}
|
||||
// did file get created
|
||||
dataDir := getTargetPath(testPodUID, specVolID, plug.host)
|
||||
file := path.Join(dataDir, volDataFileName)
|
||||
if _, err := os.Stat(file); err != nil {
|
||||
t.Errorf("failed to create data dir: %v", err)
|
||||
}
|
||||
|
||||
// validate content
|
||||
data, err := ioutil.ReadFile(file)
|
||||
if !tc.shouldFail && err != nil {
|
||||
t.Errorf("failed to read data file: %v", err)
|
||||
}
|
||||
|
||||
jsonData := new(bytes.Buffer)
|
||||
if err := json.NewEncoder(jsonData).Encode(tc.data); err != nil {
|
||||
t.Errorf("failed to encode json: %v", err)
|
||||
}
|
||||
if string(data) != jsonData.String() {
|
||||
t.Errorf("expecting encoded data %v, got %v", string(data), jsonData)
|
||||
}
|
||||
}
|
||||
}
|
443
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go
generated
vendored
Normal file
@@ -0,0 +1,443 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package csi
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
api "k8s.io/api/core/v1"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/csi/labelmanager"
|
||||
)
|
||||
|
||||
const (
|
||||
csiPluginName = "kubernetes.io/csi"
|
||||
|
||||
// TODO (vladimirvivien) implement a more dynamic way to discover
|
||||
// the unix domain socket path for each installed csi driver.
|
||||
// TODO (vladimirvivien) would be nice to name socket with a .sock extension
|
||||
// for consistency.
|
||||
csiAddrTemplate = "/var/lib/kubelet/plugins/%v/csi.sock"
|
||||
csiTimeout = 15 * time.Second
|
||||
volNameSep = "^"
|
||||
volDataFileName = "vol_data.json"
|
||||
fsTypeBlockName = "block"
|
||||
)
|
||||
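// Illustrative note (not part of the generated file): for a hypothetical driver
// named "csi-hostpath", csiAddrTemplate resolves to the socket path
// /var/lib/kubelet/plugins/csi-hostpath/csi.sock.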
|
||||
type csiPlugin struct {
|
||||
host volume.VolumeHost
|
||||
blockEnabled bool
|
||||
}
|
||||
|
||||
// ProbeVolumePlugins returns implemented plugins
|
||||
func ProbeVolumePlugins() []volume.VolumePlugin {
|
||||
p := &csiPlugin{
|
||||
host: nil,
|
||||
blockEnabled: utilfeature.DefaultFeatureGate.Enabled(features.CSIBlockVolume),
|
||||
}
|
||||
return []volume.VolumePlugin{p}
|
||||
}
|
||||
|
||||
// volume.VolumePlugin methods
|
||||
var _ volume.VolumePlugin = &csiPlugin{}
|
||||
|
||||
type csiDriver struct {
|
||||
driverName string
|
||||
driverEndpoint string
|
||||
}
|
||||
|
||||
type csiDriversStore struct {
|
||||
driversMap map[string]csiDriver
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
// csiDrivers map keeps track of all registered CSI drivers on the node and their
|
||||
// corresponding sockets
|
||||
var csiDrivers csiDriversStore
|
||||
|
||||
var lm labelmanager.Interface
|
||||
|
||||
// RegistrationCallback is called by kubelet's plugin watcher upon detection
|
||||
// of a new registration socket opened by the CSI driver registrar sidecar.
|
||||
func RegistrationCallback(pluginName string, endpoint string, versions []string, socketPath string) (error, chan bool) {
|
||||
|
||||
glog.Infof(log("Callback from kubelet with plugin name: %s endpoint: %s versions: %s socket path: %s",
|
||||
pluginName, endpoint, strings.Join(versions, ","), socketPath))
|
||||
|
||||
if endpoint == "" {
|
||||
endpoint = socketPath
|
||||
}
|
||||
// Call the label manager to update the node for the newly registered CSI driver
|
||||
err := lm.AddLabels(pluginName)
|
||||
if err != nil {
|
||||
return err, nil
|
||||
}
|
||||
// Store the endpoint of the newly registered CSI driver in the map, keyed by the driver name, so that
|
||||
// other CSI components can look up the driver's actual socket by name.
|
||||
csiDrivers.Lock()
|
||||
defer csiDrivers.Unlock()
|
||||
csiDrivers.driversMap[pluginName] = csiDriver{driverName: pluginName, driverEndpoint: endpoint}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
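// Illustrative usage sketch (not part of the generated file): how kubelet's
// plugin watcher is expected to invoke this callback once the plugin has been
// initialized via Init, and how other CSI components can then look up the
// registered driver's socket. The driver name and socket path are hypothetical.
//
//	if err, _ := RegistrationCallback("csi-hostpath", "", []string{"0.2.0"}, "/var/lib/kubelet/plugins/csi-hostpath/csi.sock"); err != nil {
//		glog.Errorf("driver registration failed: %v", err)
//	}
//	csiDrivers.RLock()
//	endpoint := csiDrivers.driversMap["csi-hostpath"].driverEndpoint
//	csiDrivers.RUnlock()
//	_ = endpoint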
|
||||
func (p *csiPlugin) Init(host volume.VolumeHost) error {
|
||||
glog.Info(log("plugin initializing..."))
|
||||
p.host = host
|
||||
|
||||
// Initialize the csiDrivers map and the label manager
|
||||
csiDrivers = csiDriversStore{driversMap: map[string]csiDriver{}}
|
||||
lm = labelmanager.NewLabelManager(host.GetNodeName(), host.GetKubeClient())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *csiPlugin) GetPluginName() string {
|
||||
return csiPluginName
|
||||
}
|
||||
|
||||
// GetVolumeName returns a concatenated string of CSIVolumeSource.Driver<volNameSep>CSIVolumeSource.VolumeHandle
|
||||
// That string value is used in Detach() to extract driver name and volumeName.
|
||||
func (p *csiPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
|
||||
csi, err := getCSISourceFromSpec(spec)
|
||||
if err != nil {
|
||||
glog.Error(log("plugin.GetVolumeName failed to extract volume source from spec: %v", err))
|
||||
return "", err
|
||||
}
|
||||
|
||||
// return driverName<separator>volumeHandle
|
||||
return fmt.Sprintf("%s%s%s", csi.Driver, volNameSep, csi.VolumeHandle), nil
|
||||
}
|
||||
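// Illustrative note (not part of the generated file), with hypothetical values:
// a spec whose CSI source has Driver "csi-hostpath" and VolumeHandle "vol-0001"
// yields the volume name "csi-hostpath^vol-0001", since volNameSep is "^".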
|
||||
func (p *csiPlugin) CanSupport(spec *volume.Spec) bool {
|
||||
// TODO (vladimirvivien) CanSupport should also take into account
|
||||
// the availability/registration of specified Driver in the volume source
|
||||
return spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CSI != nil
|
||||
}
|
||||
|
||||
func (p *csiPlugin) RequiresRemount() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *csiPlugin) NewMounter(
|
||||
spec *volume.Spec,
|
||||
pod *api.Pod,
|
||||
_ volume.VolumeOptions) (volume.Mounter, error) {
|
||||
pvSource, err := getCSISourceFromSpec(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
readOnly, err := getReadOnlyFromSpec(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
k8s := p.host.GetKubeClient()
|
||||
if k8s == nil {
|
||||
glog.Error(log("failed to get a kubernetes client"))
|
||||
return nil, errors.New("failed to get a Kubernetes client")
|
||||
}
|
||||
|
||||
csi := newCsiDriverClient(pvSource.Driver)
|
||||
|
||||
mounter := &csiMountMgr{
|
||||
plugin: p,
|
||||
k8s: k8s,
|
||||
spec: spec,
|
||||
pod: pod,
|
||||
podUID: pod.UID,
|
||||
driverName: pvSource.Driver,
|
||||
volumeID: pvSource.VolumeHandle,
|
||||
specVolumeID: spec.Name(),
|
||||
csiClient: csi,
|
||||
readOnly: readOnly,
|
||||
}
|
||||
|
||||
// Save volume info in pod dir
|
||||
dir := mounter.GetPath()
|
||||
dataDir := path.Dir(dir) // drop off /mount at end
|
||||
|
||||
if err := os.MkdirAll(dataDir, 0750); err != nil {
|
||||
glog.Error(log("failed to create dir %#v: %v", dataDir, err))
|
||||
return nil, err
|
||||
}
|
||||
glog.V(4).Info(log("created path successfully [%s]", dataDir))
|
||||
|
||||
// persist volume info data for teardown
|
||||
node := string(p.host.GetNodeName())
|
||||
attachID := getAttachmentName(pvSource.VolumeHandle, pvSource.Driver, node)
|
||||
volData := map[string]string{
|
||||
volDataKey.specVolID: spec.Name(),
|
||||
volDataKey.volHandle: pvSource.VolumeHandle,
|
||||
volDataKey.driverName: pvSource.Driver,
|
||||
volDataKey.nodeName: node,
|
||||
volDataKey.attachmentID: attachID,
|
||||
}
|
||||
|
||||
if err := saveVolumeData(dataDir, volDataFileName, volData); err != nil {
|
||||
glog.Error(log("failed to save volume info data: %v", err))
|
||||
if err := os.RemoveAll(dataDir); err != nil {
|
||||
glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err))
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(4).Info(log("mounter created successfully"))
|
||||
|
||||
return mounter, nil
|
||||
}
|
||||
|
||||
func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmounter, error) {
|
||||
glog.V(4).Infof(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID))
|
||||
|
||||
unmounter := &csiMountMgr{
|
||||
plugin: p,
|
||||
podUID: podUID,
|
||||
specVolumeID: specName,
|
||||
}
|
||||
|
||||
// load volume info from file
|
||||
dir := unmounter.GetPath()
|
||||
dataDir := path.Dir(dir) // drop off /mount at end
|
||||
data, err := loadVolumeData(dataDir, volDataFileName)
|
||||
if err != nil {
|
||||
glog.Error(log("unmounter failed to load volume data file [%s]: %v", dir, err))
|
||||
return nil, err
|
||||
}
|
||||
unmounter.driverName = data[volDataKey.driverName]
|
||||
unmounter.volumeID = data[volDataKey.volHandle]
|
||||
unmounter.csiClient = newCsiDriverClient(unmounter.driverName)
|
||||
|
||||
return unmounter, nil
|
||||
}
|
||||
|
||||
func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
|
||||
glog.V(4).Info(log("plugin.ConstructVolumeSpec [pv.Name=%v, path=%v]", volumeName, mountPath))
|
||||
|
||||
volData, err := loadVolumeData(mountPath, volDataFileName)
|
||||
if err != nil {
|
||||
glog.Error(log("plugin.ConstructVolumeSpec failed loading volume data using [%s]: %v", mountPath, err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(4).Info(log("plugin.ConstructVolumeSpec extracted [%#v]", volData))
|
||||
|
||||
pv := &api.PersistentVolume{
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Name: volData[volDataKey.specVolID],
|
||||
},
|
||||
Spec: api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: api.PersistentVolumeSource{
|
||||
CSI: &api.CSIPersistentVolumeSource{
|
||||
Driver: volData[volDataKey.driverName],
|
||||
VolumeHandle: volData[volDataKey.volHandle],
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return volume.NewSpecFromPersistentVolume(pv, false), nil
|
||||
}
|
||||
|
||||
func (p *csiPlugin) SupportsMountOption() bool {
|
||||
// TODO (vladimirvivien) use CSI VolumeCapability.MountVolume.mount_flags
|
||||
// to probe for the result for this method
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *csiPlugin) SupportsBulkVolumeVerification() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// volume.AttachableVolumePlugin methods
|
||||
var _ volume.AttachableVolumePlugin = &csiPlugin{}
|
||||
|
||||
func (p *csiPlugin) NewAttacher() (volume.Attacher, error) {
|
||||
k8s := p.host.GetKubeClient()
|
||||
if k8s == nil {
|
||||
glog.Error(log("unable to get kubernetes client from host"))
|
||||
return nil, errors.New("unable to get Kubernetes client")
|
||||
}
|
||||
|
||||
return &csiAttacher{
|
||||
plugin: p,
|
||||
k8s: k8s,
|
||||
waitSleepTime: 1 * time.Second,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *csiPlugin) NewDetacher() (volume.Detacher, error) {
|
||||
k8s := p.host.GetKubeClient()
|
||||
if k8s == nil {
|
||||
glog.Error(log("unable to get kubernetes client from host"))
|
||||
return nil, errors.New("unable to get Kubernetes client")
|
||||
}
|
||||
|
||||
return &csiAttacher{
|
||||
plugin: p,
|
||||
k8s: k8s,
|
||||
waitSleepTime: 1 * time.Second,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *csiPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
|
||||
m := p.host.GetMounter(p.GetPluginName())
|
||||
return mount.GetMountRefs(m, deviceMountPath)
|
||||
}
|
||||
|
||||
// BlockVolumePlugin methods
|
||||
var _ volume.BlockVolumePlugin = &csiPlugin{}
|
||||
|
||||
func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opts volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
|
||||
if !p.blockEnabled {
|
||||
return nil, errors.New("CSIBlockVolume feature not enabled")
|
||||
}
|
||||
|
||||
pvSource, err := getCSISourceFromSpec(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
readOnly, err := getReadOnlyFromSpec(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(4).Info(log("setting up block mapper for [volume=%v,driver=%v]", pvSource.VolumeHandle, pvSource.Driver))
|
||||
client := newCsiDriverClient(pvSource.Driver)
|
||||
|
||||
k8s := p.host.GetKubeClient()
|
||||
if k8s == nil {
|
||||
glog.Error(log("failed to get a kubernetes client"))
|
||||
return nil, errors.New("failed to get a Kubernetes client")
|
||||
}
|
||||
|
||||
mapper := &csiBlockMapper{
|
||||
csiClient: client,
|
||||
k8s: k8s,
|
||||
plugin: p,
|
||||
volumeID: pvSource.VolumeHandle,
|
||||
driverName: pvSource.Driver,
|
||||
readOnly: readOnly,
|
||||
spec: spec,
|
||||
podUID: podRef.UID,
|
||||
}
|
||||
|
||||
// Save volume info in pod dir
|
||||
dataDir := getVolumeDeviceDataDir(spec.Name(), p.host)
|
||||
|
||||
if err := os.MkdirAll(dataDir, 0750); err != nil {
|
||||
glog.Error(log("failed to create data dir %s: %v", dataDir, err))
|
||||
return nil, err
|
||||
}
|
||||
glog.V(4).Info(log("created path successfully [%s]", dataDir))
|
||||
|
||||
// persist volume info data for teardown
|
||||
node := string(p.host.GetNodeName())
|
||||
attachID := getAttachmentName(pvSource.VolumeHandle, pvSource.Driver, node)
|
||||
volData := map[string]string{
|
||||
volDataKey.specVolID: spec.Name(),
|
||||
volDataKey.volHandle: pvSource.VolumeHandle,
|
||||
volDataKey.driverName: pvSource.Driver,
|
||||
volDataKey.nodeName: node,
|
||||
volDataKey.attachmentID: attachID,
|
||||
}
|
||||
|
||||
if err := saveVolumeData(dataDir, volDataFileName, volData); err != nil {
|
||||
glog.Error(log("failed to save volume info data: %v", err))
|
||||
if err := os.RemoveAll(dataDir); err != nil {
|
||||
glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err))
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return mapper, nil
|
||||
}
|
||||
|
||||
func (p *csiPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
|
||||
if !p.blockEnabled {
|
||||
return nil, errors.New("CSIBlockVolume feature not enabled")
|
||||
}
|
||||
|
||||
glog.V(4).Infof(log("setting up block unmapper for [Spec=%v, podUID=%v]", volName, podUID))
|
||||
unmapper := &csiBlockMapper{
|
||||
plugin: p,
|
||||
podUID: podUID,
|
||||
specName: volName,
|
||||
}
|
||||
|
||||
// load volume info from file
|
||||
dataDir := getVolumeDeviceDataDir(unmapper.specName, p.host)
|
||||
data, err := loadVolumeData(dataDir, volDataFileName)
|
||||
if err != nil {
|
||||
glog.Error(log("unmapper failed to load volume data file [%s]: %v", dataDir, err))
|
||||
return nil, err
|
||||
}
|
||||
unmapper.driverName = data[volDataKey.driverName]
|
||||
unmapper.volumeID = data[volDataKey.volHandle]
|
||||
unmapper.csiClient = newCsiDriverClient(unmapper.driverName)
|
||||
|
||||
return unmapper, nil
|
||||
}
|
||||
|
||||
func (p *csiPlugin) ConstructBlockVolumeSpec(podUID types.UID, specVolName, mapPath string) (*volume.Spec, error) {
|
||||
if !p.blockEnabled {
|
||||
return nil, errors.New("CSIBlockVolume feature not enabled")
|
||||
}
|
||||
|
||||
glog.V(4).Infof("plugin.ConstructBlockVolumeSpec [podUID=%s, specVolName=%s, path=%s]", string(podUID), specVolName, mapPath)
|
||||
|
||||
dataDir := getVolumeDeviceDataDir(specVolName, p.host)
|
||||
volData, err := loadVolumeData(dataDir, volDataFileName)
|
||||
if err != nil {
|
||||
glog.Error(log("plugin.ConstructBlockVolumeSpec failed loading volume data using [%s]: %v", mapPath, err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(4).Info(log("plugin.ConstructBlockVolumeSpec extracted [%#v]", volData))
|
||||
|
||||
blockMode := api.PersistentVolumeBlock
|
||||
pv := &api.PersistentVolume{
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Name: volData[volDataKey.specVolID],
|
||||
},
|
||||
Spec: api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: api.PersistentVolumeSource{
|
||||
CSI: &api.CSIPersistentVolumeSource{
|
||||
Driver: volData[volDataKey.driverName],
|
||||
VolumeHandle: volData[volDataKey.volHandle],
|
||||
},
|
||||
},
|
||||
VolumeMode: &blockMode,
|
||||
},
|
||||
}
|
||||
|
||||
return volume.NewSpecFromPersistentVolume(pv, false), nil
|
||||
}
|
475
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin_test.go
generated
vendored
Normal file
@@ -0,0 +1,475 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package csi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
api "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
fakeclient "k8s.io/client-go/kubernetes/fake"
|
||||
utiltesting "k8s.io/client-go/util/testing"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||
)
|
||||
|
||||
// create a plugin mgr to load plugins and setup a fake client
|
||||
func newTestPlugin(t *testing.T) (*csiPlugin, string) {
|
||||
err := utilfeature.DefaultFeatureGate.Set("CSIBlockVolume=true")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to enable feature gate for CSIBlockVolume: %v", err)
|
||||
}
|
||||
|
||||
tmpDir, err := utiltesting.MkTmpdir("csi-test")
|
||||
if err != nil {
|
||||
t.Fatalf("can't create temp dir: %v", err)
|
||||
}
|
||||
|
||||
fakeClient := fakeclient.NewSimpleClientset()
|
||||
host := volumetest.NewFakeVolumeHost(
|
||||
tmpDir,
|
||||
fakeClient,
|
||||
nil,
|
||||
)
|
||||
plugMgr := &volume.VolumePluginMgr{}
|
||||
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
|
||||
|
||||
plug, err := plugMgr.FindPluginByName(csiPluginName)
|
||||
if err != nil {
|
||||
t.Fatalf("can't find plugin %v", csiPluginName)
|
||||
}
|
||||
|
||||
csiPlug, ok := plug.(*csiPlugin)
|
||||
if !ok {
|
||||
t.Fatalf("cannot assert plugin to be type csiPlugin")
|
||||
}
|
||||
|
||||
return csiPlug, tmpDir
|
||||
}
|
||||
|
||||
func makeTestPV(name string, sizeGig int, driverName, volID string) *api.PersistentVolume {
|
||||
return &api.PersistentVolume{
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: api.PersistentVolumeSpec{
|
||||
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
|
||||
Capacity: api.ResourceList{
|
||||
api.ResourceName(api.ResourceStorage): resource.MustParse(
|
||||
fmt.Sprintf("%dGi", sizeGig),
|
||||
),
|
||||
},
|
||||
PersistentVolumeSource: api.PersistentVolumeSource{
|
||||
CSI: &api.CSIPersistentVolumeSource{
|
||||
Driver: driverName,
|
||||
VolumeHandle: volID,
|
||||
ReadOnly: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginGetPluginName(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
if plug.GetPluginName() != "kubernetes.io/csi" {
|
||||
t.Errorf("unexpected plugin name %v", plug.GetPluginName())
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginGetVolumeName(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
testCases := []struct {
|
||||
name string
|
||||
driverName string
|
||||
volName string
|
||||
shouldFail bool
|
||||
}{
|
||||
{"alphanum names", "testdr", "testvol", false},
|
||||
{"mixchar driver", "test.dr.cc", "testvol", false},
|
||||
{"mixchar volume", "testdr", "test-vol-name", false},
|
||||
{"mixchars all", "test-driver", "test.vol.name", false},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("testing: %s", tc.name)
|
||||
pv := makeTestPV("test-pv", 10, tc.driverName, tc.volName)
|
||||
spec := volume.NewSpecFromPersistentVolume(pv, false)
|
||||
name, err := plug.GetVolumeName(spec)
|
||||
if tc.shouldFail && err == nil {
|
||||
t.Fatal("GetVolumeName should fail, but got err=nil")
|
||||
}
|
||||
if name != fmt.Sprintf("%s%s%s", tc.driverName, volNameSep, tc.volName) {
|
||||
t.Errorf("unexpected volume name %s", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginCanSupport(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
||||
spec := volume.NewSpecFromPersistentVolume(pv, false)
|
||||
|
||||
if !plug.CanSupport(spec) {
|
||||
t.Errorf("should support CSI spec")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginConstructVolumeSpec(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
specVolID string
|
||||
data map[string]string
|
||||
shouldFail bool
|
||||
}{
|
||||
{
|
||||
name: "valid spec name",
|
||||
specVolID: "test.vol.id",
|
||||
data: map[string]string{volDataKey.specVolID: "test.vol.id", volDataKey.volHandle: "test-vol0", volDataKey.driverName: "test-driver0"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("test case: %s", tc.name)
|
||||
dir := getTargetPath(testPodUID, tc.specVolID, plug.host)
|
||||
|
||||
// create the data file
|
||||
if tc.data != nil {
|
||||
mountDir := path.Join(getTargetPath(testPodUID, tc.specVolID, plug.host), "/mount")
|
||||
if err := os.MkdirAll(mountDir, 0755); err != nil && !os.IsNotExist(err) {
|
||||
t.Errorf("failed to create dir [%s]: %v", mountDir, err)
|
||||
}
|
||||
if err := saveVolumeData(path.Dir(mountDir), volDataFileName, tc.data); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// rebuild spec
|
||||
spec, err := plug.ConstructVolumeSpec("test-pv", dir)
|
||||
if tc.shouldFail {
|
||||
if err == nil {
|
||||
t.Fatal("expecting ConstructVolumeSpec to fail, but got nil error")
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
volHandle := spec.PersistentVolume.Spec.CSI.VolumeHandle
|
||||
if volHandle != tc.data[volDataKey.volHandle] {
|
||||
t.Errorf("expected volID %s, got volID %s", tc.data[volDataKey.volHandle], volHandle)
|
||||
}
|
||||
|
||||
if spec.Name() != tc.specVolID {
|
||||
t.Errorf("Unexpected spec name %s", spec.Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginNewMounter(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
||||
mounter, err := plug.NewMounter(
|
||||
volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
|
||||
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
|
||||
volume.VolumeOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to make a new Mounter: %v", err)
|
||||
}
|
||||
|
||||
if mounter == nil {
|
||||
t.Fatal("failed to create CSI mounter")
|
||||
}
|
||||
csiMounter := mounter.(*csiMountMgr)
|
||||
|
||||
// validate mounter fields
|
||||
if csiMounter.driverName != testDriver {
|
||||
t.Error("mounter driver name not set")
|
||||
}
|
||||
if csiMounter.volumeID != testVol {
|
||||
t.Error("mounter volume id not set")
|
||||
}
|
||||
if csiMounter.pod == nil {
|
||||
t.Error("mounter pod not set")
|
||||
}
|
||||
if csiMounter.podUID == types.UID("") {
|
||||
t.Error("mounter podUID not set")
|
||||
}
|
||||
if csiMounter.csiClient == nil {
|
||||
t.Error("mounter csiClient is nil")
|
||||
}
|
||||
|
||||
// ensure data file is created
|
||||
dataDir := path.Dir(mounter.GetPath())
|
||||
dataFile := filepath.Join(dataDir, volDataFileName)
|
||||
if _, err := os.Stat(dataFile); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
t.Errorf("data file not created %s", dataFile)
|
||||
} else {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginNewUnmounter(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
||||
|
||||
// save the data file to re-create client
|
||||
dir := path.Join(getTargetPath(testPodUID, pv.ObjectMeta.Name, plug.host), "/mount")
|
||||
if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
|
||||
t.Errorf("failed to create dir [%s]: %v", dir, err)
|
||||
}
|
||||
|
||||
if err := saveVolumeData(
|
||||
path.Dir(dir),
|
||||
volDataFileName,
|
||||
map[string]string{
|
||||
volDataKey.specVolID: pv.ObjectMeta.Name,
|
||||
volDataKey.driverName: testDriver,
|
||||
volDataKey.volHandle: testVol,
|
||||
},
|
||||
); err != nil {
|
||||
t.Fatalf("failed to save volume data: %v", err)
|
||||
}
|
||||
|
||||
// test unmounter
|
||||
unmounter, err := plug.NewUnmounter(pv.ObjectMeta.Name, testPodUID)
|
||||
csiUnmounter := unmounter.(*csiMountMgr)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to make a new Unmounter: %v", err)
|
||||
}
|
||||
|
||||
if csiUnmounter == nil {
|
||||
t.Fatal("failed to create CSI Unmounter")
|
||||
}
|
||||
|
||||
if csiUnmounter.podUID != testPodUID {
|
||||
t.Error("podUID not set")
|
||||
}
|
||||
|
||||
if csiUnmounter.csiClient == nil {
|
||||
t.Error("unmounter csiClient is nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginNewAttacher(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
attacher, err := plug.NewAttacher()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create new attacher: %v", err)
|
||||
}
|
||||
|
||||
csiAttacher := attacher.(*csiAttacher)
|
||||
if csiAttacher.plugin == nil {
|
||||
t.Error("plugin not set for attacher")
|
||||
}
|
||||
if csiAttacher.k8s == nil {
|
||||
t.Error("Kubernetes client not set for attacher")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginNewDetacher(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
detacher, err := plug.NewDetacher()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create new detacher: %v", err)
|
||||
}
|
||||
|
||||
csiDetacher := detacher.(*csiAttacher)
|
||||
if csiDetacher.plugin == nil {
|
||||
t.Error("plugin not set for detacher")
|
||||
}
|
||||
if csiDetacher.k8s == nil {
|
||||
t.Error("Kubernetes client not set for detacher")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginNewBlockMapper(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
pv := makeTestPV("test-block-pv", 10, testDriver, testVol)
|
||||
mounter, err := plug.NewBlockVolumeMapper(
|
||||
volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
|
||||
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
|
||||
volume.VolumeOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to make a new BlockMapper: %v", err)
|
||||
}
|
||||
|
||||
if mounter == nil {
|
||||
t.Fatal("failed to create CSI BlockMapper, mapper is nill")
|
||||
}
|
||||
csiMapper := mounter.(*csiBlockMapper)
|
||||
|
||||
// validate mounter fields
|
||||
if csiMapper.driverName != testDriver {
|
||||
t.Error("CSI block mapper missing driver name")
|
||||
}
|
||||
if csiMapper.volumeID != testVol {
|
||||
t.Error("CSI block mapper missing volumeID")
|
||||
}
|
||||
|
||||
if csiMapper.podUID == types.UID("") {
|
||||
t.Error("CSI block mapper missing pod.UID")
|
||||
}
|
||||
if csiMapper.csiClient == nil {
|
||||
t.Error("mapper csiClient is nil")
|
||||
}
|
||||
|
||||
// ensure data file is created
|
||||
dataFile := getVolumeDeviceDataDir(csiMapper.spec.Name(), plug.host)
|
||||
if _, err := os.Stat(dataFile); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
t.Errorf("data file not created %s", dataFile)
|
||||
} else {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginNewUnmapper(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
||||
|
||||
// save the data file to re-create client
|
||||
dir := getVolumeDeviceDataDir(pv.ObjectMeta.Name, plug.host)
|
||||
if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
|
||||
t.Errorf("failed to create dir [%s]: %v", dir, err)
|
||||
}
|
||||
|
||||
if err := saveVolumeData(
|
||||
dir,
|
||||
volDataFileName,
|
||||
map[string]string{
|
||||
volDataKey.specVolID: pv.ObjectMeta.Name,
|
||||
volDataKey.driverName: testDriver,
|
||||
volDataKey.volHandle: testVol,
|
||||
},
|
||||
); err != nil {
|
||||
t.Fatalf("failed to save volume data: %v", err)
|
||||
}
|
||||
|
||||
// test unmapper
|
||||
unmapper, err := plug.NewBlockVolumeUnmapper(pv.ObjectMeta.Name, testPodUID)
|
||||
csiUnmapper := unmapper.(*csiBlockMapper)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to make a new Unmounter: %v", err)
|
||||
}
|
||||
|
||||
if csiUnmapper == nil {
|
||||
t.Fatal("failed to create CSI Unmounter")
|
||||
}
|
||||
|
||||
if csiUnmapper.podUID != testPodUID {
|
||||
t.Error("podUID not set")
|
||||
}
|
||||
|
||||
if csiUnmapper.specName != pv.ObjectMeta.Name {
|
||||
t.Error("specName not set")
|
||||
}
|
||||
|
||||
if csiUnmapper.csiClient == nil {
|
||||
t.Error("unmapper csiClient is nil")
|
||||
}
|
||||
|
||||
// test loaded vol data
|
||||
if csiUnmapper.driverName != testDriver {
|
||||
t.Error("unmapper driverName not set")
|
||||
}
|
||||
if csiUnmapper.volumeID != testVol {
|
||||
t.Error("unmapper volumeHandle not set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginConstructBlockVolumeSpec(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
specVolID string
|
||||
data map[string]string
|
||||
shouldFail bool
|
||||
}{
|
||||
{
|
||||
name: "valid spec name",
|
||||
specVolID: "test.vol.id",
|
||||
data: map[string]string{volDataKey.specVolID: "test.vol.id", volDataKey.volHandle: "test-vol0", volDataKey.driverName: "test-driver0"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("test case: %s", tc.name)
|
||||
deviceDataDir := getVolumeDeviceDataDir(tc.specVolID, plug.host)
|
||||
|
||||
// create data file in csi plugin dir
|
||||
if tc.data != nil {
|
||||
if err := os.MkdirAll(deviceDataDir, 0755); err != nil && !os.IsNotExist(err) {
|
||||
t.Errorf("failed to create dir [%s]: %v", deviceDataDir, err)
|
||||
}
|
||||
if err := saveVolumeData(deviceDataDir, volDataFileName, tc.data); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// rebuild spec
|
||||
spec, err := plug.ConstructBlockVolumeSpec("test-podUID", tc.specVolID, getVolumeDevicePluginDir(tc.specVolID, plug.host))
|
||||
if tc.shouldFail {
|
||||
if err == nil {
|
||||
t.Fatal("expecting ConstructVolumeSpec to fail, but got nil error")
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
volHandle := spec.PersistentVolume.Spec.CSI.VolumeHandle
|
||||
if volHandle != tc.data[volDataKey.volHandle] {
|
||||
t.Errorf("expected volID %s, got volID %s", tc.data[volDataKey.volHandle], volHandle)
|
||||
}
|
||||
|
||||
if spec.Name() != tc.specVolID {
|
||||
t.Errorf("Unexpected spec name %s", spec.Name())
|
||||
}
|
||||
}
|
||||
}
|
123
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_util.go
generated
vendored
Normal file
@@ -0,0 +1,123 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package csi
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/golang/glog"
|
||||
api "k8s.io/api/core/v1"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
kstrings "k8s.io/kubernetes/pkg/util/strings"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
)
|
||||
|
||||
func getCredentialsFromSecret(k8s kubernetes.Interface, secretRef *api.SecretReference) (map[string]string, error) {
|
||||
credentials := map[string]string{}
|
||||
secret, err := k8s.CoreV1().Secrets(secretRef.Namespace).Get(secretRef.Name, meta.GetOptions{})
|
||||
if err != nil {
|
||||
glog.Errorf("failed to find the secret %s in the namespace %s with error: %v\n", secretRef.Name, secretRef.Namespace, err)
|
||||
return credentials, err
|
||||
}
|
||||
for key, value := range secret.Data {
|
||||
credentials[key] = string(value)
|
||||
}
|
||||
|
||||
return credentials, nil
|
||||
}
|
||||
|
||||
// saveVolumeData persists parameter data as json file at the provided location
|
||||
func saveVolumeData(dir string, fileName string, data map[string]string) error {
|
||||
dataFilePath := path.Join(dir, fileName)
|
||||
glog.V(4).Info(log("saving volume data file [%s]", dataFilePath))
|
||||
file, err := os.Create(dataFilePath)
|
||||
if err != nil {
|
||||
glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
if err := json.NewEncoder(file).Encode(data); err != nil {
|
||||
glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
|
||||
return err
|
||||
}
|
||||
glog.V(4).Info(log("volume data file saved successfully [%s]", dataFilePath))
|
||||
return nil
|
||||
}
|
||||
|
||||
// loadVolumeData loads volume info from specified json file/location
|
||||
func loadVolumeData(dir string, fileName string) (map[string]string, error) {
|
||||
// remove /mount at the end
|
||||
dataFileName := path.Join(dir, fileName)
|
||||
glog.V(4).Info(log("loading volume data file [%s]", dataFileName))
|
||||
|
||||
file, err := os.Open(dataFileName)
|
||||
if err != nil {
|
||||
glog.Error(log("failed to open volume data file [%s]: %v", dataFileName, err))
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
data := map[string]string{}
|
||||
if err := json.NewDecoder(file).Decode(&data); err != nil {
|
||||
glog.Error(log("failed to parse volume data file [%s]: %v", dataFileName, err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
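// Illustrative usage sketch (not part of the generated file): a save/load round
// trip as performed by the mounter and unmounter. The directory is hypothetical;
// the real callers pass the pod's volume data dir (path.Dir of the mount path).
//
//	volData := map[string]string{
//		volDataKey.specVolID:  "my-pv",
//		volDataKey.driverName: "csi-hostpath",
//		volDataKey.volHandle:  "vol-0001",
//	}
//	if err := saveVolumeData("/tmp/csi-example", volDataFileName, volData); err != nil {
//		glog.Error(log("example save failed: %v", err))
//	}
//	loaded, err := loadVolumeData("/tmp/csi-example", volDataFileName)
//	_, _ = loaded, err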
|
||||
func getCSISourceFromSpec(spec *volume.Spec) (*api.CSIPersistentVolumeSource, error) {
|
||||
if spec.PersistentVolume != nil &&
|
||||
spec.PersistentVolume.Spec.CSI != nil {
|
||||
return spec.PersistentVolume.Spec.CSI, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("CSIPersistentVolumeSource not defined in spec")
|
||||
}
|
||||
|
||||
func getReadOnlyFromSpec(spec *volume.Spec) (bool, error) {
|
||||
if spec.PersistentVolume != nil &&
|
||||
spec.PersistentVolume.Spec.CSI != nil {
|
||||
return spec.ReadOnly, nil
|
||||
}
|
||||
|
||||
return false, fmt.Errorf("CSIPersistentVolumeSource not defined in spec")
|
||||
}
|
||||
|
||||
// log prepends log string with `kubernetes.io/csi`
|
||||
func log(msg string, parts ...interface{}) string {
|
||||
return fmt.Sprintf(fmt.Sprintf("%s: %s", csiPluginName, msg), parts...)
|
||||
}
|
||||
|
||||
// getVolumeDevicePluginDir returns the path where the CSI plugin keeps the
|
||||
// symlink for a block device associated with a given specVolumeID.
|
||||
// path: plugins/kubernetes.io/csi/volumeDevices/{specVolumeID}/dev
|
||||
func getVolumeDevicePluginDir(specVolID string, host volume.VolumeHost) string {
|
||||
sanitizedSpecVolID := kstrings.EscapeQualifiedNameForDisk(specVolID)
|
||||
return path.Join(host.GetVolumeDevicePluginDir(csiPluginName), sanitizedSpecVolID, "dev")
|
||||
}
|
||||
|
||||
// getVolumeDeviceDataDir returns the path where the CSI plugin keeps the
|
||||
// volume data for a block device associated with a given specVolumeID.
|
||||
// path: plugins/kubernetes.io/csi/volumeDevices/{specVolumeID}/data
|
||||
func getVolumeDeviceDataDir(specVolID string, host volume.VolumeHost) string {
|
||||
sanitizedSpecVolID := kstrings.EscapeQualifiedNameForDisk(specVolID)
|
||||
return path.Join(host.GetVolumeDevicePluginDir(csiPluginName), sanitizedSpecVolID, "data")
|
||||
}
|
26
vendor/k8s.io/kubernetes/pkg/volume/csi/fake/BUILD
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["fake_client.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/volume/csi/fake",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
|
||||
"//vendor/google.golang.org/grpc:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
277
vendor/k8s.io/kubernetes/pkg/volume/csi/fake/fake_client.go
generated
vendored
Normal file
@@ -0,0 +1,277 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fake
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
|
||||
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
|
||||
)
|
||||
|
||||
// IdentityClient is a CSI identity client used for testing
|
||||
type IdentityClient struct {
|
||||
nextErr error
|
||||
}
|
||||
|
||||
// NewIdentityClient returns a new IdentityClient
|
||||
func NewIdentityClient() *IdentityClient {
|
||||
return &IdentityClient{}
|
||||
}
|
||||
|
||||
// SetNextError injects expected error
|
||||
func (f *IdentityClient) SetNextError(err error) {
|
||||
f.nextErr = err
|
||||
}
|
||||
|
||||
// GetPluginInfo returns plugin info
|
||||
func (f *IdentityClient) GetPluginInfo(ctx context.Context, in *csipb.GetPluginInfoRequest, opts ...grpc.CallOption) (*csipb.GetPluginInfoResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// GetPluginCapabilities implements csi method
|
||||
func (f *IdentityClient) GetPluginCapabilities(ctx context.Context, in *csipb.GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*csipb.GetPluginCapabilitiesResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Probe implements csi method
|
||||
func (f *IdentityClient) Probe(ctx context.Context, in *csipb.ProbeRequest, opts ...grpc.CallOption) (*csipb.ProbeResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// NodeClient returns CSI node client
|
||||
type NodeClient struct {
|
||||
nodePublishedVolumes map[string]string
|
||||
nodeStagedVolumes map[string]string
|
||||
stageUnstageSet bool
|
||||
nextErr error
|
||||
}
|
||||
|
||||
// NewNodeClient returns fake node client
|
||||
func NewNodeClient(stageUnstageSet bool) *NodeClient {
|
||||
return &NodeClient{
|
||||
nodePublishedVolumes: make(map[string]string),
|
||||
nodeStagedVolumes: make(map[string]string),
|
||||
stageUnstageSet: stageUnstageSet,
|
||||
}
|
||||
}
|
||||
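// Illustrative usage sketch (not part of the generated file): how the csi
// package tests are expected to drive this fake. The volume id and paths are
// hypothetical.
//
//	client := NewNodeClient(false /* stageUnstageSet */)
//	req := &csipb.NodePublishVolumeRequest{
//		VolumeId:   "vol-0001",
//		TargetPath: "/var/lib/kubelet/pods/<uid>/volumes/kubernetes.io~csi/test-pv/mount",
//		VolumeCapability: &csipb.VolumeCapability{
//			AccessType: &csipb.VolumeCapability_Mount{
//				Mount: &csipb.VolumeCapability_MountVolume{FsType: "ext4"},
//			},
//		},
//	}
//	if _, err := client.NodePublishVolume(context.Background(), req); err != nil {
//		// handle error
//	}
//	published := client.GetNodePublishedVolumes() // published["vol-0001"] == req.TargetPath
//	_ = published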
|
||||
// SetNextError injects next expected error
|
||||
func (f *NodeClient) SetNextError(err error) {
|
||||
f.nextErr = err
|
||||
}
|
||||
|
||||
// GetNodePublishedVolumes returns node published volumes
|
||||
func (f *NodeClient) GetNodePublishedVolumes() map[string]string {
|
||||
return f.nodePublishedVolumes
|
||||
}
|
||||
|
||||
// GetNodeStagedVolumes returns node staged volumes
|
||||
func (f *NodeClient) GetNodeStagedVolumes() map[string]string {
|
||||
return f.nodeStagedVolumes
|
||||
}
|
||||
|
||||
func (f *NodeClient) AddNodeStagedVolume(volID, deviceMountPath string) {
|
||||
f.nodeStagedVolumes[volID] = deviceMountPath
|
||||
}
|
||||
|
||||
// NodePublishVolume implements CSI NodePublishVolume
|
||||
func (f *NodeClient) NodePublishVolume(ctx context.Context, req *csipb.NodePublishVolumeRequest, opts ...grpc.CallOption) (*csipb.NodePublishVolumeResponse, error) {
|
||||
|
||||
if f.nextErr != nil {
|
||||
return nil, f.nextErr
|
||||
}
|
||||
|
||||
if req.GetVolumeId() == "" {
|
||||
return nil, errors.New("missing volume id")
|
||||
}
|
||||
if req.GetTargetPath() == "" {
|
||||
return nil, errors.New("missing target path")
|
||||
}
|
||||
fsTypes := "block|ext4|xfs|zfs"
|
||||
fsType := req.GetVolumeCapability().GetMount().GetFsType()
|
||||
if !strings.Contains(fsTypes, fsType) {
|
||||
return nil, errors.New("invalid fstype")
|
||||
}
|
||||
f.nodePublishedVolumes[req.GetVolumeId()] = req.GetTargetPath()
|
||||
return &csipb.NodePublishVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
// NodeUnpublishVolume implements csi method
|
||||
func (f *NodeClient) NodeUnpublishVolume(ctx context.Context, req *csipb.NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*csipb.NodeUnpublishVolumeResponse, error) {
|
||||
if f.nextErr != nil {
|
||||
return nil, f.nextErr
|
||||
}
|
||||
|
||||
if req.GetVolumeId() == "" {
|
||||
return nil, errors.New("missing volume id")
|
||||
}
|
||||
if req.GetTargetPath() == "" {
|
||||
return nil, errors.New("missing target path")
|
||||
}
|
||||
delete(f.nodePublishedVolumes, req.GetVolumeId())
|
||||
return &csipb.NodeUnpublishVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
// NodeStageVolume implements csi method
|
||||
func (f *NodeClient) NodeStageVolume(ctx context.Context, req *csipb.NodeStageVolumeRequest, opts ...grpc.CallOption) (*csipb.NodeStageVolumeResponse, error) {
|
||||
if f.nextErr != nil {
|
||||
return nil, f.nextErr
|
||||
}
|
||||
|
||||
if req.GetVolumeId() == "" {
|
||||
return nil, errors.New("missing volume id")
|
||||
}
|
||||
if req.GetStagingTargetPath() == "" {
|
||||
return nil, errors.New("missing staging target path")
|
||||
}
|
||||
|
||||
fsType := ""
|
||||
fsTypes := "block|ext4|xfs|zfs"
|
||||
mounted := req.GetVolumeCapability().GetMount()
|
||||
if mounted != nil {
|
||||
fsType = mounted.GetFsType()
|
||||
}
|
||||
if !strings.Contains(fsTypes, fsType) {
|
||||
return nil, errors.New("invalid fstype")
|
||||
}
|
||||
|
||||
f.nodeStagedVolumes[req.GetVolumeId()] = req.GetStagingTargetPath()
|
||||
return &csipb.NodeStageVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
// NodeUnstageVolume implements csi method
|
||||
func (f *NodeClient) NodeUnstageVolume(ctx context.Context, req *csipb.NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*csipb.NodeUnstageVolumeResponse, error) {
|
||||
if f.nextErr != nil {
|
||||
return nil, f.nextErr
|
||||
}
|
||||
|
||||
if req.GetVolumeId() == "" {
|
||||
return nil, errors.New("missing volume id")
|
||||
}
|
||||
if req.GetStagingTargetPath() == "" {
|
||||
return nil, errors.New("missing staging target path")
|
||||
}
|
||||
|
||||
delete(f.nodeStagedVolumes, req.GetVolumeId())
|
||||
return &csipb.NodeUnstageVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
// NodeGetId implements method
|
||||
func (f *NodeClient) NodeGetId(ctx context.Context, in *csipb.NodeGetIdRequest, opts ...grpc.CallOption) (*csipb.NodeGetIdResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// NodeGetCapabilities implements csi method
|
||||
func (f *NodeClient) NodeGetCapabilities(ctx context.Context, in *csipb.NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipb.NodeGetCapabilitiesResponse, error) {
|
||||
resp := &csipb.NodeGetCapabilitiesResponse{
|
||||
Capabilities: []*csipb.NodeServiceCapability{
|
||||
{
|
||||
Type: &csipb.NodeServiceCapability_Rpc{
|
||||
Rpc: &csipb.NodeServiceCapability_RPC{
|
||||
Type: csipb.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
if f.stageUnstageSet {
|
||||
return resp, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ControllerClient represents a CSI Controller client
|
||||
type ControllerClient struct {
|
||||
nextCapabilities []*csipb.ControllerServiceCapability
|
||||
nextErr error
|
||||
}
|
||||
|
||||
// NewControllerClient returns a ControllerClient
|
||||
func NewControllerClient() *ControllerClient {
|
||||
return &ControllerClient{}
|
||||
}
|
||||
|
||||
// SetNextError injects next expected error
|
||||
func (f *ControllerClient) SetNextError(err error) {
|
||||
f.nextErr = err
|
||||
}
|
||||
|
||||
// SetNextCapabilities injects next expected capabilities
|
||||
func (f *ControllerClient) SetNextCapabilities(caps []*csipb.ControllerServiceCapability) {
|
||||
f.nextCapabilities = caps
|
||||
}
|
||||
|
||||
// ControllerGetCapabilities implements csi method
|
||||
func (f *ControllerClient) ControllerGetCapabilities(ctx context.Context, in *csipb.ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipb.ControllerGetCapabilitiesResponse, error) {
|
||||
if f.nextErr != nil {
|
||||
return nil, f.nextErr
|
||||
}
|
||||
|
||||
if f.nextCapabilities == nil {
|
||||
f.nextCapabilities = []*csipb.ControllerServiceCapability{
|
||||
{
|
||||
Type: &csipb.ControllerServiceCapability_Rpc{
|
||||
Rpc: &csipb.ControllerServiceCapability_RPC{
|
||||
Type: csipb.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
return &csipb.ControllerGetCapabilitiesResponse{
|
||||
Capabilities: f.nextCapabilities,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CreateVolume implements csi method
|
||||
func (f *ControllerClient) CreateVolume(ctx context.Context, in *csipb.CreateVolumeRequest, opts ...grpc.CallOption) (*csipb.CreateVolumeResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// DeleteVolume implements csi method
|
||||
func (f *ControllerClient) DeleteVolume(ctx context.Context, in *csipb.DeleteVolumeRequest, opts ...grpc.CallOption) (*csipb.DeleteVolumeResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ControllerPublishVolume implements csi method
|
||||
func (f *ControllerClient) ControllerPublishVolume(ctx context.Context, in *csipb.ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*csipb.ControllerPublishVolumeResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ControllerUnpublishVolume implements csi method
|
||||
func (f *ControllerClient) ControllerUnpublishVolume(ctx context.Context, in *csipb.ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*csipb.ControllerUnpublishVolumeResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ValidateVolumeCapabilities implements csi method
|
||||
func (f *ControllerClient) ValidateVolumeCapabilities(ctx context.Context, in *csipb.ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*csipb.ValidateVolumeCapabilitiesResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ListVolumes implements csi method
|
||||
func (f *ControllerClient) ListVolumes(ctx context.Context, in *csipb.ListVolumesRequest, opts ...grpc.CallOption) (*csipb.ListVolumesResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// GetCapacity implements csi method
|
||||
func (f *ControllerClient) GetCapacity(ctx context.Context, in *csipb.GetCapacityRequest, opts ...grpc.CallOption) (*csipb.GetCapacityResponse, error) {
|
||||
return nil, nil
|
||||
}
|
30
vendor/k8s.io/kubernetes/pkg/volume/csi/labelmanager/BUILD
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["labelmanager.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/volume/csi/labelmanager",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/retry:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
251
vendor/k8s.io/kubernetes/pkg/volume/csi/labelmanager/labelmanager.go
generated
vendored
Normal file
@@ -0,0 +1,251 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package labelmanager includes internal functions used to add/delete labels to
|
||||
// kubernetes nodes for corresponding CSI drivers
|
||||
package labelmanager // import "k8s.io/kubernetes/pkg/volume/csi/labelmanager"
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/client-go/util/retry"
|
||||
)
|
||||
|
||||
const (
|
||||
// Name of node annotation that contains JSON map of driver names to node
|
||||
// names
|
||||
annotationKey = "csi.volume.kubernetes.io/nodeid"
|
||||
csiPluginName = "kubernetes.io/csi"
|
||||
)
|
||||
|
||||
// labelManagerStruct holds the node name and Kubernetes client used to update the
|
||||
// node's CSI driver annotation during driver registration
|
||||
type labelManagerStruct struct {
|
||||
nodeName types.NodeName
|
||||
k8s kubernetes.Interface
|
||||
}
|
||||
|
||||
// Interface implements an interface for managing labels of a node
|
||||
type Interface interface {
|
||||
AddLabels(driverName string) error
|
||||
}
|
||||
|
||||
// NewLabelManager initializes labelManagerStruct and returns available interfaces
|
||||
func NewLabelManager(nodeName types.NodeName, kubeClient kubernetes.Interface) Interface {
|
||||
return labelManagerStruct{
|
||||
nodeName: nodeName,
|
||||
k8s: kubeClient,
|
||||
}
|
||||
}
|
||||
|
||||
// AddLabels updates the node's CSI annotation for a newly registered driver, as
|
||||
// requested by the driver's registration process.
|
||||
func (lm labelManagerStruct) AddLabels(driverName string) error {
|
||||
err := verifyAndAddNodeId(string(lm.nodeName), lm.k8s.CoreV1().Nodes(), driverName, string(lm.nodeName))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update node %s's annotation with error: %+v", lm.nodeName, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
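// Illustrative sketch (not part of the generated file): after AddLabels runs for
// a hypothetical driver "csi-hostpath" on node "node-1", verifyAndAddNodeId
// leaves the driver-to-node-id map serialized as JSON in the node annotation:
//
//	node.ObjectMeta.Annotations["csi.volume.kubernetes.io/nodeid"] == `{"csi-hostpath":"node-1"}`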
|
||||
// Clones the given map and returns a new map with the given key and value added.
|
||||
// Returns the given map, if annotationKey is empty.
|
||||
func cloneAndAddAnnotation(
|
||||
annotations map[string]string,
|
||||
annotationKey,
|
||||
annotationValue string) map[string]string {
|
||||
if annotationKey == "" {
|
||||
// Don't need to add an annotation.
|
||||
return annotations
|
||||
}
|
||||
// Clone.
|
||||
newAnnotations := map[string]string{}
|
||||
for key, value := range annotations {
|
||||
newAnnotations[key] = value
|
||||
}
|
||||
newAnnotations[annotationKey] = annotationValue
|
||||
return newAnnotations
|
||||
}
|
||||
|
||||
func verifyAndAddNodeId(
|
||||
k8sNodeName string,
|
||||
k8sNodesClient corev1.NodeInterface,
|
||||
csiDriverName string,
|
||||
csiDriverNodeId string) error {
|
||||
// Add or update annotation on Node object
|
||||
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
// Retrieve the latest version of Node before attempting update, so that
|
||||
// existing changes are not overwritten. RetryOnConflict uses
|
||||
// exponential backoff to avoid exhausting the apiserver.
|
||||
result, getErr := k8sNodesClient.Get(k8sNodeName, metav1.GetOptions{})
|
||||
if getErr != nil {
|
||||
glog.Errorf("Failed to get latest version of Node: %v", getErr)
|
||||
return getErr // do not wrap error
|
||||
}
|
||||
|
||||
var previousAnnotationValue string
|
||||
if result.ObjectMeta.Annotations != nil {
|
||||
previousAnnotationValue =
|
||||
result.ObjectMeta.Annotations[annotationKey]
|
||||
glog.V(3).Infof(
|
||||
"previousAnnotationValue=%q", previousAnnotationValue)
|
||||
}
|
||||
|
||||
existingDriverMap := map[string]string{}
|
||||
if previousAnnotationValue != "" {
|
||||
// Parse previousAnnotationValue as JSON
|
||||
if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil {
|
||||
return fmt.Errorf(
|
||||
"failed to parse node's %q annotation value (%q) err=%v",
|
||||
annotationKey,
|
||||
previousAnnotationValue,
|
||||
err)
|
||||
}
|
||||
}
|
||||
|
||||
if val, ok := existingDriverMap[csiDriverName]; ok {
|
||||
if val == csiDriverNodeId {
|
||||
// Value already exists in node annotation, nothing more to do
|
||||
glog.V(1).Infof(
|
||||
"The key value {%q: %q} alredy eixst in node %q annotation, no need to update: %v",
|
||||
csiDriverName,
|
||||
csiDriverNodeId,
|
||||
annotationKey,
|
||||
previousAnnotationValue)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Add/update annotation value
|
||||
existingDriverMap[csiDriverName] = csiDriverNodeId
|
||||
jsonObj, err := json.Marshal(existingDriverMap)
|
||||
if err != nil {
|
||||
return fmt.Errorf(
|
||||
"failed while trying to add key value {%q: %q} to node %q annotation. Existing value: %v",
|
||||
csiDriverName,
|
||||
csiDriverNodeId,
|
||||
annotationKey,
|
||||
previousAnnotationValue)
|
||||
}
|
||||
|
||||
result.ObjectMeta.Annotations = cloneAndAddAnnotation(
|
||||
result.ObjectMeta.Annotations,
|
||||
annotationKey,
|
||||
string(jsonObj))
|
||||
_, updateErr := k8sNodesClient.Update(result)
|
||||
if updateErr == nil {
|
||||
fmt.Printf(
|
||||
"Updated node %q successfully for CSI driver %q and CSI node name %q",
|
||||
k8sNodeName,
|
||||
csiDriverName,
|
||||
csiDriverNodeId)
|
||||
}
|
||||
return updateErr // do not wrap error
|
||||
})
|
||||
if retryErr != nil {
|
||||
return fmt.Errorf("node update failed: %v", retryErr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fetches Kubernetes node API object corresponding to k8sNodeName.
|
||||
// If the csiDriverName is present in the node annotation, it is removed.
|
||||
func verifyAndDeleteNodeId(
|
||||
k8sNodeName string,
|
||||
k8sNodesClient corev1.NodeInterface,
|
||||
csiDriverName string) error {
|
||||
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
// Retrieve the latest version of Node before attempting update, so that
|
||||
// existing changes are not overwritten. RetryOnConflict uses
|
||||
// exponential backoff to avoid exhausting the apiserver.
|
||||
result, getErr := k8sNodesClient.Get(k8sNodeName, metav1.GetOptions{})
|
||||
if getErr != nil {
|
||||
glog.Errorf("failed to get latest version of Node: %v", getErr)
|
||||
return getErr // do not wrap error
|
||||
}
|
||||
|
||||
var previousAnnotationValue string
|
||||
if result.ObjectMeta.Annotations != nil {
|
||||
previousAnnotationValue =
|
||||
result.ObjectMeta.Annotations[annotationKey]
|
||||
glog.V(3).Infof(
|
||||
"previousAnnotationValue=%q", previousAnnotationValue)
|
||||
}
|
||||
|
||||
existingDriverMap := map[string]string{}
|
||||
if previousAnnotationValue == "" {
|
||||
// Annotation value does not exist, nothing to clean up
|
||||
glog.V(1).Infof(
|
||||
"The key %q does not exist in node %q annotation, no need to cleanup.",
|
||||
csiDriverName,
|
||||
annotationKey)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Parse previousAnnotationValue as JSON
|
||||
if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil {
|
||||
return fmt.Errorf(
|
||||
"failed to parse node's %q annotation value (%q) err=%v",
|
||||
annotationKey,
|
||||
previousAnnotationValue,
|
||||
err)
|
||||
}
|
||||
|
||||
if _, ok := existingDriverMap[csiDriverName]; !ok {
|
||||
// Driver not present in node annotation, nothing to clean up
|
||||
glog.V(1).Infof(
|
||||
"The key %q does not eixst in node %q annotation, no need to cleanup: %v",
|
||||
csiDriverName,
|
||||
annotationKey,
|
||||
previousAnnotationValue)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove the driver entry from the annotation value
|
||||
delete(existingDriverMap, csiDriverName)
|
||||
jsonObj, err := json.Marshal(existingDriverMap)
|
||||
if err != nil {
|
||||
return fmt.Errorf(
|
||||
"failed while trying to remove key %q from node %q annotation. Existing data: %v",
|
||||
csiDriverName,
|
||||
annotationKey,
|
||||
previousAnnotationValue)
|
||||
}
|
||||
|
||||
result.ObjectMeta.Annotations = cloneAndAddAnnotation(
|
||||
result.ObjectMeta.Annotations,
|
||||
annotationKey,
|
||||
string(jsonObj))
|
||||
_, updateErr := k8sNodesClient.Update(result)
|
||||
if updateErr == nil {
|
||||
fmt.Printf(
|
||||
"Updated node %q annotation to remove CSI driver %q.",
|
||||
k8sNodeName,
|
||||
csiDriverName)
|
||||
}
|
||||
return updateErr // do not wrap error
|
||||
})
|
||||
if retryErr != nil {
|
||||
return fmt.Errorf("node update failed: %v", retryErr)
|
||||
}
|
||||
return nil
|
||||
}
|