Add generated file
This PR adds generated files under the pkg/client and vendor folders.
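For reviewers, the only exported entrypoint in the vendored plugin is `ProbeVolumePlugins` (see `local.go` below). A minimal, hypothetical sketch of how a consumer of this vendor tree might aggregate the plugin, assuming the vendored imports added by this PR are on the import path (names here are illustrative, not part of the diff):

```go
package plugins

import (
    "k8s.io/kubernetes/pkg/volume"
    "k8s.io/kubernetes/pkg/volume/local"
)

// allVolumePlugins collects the volume plugins this hypothetical consumer
// registers; local.ProbeVolumePlugins comes from the vendored package below.
func allVolumePlugins() []volume.VolumePlugin {
    plugins := []volume.VolumePlugin{}
    plugins = append(plugins, local.ProbeVolumePlugins()...)
    return plugins
}
```

The vendored tests in `local_test.go` exercise the same pattern by passing `ProbeVolumePlugins()` to `volume.VolumePluginMgr.InitPlugins`.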
vendor/k8s.io/kubernetes/pkg/volume/local/BUILD (generated, vendored, normal file, 84 lines)
@@ -0,0 +1,84 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "local.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/local",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/kubelet/events:go_default_library",
        "//pkg/util/keymutex:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/validation:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = select({
        "@io_bazel_rules_go//go/platform:darwin": [
            "local_test.go",
        ],
        "@io_bazel_rules_go//go/platform:linux": [
            "local_linux_test.go",
            "local_test.go",
        ],
        "@io_bazel_rules_go//go/platform:windows": [
            "local_test.go",
        ],
        "//conditions:default": [],
    }),
    embed = [":go_default_library"],
    deps = select({
        "@io_bazel_rules_go//go/platform:darwin": [
            "//pkg/volume:go_default_library",
            "//pkg/volume/testing:go_default_library",
            "//vendor/k8s.io/api/core/v1:go_default_library",
            "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
            "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
            "//vendor/k8s.io/client-go/util/testing:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:linux": [
            "//pkg/volume:go_default_library",
            "//pkg/volume/testing:go_default_library",
            "//vendor/k8s.io/api/core/v1:go_default_library",
            "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
            "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
            "//vendor/k8s.io/client-go/util/testing:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:windows": [
            "//pkg/volume:go_default_library",
            "//pkg/volume/testing:go_default_library",
            "//vendor/k8s.io/api/core/v1:go_default_library",
            "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
            "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
            "//vendor/k8s.io/client-go/util/testing:go_default_library",
        ],
        "//conditions:default": [],
    }),
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
vendor/k8s.io/kubernetes/pkg/volume/local/OWNERS (generated, vendored, normal file, 14 lines)
@@ -0,0 +1,14 @@
approvers:
- saad-ali
- thockin
- vishh
- msau42
- jingxu97
- jsafrane
reviewers:
- saad-ali
- jsafrane
- rootfs
- jingxu97
- msau42
- vishh
vendor/k8s.io/kubernetes/pkg/volume/local/doc.go (generated, vendored, normal file, 18 lines)
@@ -0,0 +1,18 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package local contains the internal representation of local volumes
package local // import "k8s.io/kubernetes/pkg/volume/local"
vendor/k8s.io/kubernetes/pkg/volume/local/local.go (generated, vendored, normal file, 423 lines)
@@ -0,0 +1,423 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package local

import (
    "fmt"
    "os"
    "path/filepath"
    "runtime"
    "strings"

    "github.com/golang/glog"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/tools/record"
    "k8s.io/kubernetes/pkg/kubelet/events"
    "k8s.io/kubernetes/pkg/util/keymutex"
    "k8s.io/kubernetes/pkg/util/mount"
    stringsutil "k8s.io/kubernetes/pkg/util/strings"
    "k8s.io/kubernetes/pkg/volume"
    "k8s.io/kubernetes/pkg/volume/util"
    "k8s.io/kubernetes/pkg/volume/validation"
)

// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
    return []volume.VolumePlugin{&localVolumePlugin{}}
}

type localVolumePlugin struct {
    host        volume.VolumeHost
    volumeLocks keymutex.KeyMutex
    recorder    record.EventRecorder
}

var _ volume.VolumePlugin = &localVolumePlugin{}
var _ volume.PersistentVolumePlugin = &localVolumePlugin{}
var _ volume.BlockVolumePlugin = &localVolumePlugin{}

const (
    localVolumePluginName = "kubernetes.io/local-volume"
)

func (plugin *localVolumePlugin) Init(host volume.VolumeHost) error {
    plugin.host = host
    plugin.volumeLocks = keymutex.NewKeyMutex()
    plugin.recorder = host.GetEventRecorder()
    return nil
}

func (plugin *localVolumePlugin) GetPluginName() string {
    return localVolumePluginName
}

func (plugin *localVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
    // This volume is only supported as a PersistentVolumeSource, so the PV name is unique
    return spec.Name(), nil
}

func (plugin *localVolumePlugin) CanSupport(spec *volume.Spec) bool {
    // This volume is only supported as a PersistentVolumeSource
    return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Local != nil)
}

func (plugin *localVolumePlugin) RequiresRemount() bool {
    return false
}

func (plugin *localVolumePlugin) SupportsMountOption() bool {
    return false
}

func (plugin *localVolumePlugin) SupportsBulkVolumeVerification() bool {
    return false
}

func (plugin *localVolumePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
    // The current meaning of AccessMode is how many nodes can attach to it, not how many pods can mount it
    return []v1.PersistentVolumeAccessMode{
        v1.ReadWriteOnce,
    }
}

func getVolumeSource(spec *volume.Spec) (*v1.LocalVolumeSource, bool, error) {
    if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Local != nil {
        return spec.PersistentVolume.Spec.Local, spec.ReadOnly, nil
    }

    return nil, false, fmt.Errorf("Spec does not reference a Local volume type")
}

func (plugin *localVolumePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
    volumeSource, readOnly, err := getVolumeSource(spec)
    if err != nil {
        return nil, err
    }

    return &localVolumeMounter{
        localVolume: &localVolume{
            pod:             pod,
            podUID:          pod.UID,
            volName:         spec.Name(),
            mounter:         plugin.host.GetMounter(plugin.GetPluginName()),
            plugin:          plugin,
            globalPath:      volumeSource.Path,
            MetricsProvider: volume.NewMetricsStatFS(volumeSource.Path),
        },
        readOnly: readOnly,
    }, nil

}

func (plugin *localVolumePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
    return &localVolumeUnmounter{
        localVolume: &localVolume{
            podUID:  podUID,
            volName: volName,
            mounter: plugin.host.GetMounter(plugin.GetPluginName()),
            plugin:  plugin,
        },
    }, nil
}

func (plugin *localVolumePlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod,
    _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
    volumeSource, readOnly, err := getVolumeSource(spec)
    if err != nil {
        return nil, err
    }

    return &localVolumeMapper{
        localVolume: &localVolume{
            podUID:     pod.UID,
            volName:    spec.Name(),
            globalPath: volumeSource.Path,
            plugin:     plugin,
        },
        readOnly: readOnly,
    }, nil

}

func (plugin *localVolumePlugin) NewBlockVolumeUnmapper(volName string,
    podUID types.UID) (volume.BlockVolumeUnmapper, error) {
    return &localVolumeUnmapper{
        localVolume: &localVolume{
            podUID:  podUID,
            volName: volName,
            plugin:  plugin,
        },
    }, nil
}

// TODO: check if no path and no topology constraints are ok
func (plugin *localVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
    localVolume := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{
            Name: volumeName,
        },
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeSource: v1.PersistentVolumeSource{
                Local: &v1.LocalVolumeSource{
                    Path: "",
                },
            },
        },
    }
    return volume.NewSpecFromPersistentVolume(localVolume, false), nil
}

func (plugin *localVolumePlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName,
    mapPath string) (*volume.Spec, error) {
    block := v1.PersistentVolumeBlock

    localVolume := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{
            Name: volumeName,
        },
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeSource: v1.PersistentVolumeSource{
                Local: &v1.LocalVolumeSource{
                    Path: "",
                },
            },
            VolumeMode: &block,
        },
    }

    return volume.NewSpecFromPersistentVolume(localVolume, false), nil
}

// Local volumes represent a local directory on a node.
// The directory at the globalPath will be bind-mounted to the pod's directory
type localVolume struct {
    volName string
    pod     *v1.Pod
    podUID  types.UID
    // Global path to the volume
    globalPath string
    // Mounter interface that provides system calls to mount the global path to the pod local path.
    mounter mount.Interface
    plugin  *localVolumePlugin
    volume.MetricsProvider
}

func (l *localVolume) GetPath() string {
    return l.plugin.host.GetPodVolumeDir(l.podUID, stringsutil.EscapeQualifiedNameForDisk(localVolumePluginName), l.volName)
}

type localVolumeMounter struct {
    *localVolume
    readOnly bool
}

var _ volume.Mounter = &localVolumeMounter{}

func (m *localVolumeMounter) GetAttributes() volume.Attributes {
    return volume.Attributes{
        ReadOnly:        m.readOnly,
        Managed:         !m.readOnly,
        SupportsSELinux: true,
    }
}

// CanMount checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (m *localVolumeMounter) CanMount() error {
    return nil
}

// SetUp bind mounts the directory to the volume path
func (m *localVolumeMounter) SetUp(fsGroup *int64) error {
    return m.SetUpAt(m.GetPath(), fsGroup)
}

// SetUpAt bind mounts the directory to the volume path and sets up volume ownership
func (m *localVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
    m.plugin.volumeLocks.LockKey(m.globalPath)
    defer m.plugin.volumeLocks.UnlockKey(m.globalPath)

    if m.globalPath == "" {
        return fmt.Errorf("LocalVolume volume %q path is empty", m.volName)
    }

    err := validation.ValidatePathNoBacksteps(m.globalPath)
    if err != nil {
        return fmt.Errorf("invalid path: %s %v", m.globalPath, err)
    }

    notMnt, err := m.mounter.IsNotMountPoint(dir)
    glog.V(4).Infof("LocalVolume mount setup: PodDir(%s) VolDir(%s) Mounted(%t) Error(%v), ReadOnly(%t)", dir, m.globalPath, !notMnt, err, m.readOnly)
    if err != nil && !os.IsNotExist(err) {
        glog.Errorf("cannot validate mount point: %s %v", dir, err)
        return err
    }

    if !notMnt {
        return nil
    }
    refs, err := m.mounter.GetMountRefs(m.globalPath)
    if fsGroup != nil {
        if err != nil {
            glog.Errorf("cannot collect mounting information: %s %v", m.globalPath, err)
            return err
        }

        // Only count mounts from other pods
        refs = m.filterPodMounts(refs)
        if len(refs) > 0 {
            fsGroupNew := int64(*fsGroup)
            fsGroupOld, err := m.mounter.GetFSGroup(m.globalPath)
            if err != nil {
                return fmt.Errorf("failed to check fsGroup for %s (%v)", m.globalPath, err)
            }
            if fsGroupNew != fsGroupOld {
                m.plugin.recorder.Eventf(m.pod, v1.EventTypeWarning, events.WarnAlreadyMountedVolume, "The requested fsGroup is %d, but the volume %s has GID %d. The volume may not be shareable.", fsGroupNew, m.volName, fsGroupOld)
            }
        }

    }

    if runtime.GOOS != "windows" {
        // skip below MkdirAll for windows since the "bind mount" logic is implemented differently in mount_windows.go
        if err := os.MkdirAll(dir, 0750); err != nil {
            glog.Errorf("mkdir failed on disk %s (%v)", dir, err)
            return err
        }
    }
    // Perform a bind mount to the full path to allow duplicate mounts of the same volume.
    options := []string{"bind"}
    if m.readOnly {
        options = append(options, "ro")
    }

    glog.V(4).Infof("attempting to mount %s", dir)
    globalPath := util.MakeAbsolutePath(runtime.GOOS, m.globalPath)
    err = m.mounter.Mount(globalPath, dir, "", options)
    if err != nil {
        glog.Errorf("Mount of volume %s failed: %v", dir, err)
        notMnt, mntErr := m.mounter.IsNotMountPoint(dir)
        if mntErr != nil {
            glog.Errorf("IsNotMountPoint check failed: %v", mntErr)
            return err
        }
        if !notMnt {
            if mntErr = m.mounter.Unmount(dir); mntErr != nil {
                glog.Errorf("Failed to unmount: %v", mntErr)
                return err
            }
            notMnt, mntErr = m.mounter.IsNotMountPoint(dir)
            if mntErr != nil {
                glog.Errorf("IsNotMountPoint check failed: %v", mntErr)
                return err
            }
            if !notMnt {
                // This is very odd, we don't expect it. We'll try again next sync loop.
                glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
                return err
            }
        }
        os.Remove(dir)
        return err
    }
    if !m.readOnly {
        // Volume owner will be written only once on the first volume mount
        if len(refs) == 0 {
            return volume.SetVolumeOwnership(m, fsGroup)
        }
    }
    return nil
}

// filterPodMounts only returns mount paths inside the kubelet pod directory
func (m *localVolumeMounter) filterPodMounts(refs []string) []string {
    filtered := []string{}
    for _, r := range refs {
        if strings.HasPrefix(r, m.plugin.host.GetPodsDir()+string(os.PathSeparator)) {
            filtered = append(filtered, r)
        }
    }
    return filtered
}

type localVolumeUnmounter struct {
    *localVolume
}

var _ volume.Unmounter = &localVolumeUnmounter{}

// TearDown unmounts the bind mount
func (u *localVolumeUnmounter) TearDown() error {
    return u.TearDownAt(u.GetPath())
}

// TearDownAt unmounts the bind mount
func (u *localVolumeUnmounter) TearDownAt(dir string) error {
    glog.V(4).Infof("Unmounting volume %q at path %q\n", u.volName, dir)
    return util.UnmountMountPoint(dir, u.mounter, true) /* extensiveMountPointCheck = true */
}

// localVolumeMapper implements the BlockVolumeMapper interface for local volumes.
type localVolumeMapper struct {
    *localVolume
    readOnly bool
}

var _ volume.BlockVolumeMapper = &localVolumeMapper{}

// SetUpDevice provides physical device path for the local PV.
func (m *localVolumeMapper) SetUpDevice() (string, error) {
    globalPath := util.MakeAbsolutePath(runtime.GOOS, m.globalPath)
    glog.V(4).Infof("SetupDevice returning path %s", globalPath)
    return globalPath, nil
}

func (m *localVolumeMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
    return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID)
}

// localVolumeUnmapper implements the BlockVolumeUnmapper interface for local volumes.
type localVolumeUnmapper struct {
    *localVolume
}

var _ volume.BlockVolumeUnmapper = &localVolumeUnmapper{}

// TearDownDevice will undo SetUpDevice procedure. In local PV, all of this already handled by operation_generator.
func (u *localVolumeUnmapper) TearDownDevice(mapPath, devicePath string) error {
    glog.V(4).Infof("local: TearDownDevice completed for: %s", mapPath)
    return nil
}

// GetGlobalMapPath returns global map path and error.
// path: plugins/kubernetes.io/kubernetes.io/local-volume/volumeDevices/{volumeName}
func (lv *localVolume) GetGlobalMapPath(spec *volume.Spec) (string, error) {
    return filepath.Join(lv.plugin.host.GetVolumeDevicePluginDir(stringsutil.EscapeQualifiedNameForDisk(localVolumePluginName)),
        lv.volName), nil
}

// GetPodDeviceMapPath returns pod device map path and volume name.
// path: pods/{podUid}/volumeDevices/kubernetes.io~local-volume
// volName: local-pv-ff0d6d4
func (lv *localVolume) GetPodDeviceMapPath() (string, string) {
    return lv.plugin.host.GetPodVolumeDeviceDir(lv.podUID,
        stringsutil.EscapeQualifiedNameForDisk(localVolumePluginName)), lv.volName
}
vendor/k8s.io/kubernetes/pkg/volume/local/local_linux_test.go (generated, vendored, normal file, 68 lines)
@@ -0,0 +1,68 @@
// +build linux darwin

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package local

import (
    "os"
    "syscall"
    "testing"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
)

func TestFSGroupMount(t *testing.T) {
    tmpDir, plug := getPlugin(t)
    defer os.RemoveAll(tmpDir)
    info, err := os.Stat(tmpDir)
    if err != nil {
        t.Errorf("Error getting stats for %s (%v)", tmpDir, err)
    }
    s := info.Sys().(*syscall.Stat_t)
    if s == nil {
        t.Errorf("Error getting stats for %s (%v)", tmpDir, err)
    }
    fsGroup1 := int64(s.Gid)
    fsGroup2 := fsGroup1 + 1
    pod1 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
    pod1.Spec.SecurityContext = &v1.PodSecurityContext{
        FSGroup: &fsGroup1,
    }
    pod2 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
    pod2.Spec.SecurityContext = &v1.PodSecurityContext{
        FSGroup: &fsGroup2,
    }
    err = testFSGroupMount(plug, pod1, tmpDir, fsGroup1)
    if err != nil {
        t.Errorf("Failed to make a new Mounter: %v", err)
    }
    err = testFSGroupMount(plug, pod2, tmpDir, fsGroup2)
    if err != nil {
        t.Errorf("Failed to make a new Mounter: %v", err)
    }
    //Checking if GID of tmpDir has not been changed by mounting it by second pod
    s = info.Sys().(*syscall.Stat_t)
    if s == nil {
        t.Errorf("Error getting stats for %s (%v)", tmpDir, err)
    }
    if fsGroup1 != int64(s.Gid) {
        t.Errorf("Old Gid %d for volume %s got overwritten by new Gid %d", fsGroup1, tmpDir, int64(s.Gid))
    }
}
vendor/k8s.io/kubernetes/pkg/volume/local/local_test.go (generated, vendored, normal file, 505 lines)
@@ -0,0 +1,505 @@
// +build linux darwin windows

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package local

import (
    "fmt"
    "os"
    "path"
    "path/filepath"
    "reflect"
    "runtime"
    "testing"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    utiltesting "k8s.io/client-go/util/testing"
    "k8s.io/kubernetes/pkg/volume"
    volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

const (
    testPVName     = "pvA"
    testMountPath  = "pods/poduid/volumes/kubernetes.io~local-volume/pvA"
    testGlobalPath = "plugins/kubernetes.io~local-volume/volumeDevices/pvA"
    testPodPath    = "pods/poduid/volumeDevices/kubernetes.io~local-volume"
    testNodeName   = "fakeNodeName"
)

func getPlugin(t *testing.T) (string, volume.VolumePlugin) {
    tmpDir, err := utiltesting.MkTmpdir("localVolumeTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }

    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

    plug, err := plugMgr.FindPluginByName(localVolumePluginName)
    if err != nil {
        os.RemoveAll(tmpDir)
        t.Fatalf("Can't find the plugin by name")
    }
    if plug.GetPluginName() != localVolumePluginName {
        t.Errorf("Wrong name: %s", plug.GetPluginName())
    }
    return tmpDir, plug
}

func getBlockPlugin(t *testing.T) (string, volume.BlockVolumePlugin) {
    tmpDir, err := utiltesting.MkTmpdir("localVolumeTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }

    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
    plug, err := plugMgr.FindMapperPluginByName(localVolumePluginName)
    if err != nil {
        os.RemoveAll(tmpDir)
        t.Fatalf("Can't find the plugin by name: %q", localVolumePluginName)
    }
    if plug.GetPluginName() != localVolumePluginName {
        t.Errorf("Wrong name: %s", plug.GetPluginName())
    }
    return tmpDir, plug
}

func getPersistentPlugin(t *testing.T) (string, volume.PersistentVolumePlugin) {
    tmpDir, err := utiltesting.MkTmpdir("localVolumeTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }

    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

    plug, err := plugMgr.FindPersistentPluginByName(localVolumePluginName)
    if err != nil {
        os.RemoveAll(tmpDir)
        t.Fatalf("Can't find the plugin by name")
    }
    if plug.GetPluginName() != localVolumePluginName {
        t.Errorf("Wrong name: %s", plug.GetPluginName())
    }
    return tmpDir, plug
}

func getTestVolume(readOnly bool, path string, isBlock bool) *volume.Spec {
    pv := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{
            Name: testPVName,
        },
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeSource: v1.PersistentVolumeSource{
                Local: &v1.LocalVolumeSource{
                    Path: path,
                },
            },
        },
    }

    if isBlock {
        blockMode := v1.PersistentVolumeBlock
        pv.Spec.VolumeMode = &blockMode
    }
    return volume.NewSpecFromPersistentVolume(pv, readOnly)
}

func TestCanSupport(t *testing.T) {
    tmpDir, plug := getPlugin(t)
    defer os.RemoveAll(tmpDir)

    if !plug.CanSupport(getTestVolume(false, tmpDir, false)) {
        t.Errorf("Expected true")
    }
}

func TestGetAccessModes(t *testing.T) {
    tmpDir, plug := getPersistentPlugin(t)
    defer os.RemoveAll(tmpDir)

    modes := plug.GetAccessModes()
    if !volumetest.ContainsAccessMode(modes, v1.ReadWriteOnce) {
        t.Errorf("Expected AccessModeType %q", v1.ReadWriteOnce)
    }

    if volumetest.ContainsAccessMode(modes, v1.ReadWriteMany) {
        t.Errorf("Found AccessModeType %q, expected not", v1.ReadWriteMany)
    }
    if volumetest.ContainsAccessMode(modes, v1.ReadOnlyMany) {
        t.Errorf("Found AccessModeType %q, expected not", v1.ReadOnlyMany)
    }
}

func TestGetVolumeName(t *testing.T) {
    tmpDir, plug := getPersistentPlugin(t)
    defer os.RemoveAll(tmpDir)

    volName, err := plug.GetVolumeName(getTestVolume(false, tmpDir, false))
    if err != nil {
        t.Errorf("Failed to get volume name: %v", err)
    }
    if volName != testPVName {
        t.Errorf("Expected volume name %q, got %q", testPVName, volName)
    }
}

func TestInvalidLocalPath(t *testing.T) {
    tmpDir, plug := getPlugin(t)
    defer os.RemoveAll(tmpDir)

    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
    mounter, err := plug.NewMounter(getTestVolume(false, "/no/backsteps/allowed/..", false), pod, volume.VolumeOptions{})
    if err != nil {
        t.Fatal(err)
    }

    err = mounter.SetUp(nil)
    expectedMsg := "invalid path: /no/backsteps/allowed/.. must not contain '..'"
    if err.Error() != expectedMsg {
        t.Fatalf("expected error `%s` but got `%s`", expectedMsg, err)
    }
}

func TestMountUnmount(t *testing.T) {
    tmpDir, plug := getPlugin(t)
    defer os.RemoveAll(tmpDir)

    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
    mounter, err := plug.NewMounter(getTestVolume(false, tmpDir, false), pod, volume.VolumeOptions{})
    if err != nil {
        t.Errorf("Failed to make a new Mounter: %v", err)
    }
    if mounter == nil {
        t.Fatalf("Got a nil Mounter")
    }

    volPath := path.Join(tmpDir, testMountPath)
    path := mounter.GetPath()
    if path != volPath {
        t.Errorf("Got unexpected path: %s", path)
    }

    if err := mounter.SetUp(nil); err != nil {
        t.Errorf("Expected success, got: %v", err)
    }

    if runtime.GOOS != "windows" {
        // skip this check in windows since the "bind mount" logic is implemented differently in mount_windows.go
        if _, err := os.Stat(path); err != nil {
            if os.IsNotExist(err) {
                t.Errorf("SetUp() failed, volume path not created: %s", path)
            } else {
                t.Errorf("SetUp() failed: %v", err)
            }
        }
    }

    unmounter, err := plug.NewUnmounter(testPVName, pod.UID)
    if err != nil {
        t.Errorf("Failed to make a new Unmounter: %v", err)
    }
    if unmounter == nil {
        t.Fatalf("Got a nil Unmounter")
    }

    if err := unmounter.TearDown(); err != nil {
        t.Errorf("Expected success, got: %v", err)
    }
    if _, err := os.Stat(path); err == nil {
        t.Errorf("TearDown() failed, volume path still exists: %s", path)
    } else if !os.IsNotExist(err) {
        t.Errorf("TearDown() failed: %v", err)
    }
}

// TestMapUnmap tests block map and unmap interfaces.
func TestMapUnmap(t *testing.T) {
    tmpDir, plug := getBlockPlugin(t)
    defer os.RemoveAll(tmpDir)

    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
    volSpec := getTestVolume(false, tmpDir, true /*isBlock*/)
    mapper, err := plug.NewBlockVolumeMapper(volSpec, pod, volume.VolumeOptions{})
    if err != nil {
        t.Errorf("Failed to make a new Mounter: %v", err)
    }
    if mapper == nil {
        t.Fatalf("Got a nil Mounter")
    }

    expectedGlobalPath := path.Join(tmpDir, testGlobalPath)
    globalPath, err := mapper.GetGlobalMapPath(volSpec)
    if err != nil {
        t.Errorf("Failed to get global path: %v", err)
    }
    if globalPath != expectedGlobalPath {
        t.Errorf("Got unexpected path: %s, expected %s", globalPath, expectedGlobalPath)
    }
    expectedPodPath := path.Join(tmpDir, testPodPath)
    podPath, volName := mapper.GetPodDeviceMapPath()
    if podPath != expectedPodPath {
        t.Errorf("Got unexpected pod path: %s, expected %s", podPath, expectedPodPath)
    }
    if volName != testPVName {
        t.Errorf("Got unexpected volName: %s, expected %s", volName, testPVName)
    }
    devPath, err := mapper.SetUpDevice()
    if err != nil {
        t.Errorf("Failed to SetUpDevice, err: %v", err)
    }

    if _, err := os.Stat(devPath); err != nil {
        if os.IsNotExist(err) {
            t.Errorf("SetUpDevice() failed, volume path not created: %s", devPath)
        } else {
            t.Errorf("SetUpDevice() failed: %v", err)
        }
    }

    unmapper, err := plug.NewBlockVolumeUnmapper(testPVName, pod.UID)
    if err != nil {
        t.Fatalf("Failed to make a new Unmapper: %v", err)
    }
    if unmapper == nil {
        t.Fatalf("Got a nil Unmapper")
    }

    if err := unmapper.TearDownDevice(globalPath, devPath); err != nil {
        t.Errorf("TearDownDevice failed, err: %v", err)
    }
}

func testFSGroupMount(plug volume.VolumePlugin, pod *v1.Pod, tmpDir string, fsGroup int64) error {
    mounter, err := plug.NewMounter(getTestVolume(false, tmpDir, false), pod, volume.VolumeOptions{})
    if err != nil {
        return err
    }
    if mounter == nil {
        return fmt.Errorf("Got a nil Mounter")
    }

    volPath := path.Join(tmpDir, testMountPath)
    path := mounter.GetPath()
    if path != volPath {
        return fmt.Errorf("Got unexpected path: %s", path)
    }

    if err := mounter.SetUp(&fsGroup); err != nil {
        return err
    }
    return nil
}

func TestConstructVolumeSpec(t *testing.T) {
    tmpDir, plug := getPlugin(t)
    defer os.RemoveAll(tmpDir)

    volPath := path.Join(tmpDir, testMountPath)
    spec, err := plug.ConstructVolumeSpec(testPVName, volPath)
    if err != nil {
        t.Errorf("ConstructVolumeSpec() failed: %v", err)
    }
    if spec == nil {
        t.Fatalf("ConstructVolumeSpec() returned nil")
    }

    volName := spec.Name()
    if volName != testPVName {
        t.Errorf("Expected volume name %q, got %q", testPVName, volName)
    }

    if spec.Volume != nil {
        t.Errorf("Volume object returned, expected nil")
    }

    pv := spec.PersistentVolume
    if pv == nil {
        t.Fatalf("PersistentVolume object nil")
    }

    ls := pv.Spec.PersistentVolumeSource.Local
    if ls == nil {
        t.Fatalf("LocalVolumeSource object nil")
    }
}

func TestConstructBlockVolumeSpec(t *testing.T) {
    tmpDir, plug := getBlockPlugin(t)
    defer os.RemoveAll(tmpDir)

    podPath := path.Join(tmpDir, testPodPath)
    spec, err := plug.ConstructBlockVolumeSpec(types.UID("poduid"), testPVName, podPath)
    if err != nil {
        t.Errorf("ConstructBlockVolumeSpec() failed: %v", err)
    }
    if spec == nil {
        t.Fatalf("ConstructBlockVolumeSpec() returned nil")
    }

    volName := spec.Name()
    if volName != testPVName {
        t.Errorf("Expected volume name %q, got %q", testPVName, volName)
    }

    if spec.Volume != nil {
        t.Errorf("Volume object returned, expected nil")
    }

    pv := spec.PersistentVolume
    if pv == nil {
        t.Fatalf("PersistentVolume object nil")
    }

    if spec.PersistentVolume.Spec.VolumeMode == nil {
        t.Fatalf("Volume mode has not been set.")
    }

    if *spec.PersistentVolume.Spec.VolumeMode != v1.PersistentVolumeBlock {
        t.Errorf("Unexpected volume mode %q", *spec.PersistentVolume.Spec.VolumeMode)
    }

    ls := pv.Spec.PersistentVolumeSource.Local
    if ls == nil {
        t.Fatalf("LocalVolumeSource object nil")
    }
}

func TestPersistentClaimReadOnlyFlag(t *testing.T) {
    tmpDir, plug := getPlugin(t)
    defer os.RemoveAll(tmpDir)

    // Read only == true
    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
    mounter, err := plug.NewMounter(getTestVolume(true, tmpDir, false), pod, volume.VolumeOptions{})
    if err != nil {
        t.Errorf("Failed to make a new Mounter: %v", err)
    }
    if mounter == nil {
        t.Fatalf("Got a nil Mounter")
    }
    if !mounter.GetAttributes().ReadOnly {
        t.Errorf("Expected true for mounter.IsReadOnly")
    }

    // Read only == false
    mounter, err = plug.NewMounter(getTestVolume(false, tmpDir, false), pod, volume.VolumeOptions{})
    if err != nil {
        t.Errorf("Failed to make a new Mounter: %v", err)
    }
    if mounter == nil {
        t.Fatalf("Got a nil Mounter")
    }
    if mounter.GetAttributes().ReadOnly {
        t.Errorf("Expected false for mounter.IsReadOnly")
    }
}

func TestUnsupportedPlugins(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("localVolumeTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)

    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
    spec := getTestVolume(false, tmpDir, false)

    recyclePlug, err := plugMgr.FindRecyclablePluginBySpec(spec)
    if err == nil && recyclePlug != nil {
        t.Errorf("Recyclable plugin found, expected none")
    }

    deletePlug, err := plugMgr.FindDeletablePluginByName(localVolumePluginName)
    if err == nil && deletePlug != nil {
        t.Errorf("Deletable plugin found, expected none")
    }

    attachPlug, err := plugMgr.FindAttachablePluginByName(localVolumePluginName)
    if err == nil && attachPlug != nil {
        t.Errorf("Attachable plugin found, expected none")
    }

    createPlug, err := plugMgr.FindCreatablePluginBySpec(spec)
    if err == nil && createPlug != nil {
        t.Errorf("Creatable plugin found, expected none")
    }

    provisionPlug, err := plugMgr.FindProvisionablePluginByName(localVolumePluginName)
    if err == nil && provisionPlug != nil {
        t.Errorf("Provisionable plugin found, expected none")
    }
}

func TestFilterPodMounts(t *testing.T) {
    tmpDir, plug := getPlugin(t)
    defer os.RemoveAll(tmpDir)

    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
    mounter, err := plug.NewMounter(getTestVolume(false, tmpDir, false), pod, volume.VolumeOptions{})
    if err != nil {
        t.Fatal(err)
    }
    lvMounter, ok := mounter.(*localVolumeMounter)
    if !ok {
        t.Fatal("mounter is not localVolumeMounter")
    }

    host := volumetest.NewFakeVolumeHost(tmpDir, nil, nil)
    podsDir := host.GetPodsDir()

    cases := map[string]struct {
        input    []string
        expected []string
    }{
        "empty": {
            []string{},
            []string{},
        },
        "not-pod-mount": {
            []string{"/mnt/outside"},
            []string{},
        },
        "pod-mount": {
            []string{filepath.Join(podsDir, "pod-mount")},
            []string{filepath.Join(podsDir, "pod-mount")},
        },
        "not-directory-prefix": {
            []string{podsDir + "pod-mount"},
            []string{},
        },
        "mix": {
            []string{"/mnt/outside",
                filepath.Join(podsDir, "pod-mount"),
                "/another/outside",
                filepath.Join(podsDir, "pod-mount2")},
            []string{filepath.Join(podsDir, "pod-mount"),
                filepath.Join(podsDir, "pod-mount2")},
        },
    }
    for name, test := range cases {
        output := lvMounter.filterPodMounts(test.input)
        if !reflect.DeepEqual(output, test.expected) {
            t.Errorf("%v failed: output %+v doesn't equal expected %+v", name, output, test.expected)
        }
    }
}