Add generated file

This PR adds generated files under pkg/client and the vendor folder.
xing-yang committed 2018-07-12 10:55:15 -07:00
parent 36b1de0341
commit e213d1890d
17729 changed files with 5090889 additions and 0 deletions

vendor/k8s.io/kubernetes/pkg/volume/rbd/BUILD generated vendored Normal file

@@ -0,0 +1,71 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"attacher.go",
"disk_manager.go",
"doc.go",
"rbd.go",
"rbd_util.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/rbd",
deps = [
"//pkg/features:go_default_library",
"//pkg/util/file:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/node:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["rbd_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/volume/rbd/OWNERS generated vendored Normal file

@@ -0,0 +1,9 @@
approvers:
- rootfs
reviewers:
- sjenning
- saad-ali
- jsafrane
- rootfs
- jingxu97
- msau42

vendor/k8s.io/kubernetes/pkg/volume/rbd/attacher.go generated vendored Normal file

@@ -0,0 +1,220 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"fmt"
"os"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volutil "k8s.io/kubernetes/pkg/volume/util"
)
// NewAttacher implements AttachableVolumePlugin.NewAttacher.
func (plugin *rbdPlugin) NewAttacher() (volume.Attacher, error) {
return plugin.newAttacherInternal(&RBDUtil{})
}
func (plugin *rbdPlugin) newAttacherInternal(manager diskManager) (volume.Attacher, error) {
return &rbdAttacher{
plugin: plugin,
manager: manager,
mounter: volutil.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
}, nil
}
// NewDetacher implements AttachableVolumePlugin.NewDetacher.
func (plugin *rbdPlugin) NewDetacher() (volume.Detacher, error) {
return plugin.newDetacherInternal(&RBDUtil{})
}
func (plugin *rbdPlugin) newDetacherInternal(manager diskManager) (volume.Detacher, error) {
return &rbdDetacher{
plugin: plugin,
manager: manager,
mounter: volutil.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
}, nil
}
// GetDeviceMountRefs implements AttachableVolumePlugin.GetDeviceMountRefs.
func (plugin *rbdPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
}
// rbdAttacher implements volume.Attacher interface.
type rbdAttacher struct {
plugin *rbdPlugin
mounter *mount.SafeFormatAndMount
manager diskManager
}
var _ volume.Attacher = &rbdAttacher{}
// Attach implements Attacher.Attach.
// We do not lock the image here, because that would require
// kube-controller-manager to access the external `rbd` utility. And there is
// no need, since the AttachDetach controller will not try to attach RWO
// volumes that are already attached to other nodes.
func (attacher *rbdAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
return "", nil
}
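// Attach is therefore a no-op for rbd; the actual mapping of the image onto
// the node happens in WaitForAttach below.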
// VolumesAreAttached implements Attacher.VolumesAreAttached.
// There is no way to confirm whether the volume is attached or not from
// outside of the kubelet node. This method needs to always return true, like
// the iSCSI and FC plugins.
func (attacher *rbdAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
volumesAttachedCheck := make(map[*volume.Spec]bool)
for _, spec := range specs {
volumesAttachedCheck[spec] = true
}
return volumesAttachedCheck, nil
}
// WaitForAttach implements Attacher.WaitForAttach. It's called by the kubelet
// to attach the volume onto the node.
// This method is idempotent, callers are responsible for retrying on failure.
func (attacher *rbdAttacher) WaitForAttach(spec *volume.Spec, devicePath string, pod *v1.Pod, timeout time.Duration) (string, error) {
glog.V(4).Infof("rbd: waiting for attach volume (name: %s) for pod (name: %s, uid: %s)", spec.Name(), pod.Name, pod.UID)
mounter, err := attacher.plugin.createMounterFromVolumeSpecAndPod(spec, pod)
if err != nil {
glog.Warningf("failed to create mounter for spec %v: %v", spec, err)
return "", err
}
realDevicePath, err := attacher.manager.AttachDisk(*mounter)
if err != nil {
return "", err
}
glog.V(3).Infof("rbd: successfully waited for attach of volume (spec: %s, pool: %s, image: %s) at %s", spec.Name(), mounter.Pool, mounter.Image, realDevicePath)
return realDevicePath, nil
}
// GetDeviceMountPath implements Attacher.GetDeviceMountPath.
func (attacher *rbdAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
img, err := getVolumeSourceImage(spec)
if err != nil {
return "", err
}
pool, err := getVolumeSourcePool(spec)
if err != nil {
return "", err
}
return makePDNameInternal(attacher.plugin.host, pool, img), nil
}
// MountDevice implements Attacher.MountDevice. It is called by the kubelet to
// mount device at the given mount path.
// This method is idempotent, callers are responsible for retrying on failure.
func (attacher *rbdAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
glog.V(4).Infof("rbd: mounting device %s to %s", devicePath, deviceMountPath)
notMnt, err := attacher.mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
return err
}
notMnt = true
} else {
return err
}
}
if !notMnt {
return nil
}
fstype, err := getVolumeSourceFSType(spec)
if err != nil {
return err
}
ro, err := getVolumeSourceReadOnly(spec)
if err != nil {
return err
}
options := []string{}
if ro {
options = append(options, "ro")
}
mountOptions := volutil.MountOptionFromSpec(spec, options...)
err = attacher.mounter.FormatAndMount(devicePath, deviceMountPath, fstype, mountOptions)
if err != nil {
os.Remove(deviceMountPath)
return fmt.Errorf("rbd: failed to mount device %s at %s (fstype: %s), error %v", devicePath, deviceMountPath, fstype, err)
}
glog.V(3).Infof("rbd: successfully mounted device %s at %s (fstype: %s)", devicePath, deviceMountPath, fstype)
return nil
}
// rbdDetacher implements volume.Detacher interface.
type rbdDetacher struct {
plugin *rbdPlugin
manager diskManager
mounter *mount.SafeFormatAndMount
}
var _ volume.Detacher = &rbdDetacher{}
// UnmountDevice implements Detacher.UnmountDevice. It unmounts the global
// mount of the RBD image. This is called once all bind mounts have been
// unmounted.
// Internally, it does four things:
// - Unmount device from deviceMountPath.
// - Detach device from the node.
// - Remove lock if found. (No need to check volume readonly or not, because
// device is not on the node anymore, it's safe to remove lock.)
// - Finally, remove the deviceMountPath.
// This method is idempotent, callers are responsible for retrying on failure.
func (detacher *rbdDetacher) UnmountDevice(deviceMountPath string) error {
if pathExists, pathErr := volutil.PathExists(deviceMountPath); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", deviceMountPath)
return nil
}
devicePath, _, err := mount.GetDeviceNameFromMount(detacher.mounter, deviceMountPath)
if err != nil {
return err
}
// Unmount the device from the device mount point.
glog.V(4).Infof("rbd: unmounting device mountpoint %s", deviceMountPath)
if err = detacher.mounter.Unmount(deviceMountPath); err != nil {
return err
}
glog.V(3).Infof("rbd: successfully unmounted device mount path %s", deviceMountPath)
glog.V(4).Infof("rbd: detaching device %s", devicePath)
err = detacher.manager.DetachDisk(detacher.plugin, deviceMountPath, devicePath)
if err != nil {
return err
}
glog.V(3).Infof("rbd: successfully detached device %s", devicePath)
err = os.Remove(deviceMountPath)
if err != nil {
return err
}
glog.V(3).Infof("rbd: successfully removed device mount point %s", deviceMountPath)
return nil
}
// Detach implements Detacher.Detach.
func (detacher *rbdDetacher) Detach(volumeName string, nodeName types.NodeName) error {
return nil
}

vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go generated vendored Normal file

@@ -0,0 +1,134 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
// The diskManager interface and the diskSetUp/diskTearDown functions abstract commonly used procedures to set up a block volume.
// The rbd volume implements diskManager, calls diskSetUp when creating a volume, and calls diskTearDown inside the volume unmounter.
// TODO: consolidate, refactor, and share diskManager among iSCSI, GCE PD, and RBD
//
package rbd
import (
"fmt"
"os"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
// Abstract interface to disk operations.
type diskManager interface {
// MakeGlobalPDName creates global persistent disk path.
MakeGlobalPDName(disk rbd) string
// MakeGlobalVDPDName creates global block disk path.
MakeGlobalVDPDName(disk rbd) string
// Attaches the disk to the kubelet's host machine.
// If it successfully attaches, the path to the device
// is returned. Otherwise, an error will be returned.
AttachDisk(disk rbdMounter) (string, error)
// Detaches the disk from the kubelet's host machine.
DetachDisk(plugin *rbdPlugin, deviceMountPath string, device string) error
// Detaches the block disk from the kubelet's host machine.
DetachBlockDisk(disk rbdDiskUnmapper, mntPath string) error
// Creates an rbd image.
CreateImage(provisioner *rbdVolumeProvisioner) (r *v1.RBDPersistentVolumeSource, volumeSizeGB int, err error)
// Deletes an rbd image.
DeleteImage(deleter *rbdVolumeDeleter) error
// Expands an rbd image.
ExpandImage(expander *rbdVolumeExpander, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error)
}
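// Note: in this PR, RBDUtil in rbd_util.go is the production implementation
// of diskManager (see `var _ diskManager = &RBDUtil{}`), and rbd_test.go
// provides a fakeDiskManager for tests.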
// utility to mount a disk-based filesystem
func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount.Interface, fsGroup *int64) error {
globalPDPath := manager.MakeGlobalPDName(*b.rbd)
notMnt, err := mounter.IsLikelyNotMountPoint(globalPDPath)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("cannot validate mountpoint: %s", globalPDPath)
return err
}
if notMnt {
return fmt.Errorf("no device is mounted at %s", globalPDPath)
}
notMnt, err = mounter.IsLikelyNotMountPoint(volPath)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("cannot validate mountpoint: %s", volPath)
return err
}
if !notMnt {
return nil
}
if err := os.MkdirAll(volPath, 0750); err != nil {
glog.Errorf("failed to mkdir: %s", volPath)
return err
}
// Perform a bind mount to the full path to allow duplicate mounts of the same disk.
options := []string{"bind"}
if (&b).GetAttributes().ReadOnly {
options = append(options, "ro")
}
mountOptions := util.JoinMountOptions(b.mountOptions, options)
err = mounter.Mount(globalPDPath, volPath, "", mountOptions)
if err != nil {
glog.Errorf("failed to bind mount: %s", globalPDPath)
return err
}
glog.V(3).Infof("rbd: successfully bind-mounted %s to %s with options %v", globalPDPath, volPath, mountOptions)
if !b.ReadOnly {
volume.SetVolumeOwnership(&b, fsGroup)
}
return nil
}
// utility to tear down a disk-based filesystem
func diskTearDown(manager diskManager, c rbdUnmounter, volPath string, mounter mount.Interface) error {
notMnt, err := mounter.IsLikelyNotMountPoint(volPath)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("cannot validate mountpoint: %s", volPath)
return err
}
if notMnt {
glog.V(3).Infof("volume path %s is not a mountpoint, deleting", volPath)
return os.Remove(volPath)
}
// Unmount the bind-mount inside this pod.
if err := mounter.Unmount(volPath); err != nil {
glog.Errorf("failed to umount %s", volPath)
return err
}
notMnt, mntErr := mounter.IsLikelyNotMountPoint(volPath)
if mntErr != nil && !os.IsNotExist(mntErr) {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return mntErr
}
if notMnt {
if err := os.Remove(volPath); err != nil {
glog.V(2).Info("Error removing mountpoint ", volPath, ": ", err)
return err
}
}
return nil
}

vendor/k8s.io/kubernetes/pkg/volume/rbd/doc.go generated vendored Normal file

@@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package rbd contains the internal representation of Rados Block Store (Ceph)
// volumes.
package rbd // import "k8s.io/kubernetes/pkg/volume/rbd"

vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go generated vendored Normal file

File diff suppressed because it is too large

vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_test.go generated vendored Normal file

@@ -0,0 +1,596 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"fmt"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"sync"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/kubernetes/fake"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.GetPluginName() != "kubernetes.io/rbd" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if plug.CanSupport(&volume.Spec{}) {
t.Errorf("Expected false")
}
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{RBD: &v1.RBDVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{}}}) {
t.Errorf("Expected false")
}
if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{}}}}) {
t.Errorf("Expected false")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{RBD: &v1.RBDPersistentVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
type fakeDiskManager struct {
// Make sure we can run tests in parallel.
mutex sync.RWMutex
// Key format: "<pool>/<image>"
rbdImageLocks map[string]bool
rbdMapIndex int
rbdDevices map[string]bool
}
func NewFakeDiskManager() *fakeDiskManager {
return &fakeDiskManager{
rbdImageLocks: make(map[string]bool),
rbdMapIndex: 0,
rbdDevices: make(map[string]bool),
}
}
func (fake *fakeDiskManager) MakeGlobalPDName(rbd rbd) string {
return makePDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image)
}
func (fake *fakeDiskManager) MakeGlobalVDPDName(rbd rbd) string {
return makePDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image)
}
func (fake *fakeDiskManager) AttachDisk(b rbdMounter) (string, error) {
fake.mutex.Lock()
defer fake.mutex.Unlock()
fake.rbdMapIndex++
devicePath := fmt.Sprintf("/dev/rbd%d", fake.rbdMapIndex)
fake.rbdDevices[devicePath] = true
return devicePath, nil
}
func (fake *fakeDiskManager) DetachDisk(r *rbdPlugin, deviceMountPath string, device string) error {
fake.mutex.Lock()
defer fake.mutex.Unlock()
ok := fake.rbdDevices[device]
if !ok {
return fmt.Errorf("rbd: failed to detach device %s, it does not exist", device)
}
delete(fake.rbdDevices, device)
return nil
}
func (fake *fakeDiskManager) DetachBlockDisk(r rbdDiskUnmapper, device string) error {
fake.mutex.Lock()
defer fake.mutex.Unlock()
ok := fake.rbdDevices[device]
if !ok {
return fmt.Errorf("rbd: failed to detach device %s, it does not exist", device)
}
delete(fake.rbdDevices, device)
return nil
}
func (fake *fakeDiskManager) CreateImage(provisioner *rbdVolumeProvisioner) (r *v1.RBDPersistentVolumeSource, volumeSizeGB int, err error) {
return nil, 0, fmt.Errorf("not implemented")
}
func (fake *fakeDiskManager) DeleteImage(deleter *rbdVolumeDeleter) error {
return fmt.Errorf("not implemented")
}
func (fake *fakeDiskManager) Fencing(r rbdMounter, nodeName string) error {
fake.mutex.Lock()
defer fake.mutex.Unlock()
key := fmt.Sprintf("%s/%s", r.Pool, r.Image)
isLocked, ok := fake.rbdImageLocks[key]
if ok && isLocked {
// not expected in testing
return fmt.Errorf("%s is already locked", key)
}
fake.rbdImageLocks[key] = true
return nil
}
func (fake *fakeDiskManager) Defencing(r rbdMounter, nodeName string) error {
fake.mutex.Lock()
defer fake.mutex.Unlock()
key := fmt.Sprintf("%s/%s", r.Pool, r.Image)
isLocked, ok := fake.rbdImageLocks[key]
if !ok || !isLocked {
// not expected in testing
return fmt.Errorf("%s is not locked", key)
}
delete(fake.rbdImageLocks, key)
return nil
}
func (fake *fakeDiskManager) IsLocked(r rbdMounter, nodeName string) (bool, error) {
fake.mutex.RLock()
defer fake.mutex.RUnlock()
key := fmt.Sprintf("%s/%s", r.Pool, r.Image)
isLocked, ok := fake.rbdImageLocks[key]
return ok && isLocked, nil
}
func (fake *fakeDiskManager) ExpandImage(rbdExpander *rbdVolumeExpander, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
return resource.Quantity{}, fmt.Errorf("not implemented")
}
// checkMounterLog checks that fakeMounter has the expected number of log entries and that the last action equals expectedAction.
func checkMounterLog(t *testing.T, fakeMounter *mount.FakeMounter, expected int, expectedAction mount.FakeAction) {
if len(fakeMounter.Log) != expected {
t.Fatalf("fakeMounter should have %d logs, actual: %d", expected, len(fakeMounter.Log))
}
lastIndex := len(fakeMounter.Log) - 1
lastAction := fakeMounter.Log[lastIndex]
if !reflect.DeepEqual(expectedAction, lastAction) {
t.Fatalf("fakeMounter.Log[%d] should be %#v, not: %#v", lastIndex, expectedAction, lastAction)
}
}
func doTestPlugin(t *testing.T, c *testcase) {
fakeVolumeHost := volumetest.NewFakeVolumeHost(c.root, nil, nil)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, fakeVolumeHost)
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
fakeMounter := fakeVolumeHost.GetMounter(plug.GetPluginName()).(*mount.FakeMounter)
fakeNodeName := types.NodeName("localhost")
fdm := NewFakeDiskManager()
// attacher
attacher, err := plug.(*rbdPlugin).newAttacherInternal(fdm)
if err != nil {
t.Errorf("Failed to make a new Attacher: %v", err)
}
deviceAttachPath, err := attacher.Attach(c.spec, fakeNodeName)
if err != nil {
t.Fatal(err)
}
devicePath, err := attacher.WaitForAttach(c.spec, deviceAttachPath, c.pod, time.Second*10)
if err != nil {
t.Fatal(err)
}
if devicePath != c.expectedDevicePath {
t.Errorf("Unexpected path, expected %q, not: %q", c.expectedDevicePath, devicePath)
}
deviceMountPath, err := attacher.GetDeviceMountPath(c.spec)
if err != nil {
t.Fatal(err)
}
if deviceMountPath != c.expectedDeviceMountPath {
t.Errorf("Unexpected mount path, expected %q, not: %q", c.expectedDeviceMountPath, deviceMountPath)
}
err = attacher.MountDevice(c.spec, devicePath, deviceMountPath)
if err != nil {
t.Fatal(err)
}
if _, err := os.Stat(deviceMountPath); err != nil {
if os.IsNotExist(err) {
t.Errorf("Attacher.MountDevice() failed, device mount path not created: %s", deviceMountPath)
} else {
t.Errorf("Attacher.MountDevice() failed: %v", err)
}
}
checkMounterLog(t, fakeMounter, 1, mount.FakeAction{Action: "mount", Target: c.expectedDeviceMountPath, Source: devicePath, FSType: "ext4"})
// mounter
mounter, err := plug.(*rbdPlugin).newMounterInternal(c.spec, c.pod.UID, fdm, "secrets")
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Error("Got a nil Mounter")
}
path := mounter.GetPath()
if path != c.expectedPodMountPath {
t.Errorf("Unexpected path, expected %q, got: %q", c.expectedPodMountPath, path)
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
checkMounterLog(t, fakeMounter, 2, mount.FakeAction{Action: "mount", Target: c.expectedPodMountPath, Source: devicePath, FSType: ""})
// unmounter
unmounter, err := plug.(*rbdPlugin).newUnmounterInternal(c.spec.Name(), c.pod.UID, fdm)
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Error("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", path)
} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err)
}
checkMounterLog(t, fakeMounter, 3, mount.FakeAction{Action: "unmount", Target: c.expectedPodMountPath, Source: "", FSType: ""})
// detacher
detacher, err := plug.(*rbdPlugin).newDetacherInternal(fdm)
if err != nil {
t.Errorf("Failed to make a new Attacher: %v", err)
}
err = detacher.UnmountDevice(deviceMountPath)
if err != nil {
t.Fatalf("Detacher.UnmountDevice failed to unmount %s", deviceMountPath)
}
checkMounterLog(t, fakeMounter, 4, mount.FakeAction{Action: "unmount", Target: c.expectedDeviceMountPath, Source: "", FSType: ""})
err = detacher.Detach(deviceMountPath, fakeNodeName)
if err != nil {
t.Fatalf("Detacher.Detach failed to detach %s from %s", deviceMountPath, fakeNodeName)
}
}
type testcase struct {
spec *volume.Spec
root string
pod *v1.Pod
expectedDevicePath string
expectedDeviceMountPath string
expectedPodMountPath string
}
func TestPlugin(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
tmpDir, err = filepath.EvalSymlinks(tmpDir)
if err != nil {
t.Fatal(err)
}
podUID := uuid.NewUUID()
var cases []*testcase
cases = append(cases, &testcase{
spec: volume.NewSpecFromVolume(&v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{"a", "b"},
RBDPool: "pool1",
RBDImage: "image1",
FSType: "ext4",
},
},
}),
root: tmpDir,
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "testpod",
Namespace: "testns",
UID: podUID,
},
},
expectedDevicePath: "/dev/rbd1",
expectedDeviceMountPath: fmt.Sprintf("%s/plugins/kubernetes.io/rbd/mounts/pool1-image-image1", tmpDir),
expectedPodMountPath: fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~rbd/vol1", tmpDir, podUID),
})
cases = append(cases, &testcase{
spec: volume.NewSpecFromPersistentVolume(&v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "vol2",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
RBD: &v1.RBDPersistentVolumeSource{
CephMonitors: []string{"a", "b"},
RBDPool: "pool2",
RBDImage: "image2",
FSType: "ext4",
},
},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany},
},
}, false),
root: tmpDir,
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "testpod",
Namespace: "testns",
UID: podUID,
},
},
expectedDevicePath: "/dev/rbd1",
expectedDeviceMountPath: fmt.Sprintf("%s/plugins/kubernetes.io/rbd/mounts/pool2-image-image2", tmpDir),
expectedPodMountPath: fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~rbd/vol2", tmpDir, podUID),
})
for i := 0; i < len(cases); i++ {
doTestPlugin(t, cases[i])
}
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
RBD: &v1.RBDPersistentVolumeSource{
CephMonitors: []string{"a", "b"},
RBDImage: "bar",
FSType: "ext4",
},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
client := fake.NewSimpleClientset(pv, claim)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, client, nil))
plug, _ := plugMgr.FindPluginByName(rbdPluginName)
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if mounter == nil {
t.Fatalf("Got a nil Mounter")
}
if !mounter.GetAttributes().ReadOnly {
t.Errorf("Expected true for mounter.IsReadOnly")
}
}
func TestGetSecretNameAndNamespace(t *testing.T) {
secretName := "test-secret-name"
secretNamespace := "test-secret-namespace"
volSpec := &volume.Spec{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
RBD: &v1.RBDPersistentVolumeSource{
CephMonitors: []string{"a", "b"},
RBDImage: "bar",
FSType: "ext4",
},
},
},
},
}
secretRef := new(v1.SecretReference)
secretRef.Name = secretName
secretRef.Namespace = secretNamespace
volSpec.PersistentVolume.Spec.PersistentVolumeSource.RBD.SecretRef = secretRef
foundSecretName, foundSecretNamespace, err := getSecretNameAndNamespace(volSpec, "default")
if err != nil {
t.Errorf("getSecretNameAndNamespace failed to get Secret's name and namespace: %v", err)
}
if strings.Compare(secretName, foundSecretName) != 0 || strings.Compare(secretNamespace, foundSecretNamespace) != 0 {
t.Errorf("getSecretNameAndNamespace returned incorrect values, expected %s and %s but got %s and %s", secretName, secretNamespace, foundSecretName, foundSecretNamespace)
}
}
// https://github.com/kubernetes/kubernetes/issues/57744
func TestGetDeviceMountPath(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
fakeVolumeHost := volumetest.NewFakeVolumeHost(tmpDir, nil, nil)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, fakeVolumeHost)
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
fdm := NewFakeDiskManager()
// attacher
attacher, err := plug.(*rbdPlugin).newAttacherInternal(fdm)
if err != nil {
t.Errorf("Failed to make a new Attacher: %v", err)
}
pool, image := "pool", "image"
spec := volume.NewSpecFromVolume(&v1.Volume{
Name: "vol",
VolumeSource: v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{"a", "b"},
RBDPool: pool,
RBDImage: image,
FSType: "ext4",
},
},
})
deprecatedDir := fmt.Sprintf("%s/plugins/kubernetes.io/rbd/rbd/%s-image-%s", tmpDir, pool, image)
canonicalDir := fmt.Sprintf("%s/plugins/kubernetes.io/rbd/mounts/%s-image-%s", tmpDir, pool, image)
type testCase struct {
deprecated bool
targetPath string
}
for _, c := range []testCase{
{false, canonicalDir},
{true, deprecatedDir},
} {
if c.deprecated {
// This is a deprecated device mount path; we create it and expect
// attacher.GetDeviceMountPath to return c.targetPath.
if err := os.MkdirAll(c.targetPath, 0700); err != nil {
t.Fatalf("Create deprecated mount path failed: %v", err)
}
}
mountPath, err := attacher.GetDeviceMountPath(spec)
if err != nil {
t.Fatalf("GetDeviceMountPath failed: %v", err)
}
if mountPath != c.targetPath {
t.Errorf("Mismatch device mount path: wanted %s, got %s", c.targetPath, mountPath)
}
}
}
// https://github.com/kubernetes/kubernetes/issues/57744
func TestConstructVolumeSpec(t *testing.T) {
if runtime.GOOS == "darwin" {
t.Skipf("TestConstructVolumeSpec is not supported on GOOS=%s", runtime.GOOS)
}
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
fakeVolumeHost := volumetest.NewFakeVolumeHost(tmpDir, nil, nil)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, fakeVolumeHost)
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
fakeMounter := fakeVolumeHost.GetMounter(plug.GetPluginName()).(*mount.FakeMounter)
pool, image, volumeName := "pool", "image", "vol"
podMountPath := fmt.Sprintf("%s/pods/pod123/volumes/kubernetes.io~rbd/%s", tmpDir, volumeName)
deprecatedDir := fmt.Sprintf("%s/plugins/kubernetes.io/rbd/rbd/%s-image-%s", tmpDir, pool, image)
canonicalDir := fmt.Sprintf("%s/plugins/kubernetes.io/rbd/mounts/%s-image-%s", tmpDir, pool, image)
type testCase struct {
volumeName string
targetPath string
}
for _, c := range []testCase{
{"vol", canonicalDir},
{"vol", deprecatedDir},
} {
if err := os.MkdirAll(c.targetPath, 0700); err != nil {
t.Fatalf("Create mount path %s failed: %v", c.targetPath, err)
}
if err = fakeMounter.Mount("/dev/rbd0", c.targetPath, "fake", nil); err != nil {
t.Fatalf("Mount /dev/rbd0 to %s failed: %v", c.targetPath, err)
}
if err = fakeMounter.Mount(c.targetPath, podMountPath, "fake", []string{"bind"}); err != nil {
t.Fatalf("Mount %s to %s failed: %v", c.targetPath, podMountPath, err)
}
spec, err := plug.ConstructVolumeSpec(c.volumeName, podMountPath)
if err != nil {
t.Errorf("ConstructVolumeSpec failed: %v", err)
} else {
if spec.Volume.RBD.RBDPool != pool {
t.Errorf("Mismatch rbd pool: wanted %s, got %s", pool, spec.Volume.RBD.RBDPool)
}
if spec.Volume.RBD.RBDImage != image {
t.Fatalf("Mismatch rbd image: wanted %s, got %s", image, spec.Volume.RBD.RBDImage)
}
}
if err = fakeMounter.Unmount(podMountPath); err != nil {
t.Fatalf("Unmount pod path %s failed: %v", podMountPath, err)
}
if err = fakeMounter.Unmount(c.targetPath); err != nil {
t.Fatalf("Unmount device path %s failed: %v", c.targetPath, err)
}
}
}

vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go generated vendored Normal file

@@ -0,0 +1,785 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
// utility functions to setup rbd volume
// mainly implement diskManager interface
//
package rbd
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"strconv"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/wait"
fileutil "k8s.io/kubernetes/pkg/util/file"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/volume"
volutil "k8s.io/kubernetes/pkg/volume/util"
)
const (
imageWatcherStr = "watcher="
imageSizeStr = "size "
sizeDivStr = " MB in"
kubeLockMagic = "kubelet_lock_magic_"
// The following three values are used for 30 seconds timeout
// while waiting for RBD Watcher to expire.
rbdImageWatcherInitDelay = 1 * time.Second
rbdImageWatcherFactor = 1.4
rbdImageWatcherSteps = 10
)
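// As a rough sketch of the schedule implied by these constants: the
// exponential backoff used below sleeps approximately duration*factor^step
// between attempts, i.e. about 1s, 1.4s, 1.96s, 2.74s, ... for up to
// rbdImageWatcherSteps attempts while waiting for the image watcher to expire.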
func getDevFromImageAndPool(pool, image string) (string, bool) {
device, found := getRbdDevFromImageAndPool(pool, image)
if found {
return device, true
}
device, found = getNbdDevFromImageAndPool(pool, image)
if found {
return device, true
}
return "", false
}
// Search /sys/bus for an rbd device that matches the given pool and image.
func getRbdDevFromImageAndPool(pool string, image string) (string, bool) {
// /sys/bus/rbd/devices/X/name and /sys/bus/rbd/devices/X/pool
sys_path := "/sys/bus/rbd/devices"
if dirs, err := ioutil.ReadDir(sys_path); err == nil {
for _, f := range dirs {
// Pool and name format:
// see rbd_pool_show() and rbd_name_show() at
// https://github.com/torvalds/linux/blob/master/drivers/block/rbd.c
name := f.Name()
// First match pool, then match name.
poolFile := path.Join(sys_path, name, "pool")
poolBytes, err := ioutil.ReadFile(poolFile)
if err != nil {
glog.V(4).Infof("error reading %s: %v", poolFile, err)
continue
}
if strings.TrimSpace(string(poolBytes)) != pool {
glog.V(4).Infof("device %s is not %q: %q", name, pool, string(poolBytes))
continue
}
imgFile := path.Join(sys_path, name, "name")
imgBytes, err := ioutil.ReadFile(imgFile)
if err != nil {
glog.V(4).Infof("error reading %s: %v", imgFile, err)
continue
}
if strings.TrimSpace(string(imgBytes)) != image {
glog.V(4).Infof("device %s is not %q: %q", name, image, string(imgBytes))
continue
}
// Found a match, check if device exists.
devicePath := "/dev/rbd" + name
if _, err := os.Lstat(devicePath); err == nil {
return devicePath, true
}
}
}
return "", false
}
func getMaxNbds() (int, error) {
// the max number of nbd devices may be found in maxNbdsPath
// we will check sysfs for possible nbd devices even if this is not available
maxNbdsPath := "/sys/module/nbd/parameters/nbds_max"
_, err := os.Lstat(maxNbdsPath)
if err != nil {
return 0, fmt.Errorf("rbd-nbd: failed to retrieve max_nbds from %s err: %q", maxNbdsPath, err)
}
glog.V(4).Infof("found nbds max parameters file at %s", maxNbdsPath)
maxNbdBytes, err := ioutil.ReadFile(maxNbdsPath)
if err != nil {
return 0, fmt.Errorf("rbd-nbd: failed to read max_nbds from %s err: %q", maxNbdsPath, err)
}
maxNbds, err := strconv.Atoi(strings.TrimSpace(string(maxNbdBytes)))
if err != nil {
return 0, fmt.Errorf("rbd-nbd: failed to read max_nbds err: %q", err)
}
glog.V(4).Infof("rbd-nbd: max_nbds: %d", maxNbds)
return maxNbds, nil
}
// Locate any existing rbd-nbd process mapping given a <pool, image>.
// Recent versions of the rbd-nbd tool can correctly provide this info using list-mapped,
// but older versions don't.
// The implementation below peeks at the command line of nbd-bound processes
// to figure out any mapped images.
func getNbdDevFromImageAndPool(pool string, image string) (string, bool) {
// nbd module exports the pid of serving process in sysfs
basePath := "/sys/block/nbd"
// Do not change imgPath format - some tools like rbd-nbd are strict about it.
imgPath := fmt.Sprintf("%s/%s", pool, image)
maxNbds, maxNbdsErr := getMaxNbds()
if maxNbdsErr != nil {
glog.V(4).Infof("error reading nbds_max %v", maxNbdsErr)
return "", false
}
for i := 0; i < maxNbds; i++ {
nbdPath := basePath + strconv.Itoa(i)
_, err := os.Lstat(nbdPath)
if err != nil {
glog.V(4).Infof("error reading nbd info directory %s: %v", nbdPath, err)
continue
}
pidBytes, err := ioutil.ReadFile(path.Join(nbdPath, "pid"))
if err != nil {
glog.V(5).Infof("did not find valid pid file in dir %s: %v", nbdPath, err)
continue
}
cmdlineFileName := path.Join("/proc", strings.TrimSpace(string(pidBytes)), "cmdline")
rawCmdline, err := ioutil.ReadFile(cmdlineFileName)
if err != nil {
glog.V(4).Infof("failed to read cmdline file %s: %v", cmdlineFileName, err)
continue
}
cmdlineArgs := strings.FieldsFunc(string(rawCmdline), func(r rune) bool {
return r == '\u0000'
})
// Check if this process is mapping a rbd device.
// The only accepted cmdline pattern is the one produced by execRbdMap:
// rbd-nbd map pool/image ...
if len(cmdlineArgs) < 3 || cmdlineArgs[0] != "rbd-nbd" || cmdlineArgs[1] != "map" {
glog.V(4).Infof("nbd device %s is not used by rbd", nbdPath)
continue
}
if cmdlineArgs[2] != imgPath {
glog.V(4).Infof("rbd-nbd device %s did not match expected image path: %s with path found: %s",
nbdPath, imgPath, cmdlineArgs[2])
continue
}
devicePath := path.Join("/dev", "nbd"+strconv.Itoa(i))
if _, err := os.Lstat(devicePath); err != nil {
glog.Warningf("Stat device %s for imgpath %s failed %v", devicePath, imgPath, err)
continue
}
return devicePath, true
}
return "", false
}
// Stat a path; if it doesn't exist, retry up to maxRetries times.
func waitForPath(pool, image string, maxRetries int, useNbdDriver bool) (string, bool) {
for i := 0; i < maxRetries; i++ {
if i != 0 {
time.Sleep(time.Second)
}
if useNbdDriver {
if devicePath, found := getNbdDevFromImageAndPool(pool, image); found {
return devicePath, true
}
} else {
if devicePath, found := getRbdDevFromImageAndPool(pool, image); found {
return devicePath, true
}
}
}
return "", false
}
// Execute command to map a rbd device for mounter.
// rbdCmd is driver dependent and either "rbd" or "rbd-nbd".
func execRbdMap(b rbdMounter, rbdCmd string, mon string) ([]byte, error) {
// Commandline: rbdCmd map imgPath ...
// do not change this format - some tools like rbd-nbd are strict about it.
imgPath := fmt.Sprintf("%s/%s", b.Pool, b.Image)
if b.Secret != "" {
return b.exec.Run(rbdCmd,
"map", imgPath, "--id", b.Id, "-m", mon, "--key="+b.Secret)
} else {
return b.exec.Run(rbdCmd,
"map", imgPath, "--id", b.Id, "-m", mon, "-k", b.Keyring)
}
}
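// For illustration (hypothetical values), this produces command lines like:
//   rbd map kube/image1 --id admin -m 10.16.153.105:6789 --key=<secret>
// when a secret is set, or, with a keyring file:
//   rbd map kube/image1 --id admin -m 10.16.153.105:6789 -k /etc/ceph/keyring
// (with "rbd-nbd" in place of "rbd" when the nbd driver is used).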
// Check if rbd-nbd tools are installed.
func checkRbdNbdTools(e mount.Exec) bool {
_, err := e.Run("modprobe", "nbd")
if err != nil {
glog.V(5).Infof("rbd-nbd: nbd modprobe failed with error %v", err)
return false
}
if _, err := e.Run("rbd-nbd", "--version"); err != nil {
glog.V(5).Infof("rbd-nbd: getting rbd-nbd version failed with error %v", err)
return false
}
glog.V(3).Infof("rbd-nbd tools were found.")
return true
}
// Make a directory like /var/lib/kubelet/plugins/kubernetes.io/rbd/mounts/pool-image-image.
func makePDNameInternal(host volume.VolumeHost, pool string, image string) string {
// Backward compatibility for the deprecated format: /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/pool-image-image.
deprecatedDir := path.Join(host.GetPluginDir(rbdPluginName), "rbd", pool+"-image-"+image)
info, err := os.Stat(deprecatedDir)
if err == nil && info.IsDir() {
// The device mount path has already been created with the deprecated format, return it.
glog.V(5).Infof("Deprecated format path %s found", deprecatedDir)
return deprecatedDir
}
// Return the canonical format path.
return path.Join(host.GetPluginDir(rbdPluginName), mount.MountsInGlobalPDPath, pool+"-image-"+image)
}
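// For example, assuming the default kubelet plugin dir, pool "kube" and image
// "foo" resolve to /var/lib/kubelet/plugins/kubernetes.io/rbd/mounts/kube-image-foo,
// unless the deprecated .../rbd/kube-image-foo directory already exists.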
// Make a directory like /var/lib/kubelet/plugins/kubernetes.io/rbd/volumeDevices/pool-image-image.
func makeVDPDNameInternal(host volume.VolumeHost, pool string, image string) string {
return path.Join(host.GetVolumeDevicePluginDir(rbdPluginName), pool+"-image-"+image)
}
// RBDUtil implements diskManager interface.
type RBDUtil struct{}
var _ diskManager = &RBDUtil{}
func (util *RBDUtil) MakeGlobalPDName(rbd rbd) string {
return makePDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image)
}
func (util *RBDUtil) MakeGlobalVDPDName(rbd rbd) string {
return makeVDPDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image)
}
func rbdErrors(runErr, resultErr error) error {
if err, ok := runErr.(*exec.Error); ok {
if err.Err == exec.ErrNotFound {
return fmt.Errorf("rbd: rbd cmd not found")
}
}
return resultErr
}
// The 'rbd' utility builds a comma-separated list of monitor addresses from
// the '-m'/'--mon_host' parameter (comma, semicolon, or whitespace delimited
// monitor addresses) and sends it to the kernel rbd/libceph modules, which can
// accept a comma-separated list of monitor addresses (e.g. ip1[:port1][,ip2[:port2]...])
// in their first version in linux (see
// https://github.com/torvalds/linux/blob/602adf400201636e95c3fed9f31fba54a3d7e844/net/ceph/ceph_common.c#L239).
// Also, libceph module chooses monitor randomly, so we can simply pass all
// addresses without randomization (see
// https://github.com/torvalds/linux/blob/602adf400201636e95c3fed9f31fba54a3d7e844/net/ceph/mon_client.c#L132).
func (util *RBDUtil) kernelRBDMonitorsOpt(mons []string) string {
return strings.Join(mons, ",")
}
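// For example, mons of ["10.16.153.105:6789", "10.16.153.106:6789"] yield
// "10.16.153.105:6789,10.16.153.106:6789".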
// rbdUnlock releases a lock on image if found.
func (util *RBDUtil) rbdUnlock(b rbdMounter) error {
var err error
var output, locker string
var cmd []byte
var secret_opt []string
if b.Secret != "" {
secret_opt = []string{"--key=" + b.Secret}
} else {
secret_opt = []string{"-k", b.Keyring}
}
if len(b.adminId) == 0 {
b.adminId = b.Id
}
if len(b.adminSecret) == 0 {
b.adminSecret = b.Secret
}
// Construct lock id using host name and a magic prefix.
lock_id := kubeLockMagic + node.GetHostname("")
mon := util.kernelRBDMonitorsOpt(b.Mon)
// Get the locker name, something like "client.1234".
args := []string{"lock", "list", b.Image, "--pool", b.Pool, "--id", b.Id, "-m", mon}
args = append(args, secret_opt...)
cmd, err = b.exec.Run("rbd", args...)
output = string(cmd)
glog.V(4).Infof("lock list output %q", output)
if err != nil {
return err
}
ind := strings.LastIndex(output, lock_id) - 1
for i := ind; i >= 0; i-- {
if output[i] == '\n' {
locker = output[(i + 1):ind]
break
}
}
// Remove a lock if found: rbd lock remove.
if len(locker) > 0 {
args := []string{"lock", "remove", b.Image, lock_id, locker, "--pool", b.Pool, "--id", b.Id, "-m", mon}
args = append(args, secret_opt...)
cmd, err = b.exec.Run("rbd", args...)
if err == nil {
glog.V(4).Infof("rbd: successfully remove lock (locker_id: %s) on image: %s/%s with id %s mon %s", lock_id, b.Pool, b.Image, b.Id, mon)
} else {
glog.Warningf("rbd: failed to remove lock (lock_id: %s) on image: %s/%s with id %s mon %s: %v", lock_id, b.Pool, b.Image, b.Id, mon, err)
}
}
return err
}
// AttachDisk attaches the disk on the node.
func (util *RBDUtil) AttachDisk(b rbdMounter) (string, error) {
var output []byte
globalPDPath := util.MakeGlobalPDName(*b.rbd)
if pathExists, pathErr := volutil.PathExists(globalPDPath); pathErr != nil {
return "", fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
if err := os.MkdirAll(globalPDPath, 0750); err != nil {
return "", err
}
}
// Evaluate whether this device was mapped with rbd.
devicePath, mapped := waitForPath(b.Pool, b.Image, 1 /*maxRetries*/, false /*useNbdDriver*/)
// If rbd-nbd tools are found, we will fall back to them should the default krbd driver fail.
nbdToolsFound := false
if !mapped {
nbdToolsFound = checkRbdNbdTools(b.exec)
if nbdToolsFound {
devicePath, mapped = waitForPath(b.Pool, b.Image, 1 /*maxRetries*/, true /*useNbdDriver*/)
}
}
if !mapped {
// Currently, we don't acquire an advisory lock on the image, but for backward
// compatibility, we need to check if the image is being used by nodes running an old kubelet.
// osd_client_watch_timeout defaults to 30 seconds; if the watcher stays active longer than that,
// the rbd image does not get mounted and a failure message is generated.
backoff := wait.Backoff{
Duration: rbdImageWatcherInitDelay,
Factor: rbdImageWatcherFactor,
Steps: rbdImageWatcherSteps,
}
needValidUsed := true
// If accessModes contain ReadOnlyMany, we don't need to check whether the rbd image is in use.
if b.accessModes != nil {
for _, v := range b.accessModes {
if v != v1.ReadWriteOnce {
needValidUsed = false
break
}
}
}
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
used, rbdOutput, err := util.rbdStatus(&b)
if err != nil {
return false, fmt.Errorf("fail to check rbd image status with: (%v), rbd output: (%s)", err, rbdOutput)
}
return !needValidUsed || !used, nil
})
// Return error if rbd image has not become available for the specified timeout.
if err == wait.ErrWaitTimeout {
return "", fmt.Errorf("rbd image %s/%s is still being used", b.Pool, b.Image)
}
// Return error if any other errors were encountered while waiting for the image to become available.
if err != nil {
return "", err
}
mon := util.kernelRBDMonitorsOpt(b.Mon)
glog.V(1).Infof("rbd: map mon %s", mon)
_, err = b.exec.Run("modprobe", "rbd")
if err != nil {
glog.Warningf("rbd: failed to load rbd kernel module:%v", err)
}
output, err = execRbdMap(b, "rbd", mon)
if err != nil {
if !nbdToolsFound {
glog.V(1).Infof("rbd: map error %v, rbd output: %s", err, string(output))
return "", fmt.Errorf("rbd: map failed %v, rbd output: %s", err, string(output))
}
glog.V(3).Infof("rbd: map failed with %v, %s. Retrying with rbd-nbd", err, string(output))
errList := []error{err}
outputList := output
output, err = execRbdMap(b, "rbd-nbd", mon)
if err != nil {
errList = append(errList, err)
outputList = append(outputList, output...)
return "", fmt.Errorf("rbd: map failed %v, rbd output: %s", errors.NewAggregate(errList), string(outputList))
}
devicePath, mapped = waitForPath(b.Pool, b.Image, 10 /*maxRetries*/, true /*useNbdDriver*/)
} else {
devicePath, mapped = waitForPath(b.Pool, b.Image, 10 /*maxRetries*/, false /*useNbdDriver*/)
}
if !mapped {
return "", fmt.Errorf("could not map image %s/%s, timeout after 10s", b.Pool, b.Image)
}
}
return devicePath, nil
}
// DetachDisk detaches the disk from the node.
// It detaches device from the node if device is provided, and removes the lock
// if there is persisted RBD info under deviceMountPath.
func (util *RBDUtil) DetachDisk(plugin *rbdPlugin, deviceMountPath string, device string) error {
if len(device) == 0 {
return fmt.Errorf("DetachDisk failed, device is empty")
}
exec := plugin.host.GetExec(plugin.GetPluginName())
var rbdCmd string
// Unlike map, we cannot fall through for unmap;
// the tool used to unmap depends on the device type.
if strings.HasPrefix(device, "/dev/nbd") {
rbdCmd = "rbd-nbd"
} else {
rbdCmd = "rbd"
}
// rbd unmap
output, err := exec.Run(rbdCmd, "unmap", device)
if err != nil {
return rbdErrors(err, fmt.Errorf("rbd: failed to unmap device %s, error %v, rbd output: %v", device, err, output))
}
glog.V(3).Infof("rbd: successfully unmapped device %s", device)
// Currently, we don't persist rbd info on the disk, but for backward
// compatibility, we need to clean it if found.
rbdFile := path.Join(deviceMountPath, "rbd.json")
exists, err := fileutil.FileExists(rbdFile)
if err != nil {
return err
}
if exists {
glog.V(3).Infof("rbd: old rbd.json is found under %s, cleaning it", deviceMountPath)
err = util.cleanOldRBDFile(plugin, rbdFile)
if err != nil {
glog.Errorf("rbd: failed to clean %s", rbdFile)
return err
}
glog.V(3).Infof("rbd: successfully removed %s", rbdFile)
}
return nil
}
// DetachBlockDisk detaches the disk from the node.
func (util *RBDUtil) DetachBlockDisk(disk rbdDiskUnmapper, mapPath string) error {
if pathExists, pathErr := volutil.PathExists(mapPath); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
glog.Warningf("Warning: Unmap skipped because path does not exist: %v", mapPath)
return nil
}
// If we arrive here, device is no longer used, see if we need to logout of the target
device, err := getBlockVolumeDevice(mapPath)
if err != nil {
return err
}
if len(device) == 0 {
return fmt.Errorf("DetachBlockDisk failed, device is empty")
}
exec := disk.plugin.host.GetExec(disk.plugin.GetPluginName())
var rbdCmd string
// Unlike map, we cannot fall through here.
// Any nbd device must be unmapped by rbd-nbd.
if strings.HasPrefix(device, "/dev/nbd") {
rbdCmd = "rbd-nbd"
glog.V(4).Infof("rbd: using rbd-nbd for unmap function")
} else {
rbdCmd = "rbd"
glog.V(4).Infof("rbd: using rbd for unmap function")
}
// rbd unmap
output, err := exec.Run(rbdCmd, "unmap", device)
if err != nil {
return rbdErrors(err, fmt.Errorf("rbd: failed to unmap device %s, error %v, rbd output: %s", device, err, string(output)))
}
glog.V(3).Infof("rbd: successfully unmapped device %s", device)
return nil
}
// cleanOldRBDFile reads rbd info from the rbd.json file and removes the lock if found.
// Finally, it removes the rbd.json file itself.
func (util *RBDUtil) cleanOldRBDFile(plugin *rbdPlugin, rbdFile string) error {
mounter := &rbdMounter{
// util.rbdUnlock needs it to run command.
rbd: newRBD("", "", "", "", false, plugin, util),
}
fp, err := os.Open(rbdFile)
if err != nil {
return fmt.Errorf("rbd: open err %s/%s", rbdFile, err)
}
defer fp.Close()
decoder := json.NewDecoder(fp)
if err = decoder.Decode(mounter); err != nil {
return fmt.Errorf("rbd: decode err: %v.", err)
}
// Remove rbd lock if found.
// The disk is not attached to this node anymore, so the lock on image
// for this node can be removed safely.
err = util.rbdUnlock(*mounter)
if err == nil {
os.Remove(rbdFile)
}
return err
}
func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *v1.RBDPersistentVolumeSource, size int, err error) {
var output []byte
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volSizeBytes := capacity.Value()
// Convert to MiB, the unit rbd defaults to.
sz := int(volutil.RoundUpSize(volSizeBytes, 1024*1024))
volSz := fmt.Sprintf("%d", sz)
mon := util.kernelRBDMonitorsOpt(p.Mon)
if p.rbdMounter.imageFormat == rbdImageFormat2 {
glog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, p.rbdMounter.imageFeatures, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret)
} else {
glog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret)
}
args := []string{"create", p.rbdMounter.Image, "--size", volSz, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key=" + p.rbdMounter.adminSecret, "--image-format", p.rbdMounter.imageFormat}
if p.rbdMounter.imageFormat == rbdImageFormat2 {
// If no image features are provided, the result is an empty string,
// which disables all RBD image format 2 features, as expected.
features := strings.Join(p.rbdMounter.imageFeatures, ",")
args = append(args, "--image-feature", features)
}
output, err = p.exec.Run("rbd", args...)
if err != nil {
glog.Warningf("failed to create rbd image, output %v", string(output))
return nil, 0, fmt.Errorf("failed to create rbd image: %v, command output: %s", err, string(output))
}
return &v1.RBDPersistentVolumeSource{
CephMonitors: p.rbdMounter.Mon,
RBDImage: p.rbdMounter.Image,
RBDPool: p.rbdMounter.Pool,
}, sz, nil
}
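// For illustration (hypothetical values), provisioning a 1Gi format 2 image
// runs a command like:
//   rbd create foo --size 1024 --pool kube --id admin -m 10.16.153.105:6789 --key=<secret> --image-format 2 --image-feature layering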
func (util *RBDUtil) DeleteImage(p *rbdVolumeDeleter) error {
var output []byte
found, rbdOutput, err := util.rbdStatus(p.rbdMounter)
if err != nil {
return fmt.Errorf("error %v, rbd output: %v", err, rbdOutput)
}
if found {
glog.Info("rbd is still being used ", p.rbdMounter.Image)
return fmt.Errorf("rbd image %s/%s is still being used, rbd output: %v", p.rbdMounter.Pool, p.rbdMounter.Image, rbdOutput)
}
// rbd rm.
mon := util.kernelRBDMonitorsOpt(p.rbdMounter.Mon)
glog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret)
output, err = p.exec.Run("rbd",
"rm", p.rbdMounter.Image, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key="+p.rbdMounter.adminSecret)
if err == nil {
return nil
}
glog.Errorf("failed to delete rbd image: %v, command output: %s", err, string(output))
return fmt.Errorf("error %v, rbd output: %v", err, string(output))
}
// ExpandImage runs rbd resize command to resize the specified image.
func (util *RBDUtil) ExpandImage(rbdExpander *rbdVolumeExpander, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
var output []byte
var err error
volSizeBytes := newSize.Value()
// Convert to MiB, the unit rbd defaults to.
sz := int(volutil.RoundUpSize(volSizeBytes, 1024*1024))
newVolSz := fmt.Sprintf("%d", sz)
newSizeQuant := resource.MustParse(fmt.Sprintf("%dMi", sz))
// Check the current size of the rbd image; if it equals or exceeds the newly requested size, do nothing.
curSize, infoErr := util.rbdInfo(rbdExpander.rbdMounter)
if infoErr != nil {
return oldSize, fmt.Errorf("rbd info failed, error: %v", infoErr)
}
if curSize >= sz {
return newSizeQuant, nil
}
// rbd resize.
mon := util.kernelRBDMonitorsOpt(rbdExpander.rbdMounter.Mon)
glog.V(4).Infof("rbd: resize %s using mon %s, pool %s id %s key %s", rbdExpander.rbdMounter.Image, mon, rbdExpander.rbdMounter.Pool, rbdExpander.rbdMounter.adminId, rbdExpander.rbdMounter.adminSecret)
output, err = rbdExpander.exec.Run("rbd",
"resize", rbdExpander.rbdMounter.Image, "--size", newVolSz, "--pool", rbdExpander.rbdMounter.Pool, "--id", rbdExpander.rbdMounter.adminId, "-m", mon, "--key="+rbdExpander.rbdMounter.adminSecret)
if err == nil {
return newSizeQuant, nil
}
glog.Errorf("failed to resize rbd image: %v, command output: %s", err, string(output))
return oldSize, err
}
// rbdInfo runs `rbd info` command to get the current image size in MB.
func (util *RBDUtil) rbdInfo(b *rbdMounter) (int, error) {
var err error
var output string
var cmd []byte
// If we don't have admin id/secret (e.g. attaching), fallback to user id/secret.
id := b.adminId
secret := b.adminSecret
if id == "" {
id = b.Id
secret = b.Secret
}
mon := util.kernelRBDMonitorsOpt(b.Mon)
// cmd "rbd info" gets the image info with the following output:
//
// # image exists (exit=0)
// rbd info volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08
// size 1024 MB in 256 objects
// order 22 (4096 kB objects)
// block_name_prefix: rbd_data.1253ac238e1f29
// format: 2
// ...
//
// rbd info volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08 --format json
// {"name":"volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08","size":1073741824,"objects":256,"order":22,"object_size":4194304,"block_name_prefix":"rbd_data.1253ac238e1f29","format":2,"features":["layering","exclusive-lock","object-map","fast-diff","deep-flatten"],"flags":[]}
//
//
// # image does not exist (exit=2)
// rbd: error opening image 1234: (2) No such file or directory
//
glog.V(4).Infof("rbd: info %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret)
cmd, err = b.exec.Run("rbd",
"info", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret)
output = string(cmd)
if err, ok := err.(*exec.Error); ok {
if err.Err == exec.ErrNotFound {
glog.Errorf("rbd cmd not found")
// fail fast if rbd command is not found.
return 0, err
}
}
// If the command never succeeded, return its last error.
if err != nil {
return 0, err
}
if len(output) == 0 {
return 0, fmt.Errorf("can not get image size info %s: %s", b.Image, output)
}
// Get the size value string, just between `size ` and ` MB in`, such as `size 1024 MB in 256 objects`.
sizeIndex := strings.Index(output, imageSizeStr)
divIndex := strings.Index(output, sizeDivStr)
if sizeIndex == -1 || divIndex == -1 || divIndex <= sizeIndex+5 {
return 0, fmt.Errorf("can not get image size info %s: %s", b.Image, output)
}
rbdSizeStr := output[sizeIndex+5 : divIndex]
rbdSize, err := strconv.Atoi(rbdSizeStr)
if err != nil {
return 0, fmt.Errorf("can not convert size str: %s to int", rbdSizeStr)
}
return rbdSize, nil
}
// rbdStatus runs `rbd status` command to check if there is watcher on the image.
func (util *RBDUtil) rbdStatus(b *rbdMounter) (bool, string, error) {
var err error
var output string
var cmd []byte
// If we don't have admin id/secret (e.g. attaching), fallback to user id/secret.
id := b.adminId
secret := b.adminSecret
if id == "" {
id = b.Id
secret = b.Secret
}
mon := util.kernelRBDMonitorsOpt(b.Mon)
// cmd "rbd status" lists the rbd client watchers with the following output:
//
// # there is a watcher (exit=0)
// Watchers:
// watcher=10.16.153.105:0/710245699 client.14163 cookie=1
//
// # there is no watcher (exit=0)
// Watchers: none
//
// Otherwise, exit is non-zero, for example:
//
// # image does not exist (exit=2)
// rbd: error opening image kubernetes-dynamic-pvc-<UUID>: (2) No such file or directory
//
glog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret)
cmd, err = b.exec.Run("rbd",
"status", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret)
output = string(cmd)
if err, ok := err.(*exec.Error); ok {
if err.Err == exec.ErrNotFound {
glog.Errorf("rbd cmd not found")
// fail fast if command not found
return false, output, err
}
}
// If the command never succeeded, return its last error.
if err != nil {
return false, output, err
}
if strings.Contains(output, imageWatcherStr) {
glog.V(4).Infof("rbd: watchers on %s: %s", b.Image, output)
return true, output, nil
} else {
glog.Warningf("rbd: no watchers on %s", b.Image)
return false, output, nil
}
}