Bumping k8s dependencies to 1.13

This commit is contained in:
Cheng Xing
2018-11-16 14:08:25 -08:00
parent 305407125c
commit b4c0b68ec7
8002 changed files with 884099 additions and 276228 deletions

View File

@@ -14,29 +14,33 @@ go_library(
"//pkg/cloudprovider:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/volume/attachdetach/cache:go_default_library",
"//pkg/controller/volume/attachdetach/metrics:go_default_library",
"//pkg/controller/volume/attachdetach/populator:go_default_library",
"//pkg/controller/volume/attachdetach/reconciler:go_default_library",
"//pkg/controller/volume/attachdetach/statusupdater:go_default_library",
"//pkg/controller/volume/attachdetach/util:go_default_library",
"//pkg/util/io:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//staging/src/k8s.io/api/authentication/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)
@@ -49,11 +53,11 @@ go_test(
"//pkg/controller/volume/attachdetach/cache:go_default_library",
"//pkg/controller/volume/attachdetach/testing:go_default_library",
"//pkg/volume:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
],
)
@@ -69,6 +73,7 @@ filegroup(
srcs = [
":package-srcs",
"//pkg/controller/volume/attachdetach/cache:all-srcs",
"//pkg/controller/volume/attachdetach/metrics:all-srcs",
"//pkg/controller/volume/attachdetach/populator:all-srcs",
"//pkg/controller/volume/attachdetach/reconciler:all-srcs",
"//pkg/controller/volume/attachdetach/statusupdater:all-srcs",

View File

@@ -1,2 +1,3 @@
approvers:
- saad-ali
- gnufied

View File

@@ -26,9 +26,11 @@ import (
"github.com/golang/glog"
authenticationv1 "k8s.io/api/authentication/v1"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
@@ -36,14 +38,16 @@ import (
corelisters "k8s.io/client-go/listers/core/v1"
kcache "k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
csiclient "k8s.io/csi-api/pkg/client/clientset/versioned"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/metrics"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/util"
"k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
@@ -93,6 +97,7 @@ type AttachDetachController interface {
// NewAttachDetachController returns a new instance of AttachDetachController.
func NewAttachDetachController(
kubeClient clientset.Interface,
csiClient csiclient.Interface,
podInformer coreinformers.PodInformer,
nodeInformer coreinformers.NodeInformer,
pvcInformer coreinformers.PersistentVolumeClaimInformer,
@@ -119,15 +124,18 @@ func NewAttachDetachController(
// deleted (probably can't do this with sharedInformer), etc.
adc := &attachDetachController{
kubeClient: kubeClient,
csiClient: csiClient,
pvcLister: pvcInformer.Lister(),
pvcsSynced: pvcInformer.Informer().HasSynced,
pvLister: pvInformer.Lister(),
pvsSynced: pvInformer.Informer().HasSynced,
podLister: podInformer.Lister(),
podsSynced: podInformer.Informer().HasSynced,
podIndexer: podInformer.Informer().GetIndexer(),
nodeLister: nodeInformer.Lister(),
nodesSynced: nodeInformer.Informer().HasSynced,
cloud: cloud,
pvcQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcs"),
}
if err := adc.volumePluginMgr.InitPlugins(plugins, prober, adc); err != nil {
@@ -179,20 +187,63 @@ func NewAttachDetachController(
DeleteFunc: adc.podDelete,
})
// This custom indexer will index pods by their PVC keys. Then we don't need
// to iterate over all pods every time to find pods which reference a given PVC.
adc.podIndexer.AddIndexers(kcache.Indexers{
pvcKeyIndex: indexByPVCKey,
})
nodeInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{
AddFunc: adc.nodeAdd,
UpdateFunc: adc.nodeUpdate,
DeleteFunc: adc.nodeDelete,
})
pvcInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
adc.enqueuePVC(obj)
},
UpdateFunc: func(old, new interface{}) {
adc.enqueuePVC(new)
},
})
return adc, nil
}
const (
pvcKeyIndex string = "pvcKey"
)
// indexByPVCKey returns PVC keys for given pod. Note that the index is only
// used for attaching, so we are only interested in active pods with nodeName
// set.
func indexByPVCKey(obj interface{}) ([]string, error) {
pod, ok := obj.(*v1.Pod)
if !ok {
return []string{}, nil
}
if len(pod.Spec.NodeName) == 0 || volumeutil.IsPodTerminated(pod, pod.Status) {
return []string{}, nil
}
keys := []string{}
for _, podVolume := range pod.Spec.Volumes {
if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
keys = append(keys, fmt.Sprintf("%s/%s", pod.Namespace, pvcSource.ClaimName))
}
}
return keys, nil
}
type attachDetachController struct {
// kubeClient is the kube API client used by volumehost to communicate with
// the API server.
kubeClient clientset.Interface
// csiClient is the csi.storage.k8s.io API client used by volumehost to communicate with
// the API server.
csiClient csiclient.Interface
// pvcLister is the shared PVC lister used to fetch and store PVC
// objects from the API server. It is shared with other controllers and
// therefore the PVC objects in its store should be treated as immutable.
@@ -207,6 +258,7 @@ type attachDetachController struct {
podLister corelisters.PodLister
podsSynced kcache.InformerSynced
podIndexer kcache.Indexer
nodeLister corelisters.NodeLister
nodesSynced kcache.InformerSynced
@@ -251,10 +303,14 @@ type attachDetachController struct {
// recorder is used to record events in the API server
recorder record.EventRecorder
// pvcQueue is used to queue pvc objects
pvcQueue workqueue.RateLimitingInterface
}
func (adc *attachDetachController) Run(stopCh <-chan struct{}) {
defer runtime.HandleCrash()
defer adc.pvcQueue.ShutDown()
glog.Infof("Starting attach detach controller")
defer glog.Infof("Shutting down attach detach controller")
@@ -273,6 +329,13 @@ func (adc *attachDetachController) Run(stopCh <-chan struct{}) {
}
go adc.reconciler.Run(stopCh)
go adc.desiredStateOfWorldPopulator.Run(stopCh)
go wait.Until(adc.pvcWorker, time.Second, stopCh)
metrics.Register(adc.pvcLister,
adc.pvLister,
adc.podLister,
adc.actualStateOfWorld,
adc.desiredStateOfWorld,
&adc.volumePluginMgr)
<-stopCh
}
@@ -485,6 +548,83 @@ func (adc *attachDetachController) nodeDelete(obj interface{}) {
adc.processVolumesInUse(nodeName, node.Status.VolumesInUse)
}
func (adc *attachDetachController) enqueuePVC(obj interface{}) {
key, err := kcache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
runtime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
return
}
adc.pvcQueue.Add(key)
}
// pvcWorker processes items from pvcQueue
func (adc *attachDetachController) pvcWorker() {
for adc.processNextItem() {
}
}
func (adc *attachDetachController) processNextItem() bool {
keyObj, shutdown := adc.pvcQueue.Get()
if shutdown {
return false
}
defer adc.pvcQueue.Done(keyObj)
if err := adc.syncPVCByKey(keyObj.(string)); err != nil {
// Rather than wait for a full resync, re-add the key to the
// queue to be processed.
adc.pvcQueue.AddRateLimited(keyObj)
runtime.HandleError(fmt.Errorf("Failed to sync pvc %q, will retry again: %v", keyObj.(string), err))
return true
}
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
adc.pvcQueue.Forget(keyObj)
return true
}
func (adc *attachDetachController) syncPVCByKey(key string) error {
glog.V(5).Infof("syncPVCByKey[%s]", key)
namespace, name, err := kcache.SplitMetaNamespaceKey(key)
if err != nil {
glog.V(4).Infof("error getting namespace & name of pvc %q to get pvc from informer: %v", key, err)
return nil
}
pvc, err := adc.pvcLister.PersistentVolumeClaims(namespace).Get(name)
if apierrors.IsNotFound(err) {
glog.V(4).Infof("error getting pvc %q from informer: %v", key, err)
return nil
}
if err != nil {
return err
}
if pvc.Status.Phase != v1.ClaimBound || pvc.Spec.VolumeName == "" {
// Skip unbound PVCs.
return nil
}
objs, err := adc.podIndexer.ByIndex(pvcKeyIndex, key)
if err != nil {
return err
}
for _, obj := range objs {
pod, ok := obj.(*v1.Pod)
if !ok {
continue
}
volumeActionFlag := util.DetermineVolumeAction(
pod,
adc.desiredStateOfWorld,
true /* default volume action */)
util.ProcessPodVolumes(pod, volumeActionFlag, /* addVolumes */
adc.desiredStateOfWorld, &adc.volumePluginMgr, adc.pvcLister, adc.pvLister)
}
return nil
}
// processVolumesInUse processes the list of volumes marked as "in-use"
// according to the specified Node's Status.VolumesInUse and updates the
// corresponding volume in the actual state of the world to indicate that it is
@@ -503,7 +643,7 @@ func (adc *attachDetachController) processVolumesInUse(
err := adc.actualStateOfWorld.SetVolumeMountedByNode(attachedVolume.VolumeName, nodeName, mounted)
if err != nil {
glog.Warningf(
"SetVolumeMountedByNode(%q, %q, %q) returned an error: %v",
"SetVolumeMountedByNode(%q, %q, %v) returned an error: %v",
attachedVolume.VolumeName, nodeName, mounted, err)
}
}
@@ -559,10 +699,6 @@ func (adc *attachDetachController) GetMounter(pluginName string) mount.Interface
return nil
}
func (adc *attachDetachController) GetWriter() io.Writer {
return nil
}
func (adc *attachDetachController) GetHostName() string {
return ""
}
@@ -622,3 +758,7 @@ func (adc *attachDetachController) GetNodeName() types.NodeName {
func (adc *attachDetachController) GetEventRecorder() record.EventRecorder {
return adc.recorder
}
func (adc *attachDetachController) GetCSIClient() csiclient.Interface {
return adc.csiClient
}
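
Editor's note: the hunks above add two standard controller building blocks — a pod indexer keyed by PVC ("pvcKeyIndex"/"indexByPVCKey") and a rate-limited PVC workqueue drained by pvcWorker/processNextItem. Below is a minimal, self-contained sketch of that pattern using only client-go; it is not the controller's actual wiring, and the pod/PVC names ("web-0", "data-web-0") are illustrative assumptions.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
)

const pvcKeyIndex = "pvcKey"

// indexPodByPVCKey mirrors indexByPVCKey above: a pod is indexed under the
// "namespace/claimName" of every PVC it references.
func indexPodByPVCKey(obj interface{}) ([]string, error) {
	pod, ok := obj.(*v1.Pod)
	if !ok {
		return []string{}, nil
	}
	keys := []string{}
	for _, vol := range pod.Spec.Volumes {
		if src := vol.VolumeSource.PersistentVolumeClaim; src != nil {
			keys = append(keys, fmt.Sprintf("%s/%s", pod.Namespace, src.ClaimName))
		}
	}
	return keys, nil
}

func main() {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{pvcKeyIndex: indexPodByPVCKey})
	queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcs")
	defer queue.ShutDown()

	// A pod that mounts a PVC; names are purely illustrative.
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "web-0", Namespace: "default"},
		Spec: v1.PodSpec{
			NodeName: "node-1",
			Volumes: []v1.Volume{{
				Name: "data",
				VolumeSource: v1.VolumeSource{
					PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "data-web-0"},
				},
			}},
		},
	}
	if err := indexer.Add(pod); err != nil {
		panic(err)
	}

	// A PVC event arrives: enqueue its namespace/name key, as enqueuePVC does.
	queue.Add("default/data-web-0")

	// One iteration of the pvcWorker loop: Get/Done bracket the work, Forget
	// clears the rate limiter on success, AddRateLimited would schedule a retry.
	key, shutdown := queue.Get()
	if shutdown {
		return
	}
	defer queue.Done(key)

	pods, err := indexer.ByIndex(pvcKeyIndex, key.(string))
	if err != nil {
		queue.AddRateLimited(key)
		return
	}
	queue.Forget(key)
	fmt.Printf("PVC %v is referenced by %d pod(s)\n", key, len(pods))
}
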

View File

@@ -40,6 +40,7 @@ func Test_NewAttachDetachController_Positive(t *testing.T) {
// Act
_, err := NewAttachDetachController(
fakeKubeClient,
nil, /* csiClient */
informerFactory.Core().V1().Pods(),
informerFactory.Core().V1().Nodes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
@@ -214,6 +215,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
// Create the controller
adcObj, err := NewAttachDetachController(
fakeKubeClient,
nil, /* csiClient */
informerFactory.Core().V1().Pods(),
informerFactory.Core().V1().Nodes(),
informerFactory.Core().V1().PersistentVolumeClaims(),

View File

@@ -18,9 +18,9 @@ go_library(
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)
@@ -35,8 +35,8 @@ go_test(
"//pkg/controller/volume/attachdetach/testing:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
],
)

View File

@@ -0,0 +1,50 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["metrics.go"],
importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach/metrics",
visibility = ["//visibility:public"],
deps = [
"//pkg/controller/volume/attachdetach/cache:go_default_library",
"//pkg/controller/volume/attachdetach/util:go_default_library",
"//pkg/volume:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["metrics_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/controller:go_default_library",
"//pkg/controller/volume/attachdetach/cache:go_default_library",
"//pkg/controller/volume/attachdetach/testing:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,201 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"sync"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
"k8s.io/apimachinery/pkg/labels"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/util"
"k8s.io/kubernetes/pkg/volume"
)
const pluginNameNotAvailable = "N/A"
var (
inUseVolumeMetricDesc = prometheus.NewDesc(
prometheus.BuildFQName("", "storage_count", "attachable_volumes_in_use"),
"Measure number of volumes in use",
[]string{"node", "volume_plugin"}, nil)
totalVolumesMetricDesc = prometheus.NewDesc(
prometheus.BuildFQName("", "attachdetach_controller", "total_volumes"),
"Number of volumes in A/D Controller",
[]string{"plugin_name", "state"}, nil)
forcedDetachMetricCounter = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "attachdetach_controller_forced_detaches",
Help: "Number of times the A/D Controller performed a forced detach"})
)
var registerMetrics sync.Once
// Register registers metrics in A/D Controller.
func Register(pvcLister corelisters.PersistentVolumeClaimLister,
pvLister corelisters.PersistentVolumeLister,
podLister corelisters.PodLister,
asw cache.ActualStateOfWorld,
dsw cache.DesiredStateOfWorld,
pluginMgr *volume.VolumePluginMgr) {
registerMetrics.Do(func() {
prometheus.MustRegister(newAttachDetachStateCollector(pvcLister,
podLister,
pvLister,
asw,
dsw,
pluginMgr))
prometheus.MustRegister(forcedDetachMetricCounter)
})
}
type attachDetachStateCollector struct {
pvcLister corelisters.PersistentVolumeClaimLister
podLister corelisters.PodLister
pvLister corelisters.PersistentVolumeLister
asw cache.ActualStateOfWorld
dsw cache.DesiredStateOfWorld
volumePluginMgr *volume.VolumePluginMgr
}
// volumeCount is a map of maps used as a counter, e.g.:
// node 172.168.1.100.ec2.internal has 10 EBS and 3 glusterfs PVC in use:
// {"172.168.1.100.ec2.internal": {"aws-ebs": 10, "glusterfs": 3}}
// state actual_state_of_world contains a total of 10 EBS volumes:
// {"actual_state_of_world": {"aws-ebs": 10}}
type volumeCount map[string]map[string]int64
func (v volumeCount) add(typeKey, counterKey string) {
count, ok := v[typeKey]
if !ok {
count = map[string]int64{}
}
count[counterKey]++
v[typeKey] = count
}
func newAttachDetachStateCollector(
pvcLister corelisters.PersistentVolumeClaimLister,
podLister corelisters.PodLister,
pvLister corelisters.PersistentVolumeLister,
asw cache.ActualStateOfWorld,
dsw cache.DesiredStateOfWorld,
pluginMgr *volume.VolumePluginMgr) *attachDetachStateCollector {
return &attachDetachStateCollector{pvcLister, podLister, pvLister, asw, dsw, pluginMgr}
}
// Check if our collector implements necessary collector interface
var _ prometheus.Collector = &attachDetachStateCollector{}
func (collector *attachDetachStateCollector) Describe(ch chan<- *prometheus.Desc) {
ch <- inUseVolumeMetricDesc
ch <- totalVolumesMetricDesc
}
func (collector *attachDetachStateCollector) Collect(ch chan<- prometheus.Metric) {
nodeVolumeMap := collector.getVolumeInUseCount()
for nodeName, pluginCount := range nodeVolumeMap {
for pluginName, count := range pluginCount {
metric, err := prometheus.NewConstMetric(inUseVolumeMetricDesc,
prometheus.GaugeValue,
float64(count),
string(nodeName),
pluginName)
if err != nil {
glog.Warningf("Failed to create metric : %v", err)
}
ch <- metric
}
}
stateVolumeMap := collector.getTotalVolumesCount()
for stateName, pluginCount := range stateVolumeMap {
for pluginName, count := range pluginCount {
metric, err := prometheus.NewConstMetric(totalVolumesMetricDesc,
prometheus.GaugeValue,
float64(count),
pluginName,
string(stateName))
if err != nil {
glog.Warningf("Failed to create metric : %v", err)
}
ch <- metric
}
}
}
func (collector *attachDetachStateCollector) getVolumeInUseCount() volumeCount {
pods, err := collector.podLister.List(labels.Everything())
if err != nil {
glog.Errorf("Error getting pod list")
return nil
}
nodeVolumeMap := make(volumeCount)
for _, pod := range pods {
if len(pod.Spec.Volumes) <= 0 {
continue
}
if pod.Spec.NodeName == "" {
continue
}
for _, podVolume := range pod.Spec.Volumes {
volumeSpec, err := util.CreateVolumeSpec(podVolume, pod.Namespace, collector.pvcLister, collector.pvLister)
if err != nil {
continue
}
volumePlugin, err := collector.volumePluginMgr.FindPluginBySpec(volumeSpec)
if err != nil {
continue
}
nodeVolumeMap.add(pod.Spec.NodeName, volumePlugin.GetPluginName())
}
}
return nodeVolumeMap
}
func (collector *attachDetachStateCollector) getTotalVolumesCount() volumeCount {
stateVolumeMap := make(volumeCount)
for _, v := range collector.dsw.GetVolumesToAttach() {
if plugin, err := collector.volumePluginMgr.FindPluginBySpec(v.VolumeSpec); err == nil {
pluginName := pluginNameNotAvailable
if plugin != nil {
pluginName = plugin.GetPluginName()
}
stateVolumeMap.add("desired_state_of_world", pluginName)
}
}
for _, v := range collector.asw.GetAttachedVolumes() {
if plugin, err := collector.volumePluginMgr.FindPluginBySpec(v.VolumeSpec); err == nil {
pluginName := pluginNameNotAvailable
if plugin != nil {
pluginName = plugin.GetPluginName()
}
stateVolumeMap.add("actual_state_of_world", pluginName)
}
}
return stateVolumeMap
}
// RecordForcedDetachMetric registers a forced detach metric.
func RecordForcedDetachMetric() {
forcedDetachMetricCounter.Inc()
}
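
Editor's note: attachDetachStateCollector uses the Prometheus custom-collector pattern — nothing is stored between scrapes; Collect recomputes the counts and emits them with NewConstMetric instead of keeping pre-registered vectors. A minimal sketch of the same pattern follows; the hard-coded node counts and the ":9090" listen address are illustrative assumptions, while the descriptor mirrors inUseVolumeMetricDesc above.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// volumesInUseCollector follows the same shape as attachDetachStateCollector:
// per-node, per-plugin counts are emitted as constant gauges at scrape time.
type volumesInUseCollector struct {
	desc *prometheus.Desc
}

func (c *volumesInUseCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

func (c *volumesInUseCollector) Collect(ch chan<- prometheus.Metric) {
	// In the real collector these counts come from the pod lister and the
	// volume plugin manager; they are hard-coded here for illustration.
	counts := map[string]float64{"node-1": 3, "node-2": 1}
	for node, n := range counts {
		m, err := prometheus.NewConstMetric(c.desc, prometheus.GaugeValue, n, node, "fake-plugin")
		if err != nil {
			continue
		}
		ch <- m
	}
}

func main() {
	collector := &volumesInUseCollector{
		desc: prometheus.NewDesc(
			prometheus.BuildFQName("", "storage_count", "attachable_volumes_in_use"),
			"Measure number of volumes in use",
			[]string{"node", "volume_plugin"}, nil),
	}
	prometheus.MustRegister(collector)

	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":9090", nil)
}
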

View File

@@ -0,0 +1,180 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"testing"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util/types"
)
func TestVolumesInUseMetricCollection(t *testing.T) {
fakeVolumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
fakeClient := &fake.Clientset{}
fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc())
fakePodInformer := fakeInformerFactory.Core().V1().Pods()
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "metric-test-pod",
UID: "metric-test-pod-uid",
Namespace: "metric-test",
},
Spec: v1.PodSpec{
NodeName: "metric-test-host",
Volumes: []v1.Volume{
{
Name: "metric-test-volume-name",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "metric-test-pvc",
},
},
},
},
},
Status: v1.PodStatus{
Phase: v1.PodPhase("Running"),
},
}
fakePodInformer.Informer().GetStore().Add(pod)
pvcInformer := fakeInformerFactory.Core().V1().PersistentVolumeClaims()
pvInformer := fakeInformerFactory.Core().V1().PersistentVolumes()
pvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "metric-test-pvc",
Namespace: "metric-test",
UID: "metric-test-pvc-1",
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany, v1.ReadWriteOnce},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("2G"),
},
},
VolumeName: "test-metric-pv-1",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
UID: "test-metric-pv-1",
Name: "test-metric-pv-1",
},
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("5G"),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{},
},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany},
// this one we're pretending is already bound
ClaimRef: &v1.ObjectReference{UID: "metric-test-pvc-1", Namespace: "metric-test"},
},
}
pvcInformer.Informer().GetStore().Add(pvc)
pvInformer.Informer().GetStore().Add(pv)
pvcLister := pvcInformer.Lister()
pvLister := pvInformer.Lister()
metricCollector := newAttachDetachStateCollector(
pvcLister,
fakePodInformer.Lister(),
pvLister,
nil,
nil,
fakeVolumePluginMgr)
nodeUseMap := metricCollector.getVolumeInUseCount()
if len(nodeUseMap) < 1 {
t.Errorf("Expected one volume in use got %d", len(nodeUseMap))
}
testNodeMetric := nodeUseMap["metric-test-host"]
pluginUseCount, ok := testNodeMetric["fake-plugin"]
if !ok {
t.Errorf("Expected fake plugin pvc got nothing")
}
if pluginUseCount < 1 {
t.Errorf("Expected at least in-use volume metric got %d", pluginUseCount)
}
}
func TestTotalVolumesMetricCollection(t *testing.T) {
fakeVolumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(fakeVolumePluginMgr)
asw := cache.NewActualStateOfWorld(fakeVolumePluginMgr)
podName := "pod-uid"
volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName, false)
_, err := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName)
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
asw.AddVolumeNode(volumeName, volumeSpec, nodeName, "")
metricCollector := newAttachDetachStateCollector(
nil,
nil,
nil,
asw,
dsw,
fakeVolumePluginMgr)
totalVolumesMap := metricCollector.getTotalVolumesCount()
if len(totalVolumesMap) != 2 {
t.Errorf("Expected 2 states, got %d", len(totalVolumesMap))
}
dswCount, ok := totalVolumesMap["desired_state_of_world"]
if !ok {
t.Errorf("Expected desired_state_of_world, got nothing")
}
fakePluginCount := dswCount["fake-plugin"]
if fakePluginCount != 1 {
t.Errorf("Expected 1 fake-plugin volume in DesiredStateOfWorld, got %d", fakePluginCount)
}
aswCount, ok := totalVolumesMap["actual_state_of_world"]
if !ok {
t.Errorf("Expected actual_state_of_world, got nothing")
}
fakePluginCount = aswCount["fake-plugin"]
if fakePluginCount != 1 {
t.Errorf("Expected 1 fake-plugin volume in ActualStateOfWorld, got %d", fakePluginCount)
}
}

View File

@@ -15,13 +15,13 @@ go_library(
"//pkg/controller/volume/attachdetach/util:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
],
)
@@ -47,10 +47,10 @@ go_test(
"//pkg/controller/volume/attachdetach/cache:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
],
)

View File

@@ -12,16 +12,17 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler",
deps = [
"//pkg/controller/volume/attachdetach/cache:go_default_library",
"//pkg/controller/volume/attachdetach/metrics:go_default_library",
"//pkg/controller/volume/attachdetach/statusupdater:go_default_library",
"//pkg/kubelet/events:go_default_library",
"//pkg/util/goroutinemap/exponentialbackoff:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)
@@ -38,11 +39,11 @@ go_test(
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
],
)

View File

@@ -30,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/metrics"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
kevents "k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
@@ -151,10 +152,6 @@ func (rc *reconciler) isMultiAttachForbidden(volumeSpec *volume.Spec) bool {
// multi-attach. We trust in the individual volume implementations to not allow unsupported access modes
if volumeSpec.PersistentVolume != nil {
// Check for persistent volume types which do not fail when trying to multi-attach
if volumeSpec.PersistentVolume.Spec.VsphereVolume != nil {
return false
}
if len(volumeSpec.PersistentVolume.Spec.AccessModes) == 0 {
// No access mode specified so we don't know for sure. Let the attacher fail if needed
return false
@@ -232,6 +229,7 @@ func (rc *reconciler) reconcile() {
if !timeout {
glog.Infof(attachedVolume.GenerateMsgDetailed("attacherDetacher.DetachVolume started", ""))
} else {
metrics.RecordForcedDetachMetric()
glog.Warningf(attachedVolume.GenerateMsgDetailed("attacherDetacher.DetachVolume started", fmt.Sprintf("This volume is not safe to detach, but maxWaitForUnmountDuration %v expired, force detaching", rc.maxWaitForUnmountDuration)))
}
}

View File

@@ -15,12 +15,12 @@ go_library(
deps = [
"//pkg/controller/volume/attachdetach/cache:go_default_library",
"//pkg/util/node:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
],
)

View File

@@ -12,14 +12,14 @@ go_library(
deps = [
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
],
)

View File

@@ -294,6 +294,10 @@ func (plugin *TestPlugin) NewAttacher() (volume.Attacher, error) {
return &attacher, nil
}
func (plugin *TestPlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return plugin.NewAttacher()
}
func (plugin *TestPlugin) NewDetacher() (volume.Detacher, error) {
detacher := testPluginDetacher{
detachedVolumeMap: plugin.detachedVolumeMap,
@@ -302,6 +306,10 @@ func (plugin *TestPlugin) NewDetacher() (volume.Detacher, error) {
return &detacher, nil
}
func (plugin *TestPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return plugin.NewDetacher()
}
func (plugin *TestPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
return []string{}, nil
}

View File

@@ -13,10 +13,10 @@ go_library(
"//pkg/controller/volume/attachdetach/cache:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
],
)

View File

@@ -68,7 +68,7 @@ func CreateVolumeSpec(podVolume v1.Volume, podNamespace string, pvcLister coreli
glog.V(10).Infof(
"Extracted volumeSpec (%v) from bound PV (pvName %q) and PVC (ClaimName %q/%q pvcUID %v)",
volumeSpec.Name,
volumeSpec.Name(),
pvName,
podNamespace,
pvcSource.ClaimName,

View File

@@ -30,4 +30,5 @@ const (
ProvisioningCleanupFailed = "ProvisioningCleanupFailed"
ProvisioningSucceeded = "ProvisioningSucceeded"
WaitForFirstConsumer = "WaitForFirstConsumer"
ExternalExpanding = "ExternalExpanding"
)

View File

@@ -16,29 +16,30 @@ go_library(
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/volume/events:go_default_library",
"//pkg/controller/volume/expand/cache:go_default_library",
"//pkg/util/goroutinemap/exponentialbackoff:go_default_library",
"//pkg/util/io:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//staging/src/k8s.io/api/authentication/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)

View File

@@ -11,16 +11,15 @@ go_library(
srcs = ["volume_resize_map.go"],
importpath = "k8s.io/kubernetes/pkg/controller/volume/expand/cache",
deps = [
"//pkg/util/strings:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
@@ -43,10 +42,10 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/volume/util/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
],
)

View File

@@ -28,7 +28,6 @@ import (
commontypes "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/types"
)
@@ -78,7 +77,7 @@ func (pvcr *PVCWithResizeRequest) UniquePVCKey() types.UniquePVCName {
// QualifiedName returns namespace and name combination of the PVC
func (pvcr *PVCWithResizeRequest) QualifiedName() string {
return strings.JoinQualifiedName(pvcr.PVC.Namespace, pvcr.PVC.Name)
return util.GetPersistentVolumeClaimQualifiedName(pvcr.PVC)
}
// NewVolumeResizeMap returns new VolumeResizeMap which acts as a cache

View File

@@ -37,12 +37,14 @@ import (
corelisters "k8s.io/client-go/listers/core/v1"
kcache "k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/volume/events"
"k8s.io/kubernetes/pkg/controller/volume/expand/cache"
"k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
@@ -118,13 +120,13 @@ func NewExpandController(
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "volume_expand"})
expc.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "volume_expand"})
blkutil := volumepathhandler.NewBlockVolumePathHandler()
expc.opExecutor = operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
kubeClient,
&expc.volumePluginMgr,
recorder,
expc.recorder,
false,
blkutil))
@@ -141,6 +143,7 @@ func NewExpandController(
expc.resizeMap,
expc.pvcLister,
expc.pvLister,
&expc.volumePluginMgr,
kubeClient)
return expc, nil
}
@@ -180,9 +183,9 @@ func (expc *expandController) deletePVC(obj interface{}) {
}
func (expc *expandController) pvcUpdate(oldObj, newObj interface{}) {
oldPvc, ok := oldObj.(*v1.PersistentVolumeClaim)
oldPVC, ok := oldObj.(*v1.PersistentVolumeClaim)
if oldPvc == nil || !ok {
if oldPVC == nil || !ok {
return
}
@@ -193,7 +196,7 @@ func (expc *expandController) pvcUpdate(oldObj, newObj interface{}) {
}
newSize := newPVC.Spec.Resources.Requests[v1.ResourceStorage]
oldSize := oldPvc.Spec.Resources.Requests[v1.ResourceStorage]
oldSize := oldPVC.Spec.Resources.Requests[v1.ResourceStorage]
// We perform additional checks inside resizeMap.AddPVCUpdate function
// this check here exists to ensure - we do not consider every
@@ -201,7 +204,20 @@ func (expc *expandController) pvcUpdate(oldObj, newObj interface{}) {
if newSize.Cmp(oldSize) > 0 {
pv, err := getPersistentVolume(newPVC, expc.pvLister)
if err != nil {
glog.V(5).Infof("Error getting Persistent Volume for pvc %q : %v", newPVC.UID, err)
glog.V(5).Infof("Error getting Persistent Volume for PVC %q : %v", newPVC.UID, err)
return
}
// Filter PVCs for which the corresponding volume plugins don't allow expansion.
volumeSpec := volume.NewSpecFromPersistentVolume(pv, false)
volumePlugin, err := expc.volumePluginMgr.FindExpandablePluginBySpec(volumeSpec)
if err != nil || volumePlugin == nil {
err = fmt.Errorf("didn't find a plugin capable of expanding the volume; " +
"waiting for an external controller to process this PVC")
expc.recorder.Event(newPVC, v1.EventTypeNormal, events.ExternalExpanding,
fmt.Sprintf("Ignoring the PVC: %v.", err))
glog.V(3).Infof("Ignoring the PVC %q (uid: %q) : %v.",
util.GetPersistentVolumeClaimQualifiedName(newPVC), newPVC.UID, err)
return
}
expc.resizeMap.AddPVCUpdate(newPVC, pv)
@@ -268,10 +284,6 @@ func (expc *expandController) GetExec(pluginName string) mount.Exec {
return mount.NewOsExec()
}
func (expc *expandController) GetWriter() io.Writer {
return nil
}
func (expc *expandController) GetHostName() string {
return ""
}
@@ -313,3 +325,8 @@ func (expc *expandController) GetNodeName() types.NodeName {
func (expc *expandController) GetEventRecorder() record.EventRecorder {
return expc.recorder
}
func (expc *expandController) GetCSIClient() csiclientset.Interface {
// No volume plugin in expand controller needs csi.storage.k8s.io
return nil
}
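
Editor's note: when no in-tree plugin can expand a volume, the controller above records an ExternalExpanding event on the PVC and leaves it for an external controller. A minimal sketch of emitting and observing that kind of event with client-go's fake recorder; the PVC name and message text are illustrative, not taken from this commit.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/record"
)

func main() {
	// FakeRecorder buffers events on a channel, which makes it easy to see
	// what the controller would report without an apiserver.
	recorder := record.NewFakeRecorder(10)

	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "data-web-0", Namespace: "default"},
	}

	// The same kind of event the expand controller emits when no in-tree
	// plugin can expand the volume.
	recorder.Event(pvc, v1.EventTypeNormal, "ExternalExpanding",
		"Ignoring the PVC: didn't find a plugin capable of expanding the volume; waiting for an external controller to process this PVC.")

	fmt.Println(<-recorder.Events)
}
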

View File

@@ -21,15 +21,23 @@ limitations under the License.
package expand
import (
"fmt"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/controller/volume/events"
"k8s.io/kubernetes/pkg/controller/volume/expand/cache"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
// PVCPopulator iterates through PVCs and checks if for bound PVCs
@@ -39,11 +47,13 @@ type PVCPopulator interface {
}
type pvcPopulator struct {
loopPeriod time.Duration
resizeMap cache.VolumeResizeMap
pvcLister corelisters.PersistentVolumeClaimLister
pvLister corelisters.PersistentVolumeLister
kubeClient clientset.Interface
loopPeriod time.Duration
resizeMap cache.VolumeResizeMap
pvcLister corelisters.PersistentVolumeClaimLister
pvLister corelisters.PersistentVolumeLister
kubeClient clientset.Interface
volumePluginMgr *volume.VolumePluginMgr
recorder record.EventRecorder
}
func NewPVCPopulator(
@@ -51,14 +61,19 @@ func NewPVCPopulator(
resizeMap cache.VolumeResizeMap,
pvcLister corelisters.PersistentVolumeClaimLister,
pvLister corelisters.PersistentVolumeLister,
volumePluginMgr *volume.VolumePluginMgr,
kubeClient clientset.Interface) PVCPopulator {
populator := &pvcPopulator{
loopPeriod: loopPeriod,
pvcLister: pvcLister,
pvLister: pvLister,
resizeMap: resizeMap,
kubeClient: kubeClient,
loopPeriod: loopPeriod,
pvcLister: pvcLister,
pvLister: pvLister,
resizeMap: resizeMap,
volumePluginMgr: volumePluginMgr,
kubeClient: kubeClient,
}
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
populator.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "volume_expand"})
return populator
}
@@ -77,9 +92,25 @@ func (populator *pvcPopulator) Sync() {
pv, err := getPersistentVolume(pvc, populator.pvLister)
if err != nil {
glog.V(5).Infof("Error getting persistent volume for pvc %q : %v", pvc.UID, err)
glog.V(5).Infof("Error getting persistent volume for PVC %q : %v", pvc.UID, err)
continue
}
// Filter PVCs for which the corresponding volume plugins don't allow expansion.
pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
pvcStatusSize := pvc.Status.Capacity[v1.ResourceStorage]
volumeSpec := volume.NewSpecFromPersistentVolume(pv, false)
volumePlugin, err := populator.volumePluginMgr.FindExpandablePluginBySpec(volumeSpec)
if (err != nil || volumePlugin == nil) && pvcStatusSize.Cmp(pvcSize) < 0 {
err = fmt.Errorf("didn't find a plugin capable of expanding the volume; " +
"waiting for an external controller to process this PVC")
populator.recorder.Event(pvc, v1.EventTypeNormal, events.ExternalExpanding,
fmt.Sprintf("Ignoring the PVC: %v.", err))
glog.V(3).Infof("Ignoring the PVC %q (uid: %q) : %v.",
util.GetPersistentVolumeClaimQualifiedName(pvc), pvc.UID, err)
continue
}
// We are only going to add PVCs which are:
// - bound
// - pvc.Spec.Size > pvc.Status.Size

View File

@@ -28,36 +28,36 @@ go_library(
"//pkg/features:go_default_library",
"//pkg/util/goroutinemap:go_default_library",
"//pkg/util/goroutinemap/exponentialbackoff:go_default_library",
"//pkg/util/io:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/recyclerclient:go_default_library",
"//staging/src/k8s.io/api/authentication/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/informers/storage/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/tools/reference:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/informers/storage/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/storage/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/tools/reference:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
@@ -83,28 +83,28 @@ go_test(
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/recyclerclient:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/tools/reference:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/storage/v1:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/tools/reference:go_default_library",
],
)

View File

@@ -3,3 +3,9 @@ approvers:
- saad-ali
- thockin
- msau42 # for volume scheduling
reviewers:
- jsafrane
- saad-ali
- thockin
- msau42 # for volume scheduling
- lichuqiang # for volume scheduling

View File

@@ -10,9 +10,9 @@ go_library(
srcs = ["metrics.go"],
importpath = "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics",
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
],
)

View File

@@ -56,6 +56,8 @@ type PVCLister interface {
func Register(pvLister PVLister, pvcLister PVCLister) {
registerMetrics.Do(func() {
prometheus.MustRegister(newPVAndPVCCountCollector(pvLister, pvcLister))
prometheus.MustRegister(volumeOperationMetric)
prometheus.MustRegister(volumeOperationErrorsMetric)
})
}
@@ -89,6 +91,19 @@ var (
prometheus.BuildFQName("", pvControllerSubsystem, unboundPVCKey),
"Gauge measuring number of persistent volume claim currently unbound",
[]string{namespaceLabel}, nil)
volumeOperationMetric = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "volume_operation_total_seconds",
Help: "Total volume operation time",
},
[]string{"plugin_name", "operation_name"})
volumeOperationErrorsMetric = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "volume_operation_total_errors",
Help: "Total volume operation erros",
},
[]string{"plugin_name", "operation_name"})
)
func (collector *pvAndPVCCountCollector) Describe(ch chan<- *prometheus.Desc) {
@@ -182,3 +197,15 @@ func (collector *pvAndPVCCountCollector) pvcCollect(ch chan<- prometheus.Metric)
ch <- metric
}
}
// RecordVolumeOperationMetric records the latency and errors of volume operations.
func RecordVolumeOperationMetric(pluginName, opName string, timeTaken float64, err error) {
if pluginName == "" {
pluginName = "N/A"
}
if err != nil {
volumeOperationErrorsMetric.WithLabelValues(pluginName, opName).Inc()
return
}
volumeOperationMetric.WithLabelValues(pluginName, opName).Observe(timeTaken)
}
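
The two collectors above follow the usual client_golang pattern: time the operation at the call site and route the result through RecordVolumeOperationMetric, which observes the histogram on success and increments the counter on failure. The standalone sketch below mirrors that pattern with placeholder metric variables and a made-up delete operation; none of these names come from the controller itself.

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	opDuration = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name: "volume_operation_total_seconds",
			Help: "Total volume operation time",
		},
		[]string{"plugin_name", "operation_name"})
	opErrors = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "volume_operation_total_errors",
			Help: "Total volume operation errors",
		},
		[]string{"plugin_name", "operation_name"})
)

// recordOp mirrors RecordVolumeOperationMetric: an error increments the
// counter, a success observes the latency in the histogram.
func recordOp(pluginName, opName string, seconds float64, err error) {
	if pluginName == "" {
		pluginName = "N/A"
	}
	if err != nil {
		opErrors.WithLabelValues(pluginName, opName).Inc()
		return
	}
	opDuration.WithLabelValues(pluginName, opName).Observe(seconds)
}

func main() {
	prometheus.MustRegister(opDuration, opErrors)

	// deleteOp stands in for deleteVolumeOperation; it is not a real plugin call.
	deleteOp := func() error { time.Sleep(10 * time.Millisecond); return nil }

	start := time.Now()
	err := deleteOp()
	recordOp("example.io/fake-plugin", "delete", time.Since(start).Seconds(), err)
	fmt.Println("recorded delete metric, err:", err)
}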

View File

@@ -40,6 +40,7 @@ import (
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/volume/events"
"k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/goroutinemap"
"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
@@ -122,6 +123,8 @@ const annBindCompleted = "pv.kubernetes.io/bind-completed"
// the binding (PV->PVC or PVC->PV) was installed by the controller. The
// absence of this annotation means the binding was done by the user (i.e.
// pre-bound). Value of this annotation does not matter.
// External PV binders must bind PV the same way as PV controller, otherwise PV
// controller may not handle it correctly.
const annBoundByController = "pv.kubernetes.io/bound-by-controller"
// This annotation is added to a PV that has been dynamically provisioned by
@@ -137,7 +140,7 @@ const annStorageProvisioner = "volume.beta.kubernetes.io/storage-provisioner"
// This annotation is added to a PVC that has been triggered by scheduler to
// be dynamically provisioned. Its value is the name of the selected node.
const annSelectedNode = "volume.alpha.kubernetes.io/selected-node"
const annSelectedNode = "volume.kubernetes.io/selected-node"
// If the provisioner name in a storage class is set to "kubernetes.io/no-provisioner",
// then dynamic provisioning is not supported by the storage.
@@ -264,7 +267,7 @@ func checkVolumeSatisfyClaim(volume *v1.PersistentVolume, claim *v1.PersistentVo
requestedClass := v1helper.GetPersistentVolumeClaimClass(claim)
if v1helper.GetPersistentVolumeClass(volume) != requestedClass {
return fmt.Errorf("storageClasseName does not match")
return fmt.Errorf("storageClassName does not match")
}
isMisMatch, err := checkVolumeModeMisMatches(&claim.Spec, &volume.Spec)
@@ -287,14 +290,12 @@ func (ctrl *PersistentVolumeController) shouldDelayBinding(claim *v1.PersistentV
return false, nil
}
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
// When feature DynamicProvisioningScheduling enabled,
// Scheduler signal to the PV controller to start dynamic
// provisioning by setting the "annSelectedNode" annotation
// in the PVC
if _, ok := claim.Annotations[annSelectedNode]; ok {
return false, nil
}
// When the VolumeScheduling feature is enabled, the scheduler
// signals the PV controller to start dynamic provisioning by
// setting the "annSelectedNode" annotation on the PVC
if _, ok := claim.Annotations[annSelectedNode]; ok {
return false, nil
}
className := v1helper.GetPersistentVolumeClaimClass(claim)
@@ -544,6 +545,30 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume)
if err != nil {
return err
}
if !found && metav1.HasAnnotation(volume.ObjectMeta, annBoundByController) {
// If PV is bound by external PV binder (e.g. kube-scheduler), it's
// possible on heavy load that corresponding PVC is not synced to
// controller local cache yet. So we need to double-check PVC in
// 1) informer cache
// 2) apiserver if not found in informer cache
// to make sure we will not reclaim a PV wrongly.
// Note that only non-released and non-failed volumes will be
// updated to Released state when the PVC does not exist.
if volume.Status.Phase != v1.VolumeReleased && volume.Status.Phase != v1.VolumeFailed {
obj, err = ctrl.claimLister.PersistentVolumeClaims(volume.Spec.ClaimRef.Namespace).Get(volume.Spec.ClaimRef.Name)
if err != nil && !apierrs.IsNotFound(err) {
return err
}
found = !apierrs.IsNotFound(err)
if !found {
obj, err = ctrl.kubeClient.CoreV1().PersistentVolumeClaims(volume.Spec.ClaimRef.Namespace).Get(volume.Spec.ClaimRef.Name, metav1.GetOptions{})
if err != nil && !apierrs.IsNotFound(err) {
return err
}
found = !apierrs.IsNotFound(err)
}
}
}
if !found {
glog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s not found", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef))
// Fall through with claim = nil
@@ -1058,8 +1083,12 @@ func (ctrl *PersistentVolumeController) reclaimVolume(volume *v1.PersistentVolum
case v1.PersistentVolumeReclaimDelete:
glog.V(4).Infof("reclaimVolume[%s]: policy is Delete", volume.Name)
opName := fmt.Sprintf("delete-%s[%s]", volume.Name, string(volume.UID))
startTime := time.Now()
ctrl.scheduleOperation(opName, func() error {
return ctrl.deleteVolumeOperation(volume)
pluginName, err := ctrl.deleteVolumeOperation(volume)
timeTaken := time.Since(startTime).Seconds()
metrics.RecordVolumeOperationMetric(pluginName, "delete", timeTaken, err)
return err
})
default:
@@ -1157,7 +1186,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis
// deleteVolumeOperation deletes a volume. This method is running in standalone
// goroutine and already has all necessary locks.
func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.PersistentVolume) error {
func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.PersistentVolume) (string, error) {
glog.V(4).Infof("deleteVolumeOperation [%s] started", volume.Name)
// This method may have been waiting for a volume lock for some time.
@@ -1166,19 +1195,19 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.Persist
newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{})
if err != nil {
glog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err)
return nil
return "", nil
}
needsReclaim, err := ctrl.isVolumeReleased(newVolume)
if err != nil {
glog.V(3).Infof("error reading claim for volume %q: %v", volume.Name, err)
return nil
return "", nil
}
if !needsReclaim {
glog.V(3).Infof("volume %q no longer needs deletion, skipping", volume.Name)
return nil
return "", nil
}
deleted, err := ctrl.doDeleteVolume(volume)
pluginName, deleted, err := ctrl.doDeleteVolume(volume)
if err != nil {
// Delete failed, update the volume and emit an event.
glog.V(3).Infof("deletion of volume %q failed: %v", volume.Name, err)
@@ -1192,17 +1221,17 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.Persist
if _, err := ctrl.updateVolumePhaseWithEvent(volume, v1.VolumeFailed, v1.EventTypeWarning, events.VolumeFailedDelete, err.Error()); err != nil {
glog.V(4).Infof("deleteVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err)
// Save failed, retry on the next deletion attempt
return err
return pluginName, err
}
}
// Despite the volume being Failed, the controller will retry deleting
// the volume in every syncVolume() call.
return err
return pluginName, err
}
if !deleted {
// The volume waits for deletion by an external plugin. Do nothing.
return nil
return pluginName, nil
}
glog.V(4).Infof("deleteVolumeOperation [%s]: success", volume.Name)
@@ -1213,9 +1242,9 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.Persist
// cache of "recently deleted volumes" and avoid unnecessary deletion,
// this is left out as future optimization.
glog.V(3).Infof("failed to delete volume %q from database: %v", volume.Name, err)
return nil
return pluginName, nil
}
return nil
return pluginName, nil
}
// isVolumeReleased returns true if given volume is released and can be recycled
@@ -1292,43 +1321,44 @@ func (ctrl *PersistentVolumeController) isVolumeUsed(pv *v1.PersistentVolume) ([
return podNames.List(), podNames.Len() != 0, nil
}
// doDeleteVolume finds appropriate delete plugin and deletes given volume. It
// returns 'true', when the volume was deleted and 'false' when the volume
// cannot be deleted because of the deleter is external. No error should be
// reported in this case.
func (ctrl *PersistentVolumeController) doDeleteVolume(volume *v1.PersistentVolume) (bool, error) {
// doDeleteVolume finds appropriate delete plugin and deletes given volume, returning
// the volume plugin name. Also, it returns 'true' when the volume was deleted and
// 'false' when the volume cannot be deleted because the deleter is external. No
// error should be reported in this case.
func (ctrl *PersistentVolumeController) doDeleteVolume(volume *v1.PersistentVolume) (string, bool, error) {
glog.V(4).Infof("doDeleteVolume [%s]", volume.Name)
var err error
plugin, err := ctrl.findDeletablePlugin(volume)
if err != nil {
return false, err
return "", false, err
}
if plugin == nil {
// External deleter is requested, do nothing
glog.V(3).Infof("external deleter for volume %q requested, ignoring", volume.Name)
return false, nil
return "", false, nil
}
// Plugin found
glog.V(5).Infof("found a deleter plugin %q for volume %q", plugin.GetPluginName(), volume.Name)
pluginName := plugin.GetPluginName()
glog.V(5).Infof("found a deleter plugin %q for volume %q", pluginName, volume.Name)
spec := vol.NewSpecFromPersistentVolume(volume, false)
deleter, err := plugin.NewDeleter(spec)
if err != nil {
// Cannot create deleter
return false, fmt.Errorf("Failed to create deleter for volume %q: %v", volume.Name, err)
return pluginName, false, fmt.Errorf("Failed to create deleter for volume %q: %v", volume.Name, err)
}
opComplete := util.OperationCompleteHook(plugin.GetPluginName(), "volume_delete")
opComplete := util.OperationCompleteHook(pluginName, "volume_delete")
err = deleter.Delete()
opComplete(&err)
if err != nil {
// Deleter failed
return false, err
return pluginName, false, err
}
glog.V(2).Infof("volume %q deleted", volume.Name)
return true, nil
return pluginName, true, nil
}
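
The opComplete(&err) idiom used above (and again in provisionClaimOperation) is a small closure pattern: the hook captures the start time when the operation begins and is later handed a pointer to the error variable, so it sees the operation's final outcome. A standalone sketch of that shape, with printing as a placeholder for whatever the real util.OperationCompleteHook records:

package main

import (
	"errors"
	"fmt"
	"time"
)

// operationComplete mimics the shape of util.OperationCompleteHook: it notes
// the start time up front and returns a closure that is invoked with a
// pointer to the operation's error once the operation finishes.
func operationComplete(plugin, op string) func(*error) {
	start := time.Now()
	return func(errp *error) {
		seconds := time.Since(start).Seconds()
		if errp != nil && *errp != nil {
			fmt.Printf("%s %s failed after %.3fs: %v\n", plugin, op, seconds, *errp)
			return
		}
		fmt.Printf("%s %s succeeded in %.3fs\n", plugin, op, seconds)
	}
}

func main() {
	opComplete := operationComplete("example.io/fake-plugin", "volume_delete")
	err := errors.New("simulated deleter failure") // stands in for deleter.Delete()
	opComplete(&err)                               // the closure sees the final error value
}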
// provisionClaim starts new asynchronous operation to provision a claim if
@@ -1339,16 +1369,19 @@ func (ctrl *PersistentVolumeController) provisionClaim(claim *v1.PersistentVolum
}
glog.V(4).Infof("provisionClaim[%s]: started", claimToClaimKey(claim))
opName := fmt.Sprintf("provision-%s[%s]", claimToClaimKey(claim), string(claim.UID))
startTime := time.Now()
ctrl.scheduleOperation(opName, func() error {
ctrl.provisionClaimOperation(claim)
return nil
pluginName, err := ctrl.provisionClaimOperation(claim)
timeTaken := time.Since(startTime).Seconds()
metrics.RecordVolumeOperationMetric(pluginName, "provision", timeTaken, err)
return err
})
return nil
}
// provisionClaimOperation provisions a volume. This method is running in
// standalone goroutine and already has all necessary locks.
func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.PersistentVolumeClaim) {
func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.PersistentVolumeClaim) (string, error) {
claimClass := v1helper.GetPersistentVolumeClaimClass(claim)
glog.V(4).Infof("provisionClaimOperation [%s] started, class: %q", claimToClaimKey(claim), claimClass)
@@ -1358,7 +1391,12 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
glog.V(2).Infof("error finding provisioning plugin for claim %s: %v", claimToClaimKey(claim), err)
// The controller will retry provisioning the volume in every
// syncVolume() call.
return
return "", err
}
var pluginName string
if plugin != nil {
pluginName = plugin.GetPluginName()
}
// Add provisioner annotation so external provisioners know when to start
@@ -1366,7 +1404,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
if err != nil {
// Save failed, the controller will retry in the next sync
glog.V(2).Infof("error saving claim %s: %v", claimToClaimKey(claim), err)
return
return pluginName, err
}
claim = newClaim
@@ -1377,7 +1415,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
msg := fmt.Sprintf("waiting for a volume to be created, either by external provisioner %q or manually created by system administrator", storageClass.Provisioner)
ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, events.ExternalProvisioning, msg)
glog.V(3).Infof("provisioning claim %q: %s", claimToClaimKey(claim), msg)
return
return pluginName, nil
}
// internal provisioning
@@ -1391,7 +1429,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
if err == nil && volume != nil {
// Volume has been already provisioned, nothing to do.
glog.V(4).Infof("provisionClaimOperation [%s]: volume already exists, skipping", claimToClaimKey(claim))
return
return pluginName, err
}
// Prepare a claimRef to the claim early (to fail before a volume is
@@ -1399,7 +1437,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
claimRef, err := ref.GetReference(scheme.Scheme, claim)
if err != nil {
glog.V(3).Infof("unexpected error getting claim reference: %v", err)
return
return pluginName, err
}
// Gather provisioning options
@@ -1424,7 +1462,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
strerr := fmt.Sprintf("Mount options are not supported by the provisioner but StorageClass %q has mount options %v", storageClass.Name, options.MountOptions)
glog.V(2).Infof("Mount options are not supported by the provisioner but claim %q's StorageClass %q has mount options %v", claimToClaimKey(claim), storageClass.Name, options.MountOptions)
ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
return
return pluginName, fmt.Errorf("provisioner %q doesn't support mount options", plugin.GetPluginName())
}
// Provision the volume
@@ -1433,29 +1471,26 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
strerr := fmt.Sprintf("Failed to create provisioner: %v", err)
glog.V(2).Infof("failed to create provisioner for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err)
ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
return
return pluginName, err
}
var selectedNode *v1.Node = nil
var allowedTopologies []v1.TopologySelectorTerm = nil
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
if nodeName, ok := claim.Annotations[annSelectedNode]; ok {
selectedNode, err = ctrl.NodeLister.Get(nodeName)
if err != nil {
strerr := fmt.Sprintf("Failed to get target node: %v", err)
glog.V(3).Infof("unexpected error getting target node %q for claim %q: %v", nodeName, claimToClaimKey(claim), err)
ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
return
}
if nodeName, ok := claim.Annotations[annSelectedNode]; ok {
selectedNode, err = ctrl.NodeLister.Get(nodeName)
if err != nil {
strerr := fmt.Sprintf("Failed to get target node: %v", err)
glog.V(3).Infof("unexpected error getting target node %q for claim %q: %v", nodeName, claimToClaimKey(claim), err)
ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
return pluginName, err
}
allowedTopologies = storageClass.AllowedTopologies
}
allowedTopologies := storageClass.AllowedTopologies
opComplete := util.OperationCompleteHook(plugin.GetPluginName(), "volume_provision")
volume, err = provisioner.Provision(selectedNode, allowedTopologies)
opComplete(&err)
if err != nil {
// Other places of failure have nothing to do with DynamicProvisioningScheduling,
// Other places of failure have nothing to do with VolumeScheduling,
// so just let controller retry in the next sync. We'll only call func
// rescheduleProvisioning here when the underlying provisioning actually failed.
ctrl.rescheduleProvisioning(claim)
@@ -1463,7 +1498,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
strerr := fmt.Sprintf("Failed to provision volume with StorageClass %q: %v", storageClass.Name, err)
glog.V(2).Infof("failed to provision volume for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err)
ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
return
return pluginName, err
}
glog.V(3).Infof("volume %q for claim %q created", volume.Name, claimToClaimKey(claim))
@@ -1518,7 +1553,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
var deleteErr error
var deleted bool
for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ {
deleted, deleteErr = ctrl.doDeleteVolume(volume)
_, deleted, deleteErr = ctrl.doDeleteVolume(volume)
if deleteErr == nil && deleted {
// Delete succeeded
glog.V(4).Infof("provisionClaimOperation [%s]: cleaning volume %s succeeded", claimToClaimKey(claim), volume.Name)
@@ -1548,6 +1583,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
msg := fmt.Sprintf("Successfully provisioned volume %s using %s", volume.Name, plugin.GetPluginName())
ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, events.ProvisioningSucceeded, msg)
}
return pluginName, nil
}
// rescheduleProvisioning signal back to the scheduler to retry dynamic provisioning

View File

@@ -237,12 +237,21 @@ func addVolumeAnnotation(volume *v1.PersistentVolume, annName, annValue string)
return volume
}
func makePVCClass(scName *string) *v1.PersistentVolumeClaim {
return &v1.PersistentVolumeClaim{
func makePVCClass(scName *string, hasSelectNodeAnno bool) *v1.PersistentVolumeClaim {
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{},
},
Spec: v1.PersistentVolumeClaimSpec{
StorageClassName: scName,
},
}
if hasSelectNodeAnno {
claim.Annotations[annSelectedNode] = "node-name"
}
return claim
}
func makeStorageClass(scName string, mode *storagev1.VolumeBindingMode) *storagev1.StorageClass {
@@ -271,26 +280,30 @@ func TestDelayBinding(t *testing.T) {
shouldFail bool
}{
"nil-class": {
pvc: makePVCClass(nil),
pvc: makePVCClass(nil, false),
shouldDelay: false,
},
"class-not-found": {
pvc: makePVCClass(&classNotHere),
pvc: makePVCClass(&classNotHere, false),
shouldDelay: false,
},
"no-mode-class": {
pvc: makePVCClass(&classNoMode),
pvc: makePVCClass(&classNoMode, false),
shouldDelay: false,
shouldFail: true,
},
"immediate-mode-class": {
pvc: makePVCClass(&classImmediateMode),
pvc: makePVCClass(&classImmediateMode, false),
shouldDelay: false,
},
"wait-mode-class": {
pvc: makePVCClass(&classWaitMode),
pvc: makePVCClass(&classWaitMode, false),
shouldDelay: true,
},
"wait-mode-class-with-selectedNode": {
pvc: makePVCClass(&classWaitMode, true),
shouldDelay: false,
},
}
classes := []*storagev1.StorageClass{
@@ -314,7 +327,7 @@ func TestDelayBinding(t *testing.T) {
// When volumeScheduling feature gate is disabled, should always be delayed
name := "volumeScheduling-feature-disabled"
shouldDelay, err := ctrl.shouldDelayBinding(makePVCClass(&classWaitMode))
shouldDelay, err := ctrl.shouldDelayBinding(makePVCClass(&classWaitMode, false))
if err != nil {
t.Errorf("Test %q returned error: %v", name, err)
}
@@ -338,43 +351,4 @@ func TestDelayBinding(t *testing.T) {
t.Errorf("Test %q returned unexpected %v", name, test.shouldDelay)
}
}
// When dynamicProvisioningScheduling feature gate is disabled, should be delayed,
// even if the pvc has selectedNode annotation.
provisionedClaim := makePVCClass(&classWaitMode)
provisionedClaim.Annotations = map[string]string{annSelectedNode: "node-name"}
name = "dynamicProvisioningScheduling-feature-disabled"
shouldDelay, err = ctrl.shouldDelayBinding(provisionedClaim)
if err != nil {
t.Errorf("Test %q returned error: %v", name, err)
}
if !shouldDelay {
t.Errorf("Test %q returned false, expected true", name)
}
// Enable DynamicProvisioningScheduling feature gate
utilfeature.DefaultFeatureGate.Set("DynamicProvisioningScheduling=true")
defer utilfeature.DefaultFeatureGate.Set("DynamicProvisioningScheduling=false")
// When the pvc does not have selectedNode annotation, should be delayed,
// even if dynamicProvisioningScheduling feature gate is enabled.
name = "dynamicProvisioningScheduling-feature-enabled, selectedNode-annotation-not-set"
shouldDelay, err = ctrl.shouldDelayBinding(makePVCClass(&classWaitMode))
if err != nil {
t.Errorf("Test %q returned error: %v", name, err)
}
if !shouldDelay {
t.Errorf("Test %q returned false, expected true", name)
}
// Should not be delayed when dynamicProvisioningScheduling feature gate is enabled,
// and the pvc has selectedNode annotation.
name = "dynamicProvisioningScheduling-feature-enabled, selectedNode-annotation-set"
shouldDelay, err = ctrl.shouldDelayBinding(provisionedClaim)
if err != nil {
t.Errorf("Test %q returned error: %v", name, err)
}
if shouldDelay {
t.Errorf("Test %q returned true, expected false", name)
}
}

View File

@@ -83,7 +83,8 @@ func (e *errObjectName) Error() string {
// Restore() sets the latest object pointer back to the informer object.
// Get/List() always returns the latest object pointer.
type assumeCache struct {
mutex sync.Mutex
// Synchronizes updates to store
rwMutex sync.RWMutex
// describes the object stored
description string
@@ -155,8 +156,8 @@ func (c *assumeCache) add(obj interface{}) {
return
}
c.mutex.Lock()
defer c.mutex.Unlock()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
if objInfo, _ := c.getObjInfo(name); objInfo != nil {
newVersion, err := c.getObjVersion(name, obj)
@@ -199,8 +200,8 @@ func (c *assumeCache) delete(obj interface{}) {
return
}
c.mutex.Lock()
defer c.mutex.Unlock()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
objInfo := &objInfo{name: name}
err = c.store.Delete(objInfo)
@@ -239,8 +240,8 @@ func (c *assumeCache) getObjInfo(name string) (*objInfo, error) {
}
func (c *assumeCache) Get(objName string) (interface{}, error) {
c.mutex.Lock()
defer c.mutex.Unlock()
c.rwMutex.RLock()
defer c.rwMutex.RUnlock()
objInfo, err := c.getObjInfo(objName)
if err != nil {
@@ -250,8 +251,8 @@ func (c *assumeCache) Get(objName string) (interface{}, error) {
}
func (c *assumeCache) List(indexObj interface{}) []interface{} {
c.mutex.Lock()
defer c.mutex.Unlock()
c.rwMutex.RLock()
defer c.rwMutex.RUnlock()
allObjs := []interface{}{}
objs, err := c.store.Index(c.indexName, &objInfo{latestObj: indexObj})
@@ -277,8 +278,8 @@ func (c *assumeCache) Assume(obj interface{}) error {
return &errObjectName{err}
}
c.mutex.Lock()
defer c.mutex.Unlock()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
objInfo, err := c.getObjInfo(name)
if err != nil {
@@ -306,8 +307,8 @@ func (c *assumeCache) Assume(obj interface{}) error {
}
func (c *assumeCache) Restore(objName string) {
c.mutex.Lock()
defer c.mutex.Unlock()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
objInfo, err := c.getObjInfo(objName)
if err != nil {
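
The assume cache change above is purely about lock granularity: Get and List now take a read lock so concurrent lookups no longer serialize behind each other, while add, delete, Assume and Restore keep the exclusive write lock. A minimal, self-contained sketch of that discipline, using a plain map instead of the informer-backed store:

package main

import (
	"fmt"
	"sync"
)

// miniCache shows the read/write split used by assumeCache: writers take the
// exclusive lock, readers take the shared lock and can run in parallel.
type miniCache struct {
	rwMutex sync.RWMutex
	objs    map[string]string
}

func (c *miniCache) add(name, val string) {
	c.rwMutex.Lock()
	defer c.rwMutex.Unlock()
	c.objs[name] = val
}

func (c *miniCache) get(name string) (string, bool) {
	c.rwMutex.RLock()
	defer c.rwMutex.RUnlock()
	v, ok := c.objs[name]
	return v, ok
}

func main() {
	c := &miniCache{objs: map[string]string{}}
	c.add("pv-node1a", "5G")

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Readers only contend on the shared lock, not on each other.
			if v, ok := c.get("pv-node1a"); ok {
				fmt.Println("read:", v)
			}
		}()
	}
	wg.Wait()
}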

View File

@@ -456,7 +456,7 @@ func TestAssumeUpdatePVCCache(t *testing.T) {
// Assume PVC
newPVC := pvc.DeepCopy()
newPVC.Annotations["volume.alpha.kubernetes.io/selected-node"] = "test-node"
newPVC.Annotations[annSelectedNode] = "test-node"
if err := cache.Assume(newPVC); err != nil {
t.Fatalf("failed to assume PVC: %v", err)
}

View File

@@ -19,18 +19,18 @@ package persistentvolume
import (
"fmt"
"sort"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/apimachinery/pkg/util/wait"
coreinformers "k8s.io/client-go/informers/core/v1"
storageinformers "k8s.io/client-go/informers/storage/v1"
clientset "k8s.io/client-go/kubernetes"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
@@ -44,16 +44,19 @@ import (
// a. Invokes all predicate functions, parallelized across nodes. FindPodVolumes() is invoked here.
// b. Invokes all priority functions. Future/TBD
// c. Selects the best node for the Pod.
// d. Cache the node selection for the Pod. (Assume phase)
// d. Cache the node selection for the Pod. AssumePodVolumes() is invoked here.
// i. If PVC binding is required, cache in-memory only:
// * Updated PV objects for prebinding to the corresponding PVCs.
// * For the pod, which PVs need API updates.
// AssumePodVolumes() is invoked here. Then BindPodVolumes() is called asynchronously by the
// scheduler. After BindPodVolumes() is complete, the Pod is added back to the scheduler queue
// to be processed again until all PVCs are bound.
// ii. If PVC binding is not required, cache the Pod->Node binding in the scheduler's pod cache,
// and asynchronously bind the Pod to the Node. This is handled in the scheduler and not here.
// 2. Once the assume operation is done, the scheduler processes the next Pod in the scheduler queue
// * For manual binding: update PV objects for prebinding to the corresponding PVCs.
// * For dynamic provisioning: update PVC object with a selected node from c)
// * For the pod, which PVCs and PVs need API updates.
// ii. Afterwards, the main scheduler caches the Pod->Node binding in the scheduler's pod cache.
// This is handled in the scheduler and not here.
// e. Asynchronously bind volumes and pod in a separate goroutine
// i. BindPodVolumes() is called first. It makes all the necessary API updates and waits for
// PV controller to fully bind and provision the PVCs. If binding fails, the Pod is sent
// back through the scheduler.
// ii. After BindPodVolumes() is complete, then the scheduler does the final Pod->Node binding.
// 2. Once all the assume operations are done in d), the scheduler processes the next Pod in the scheduler queue
// while the actual binding operation occurs in the background.
type SchedulerVolumeBinder interface {
// FindPodVolumes checks if all of a Pod's PVCs can be satisfied by the node.
@@ -73,18 +76,18 @@ type SchedulerVolumeBinder interface {
// 2. Take the PVCs that need provisioning and update the PVC cache with related
// annotations set.
//
// It returns true if all volumes are fully bound, and returns true if any volume binding/provisioning
// API operation needs to be done afterwards.
// It returns true if all volumes are fully bound
//
// This function will modify assumedPod with the node name.
// This function is called serially.
AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, bindingRequired bool, err error)
AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, err error)
// BindPodVolumes will:
// 1. Initiate the volume binding by making the API call to prebind the PV
// to its matching PVC.
// 2. Trigger the volume provisioning by making the API call to set related
// annotations on the PVC
// 3. Wait for PVCs to be completely bound by the PV controller
//
// This function can be called in parallel.
BindPodVolumes(assumedPod *v1.Pod) error
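
Read together with the flow comment above, the interface is used in three phases: FindPodVolumes during predicate evaluation, AssumePodVolumes once a node is chosen, and BindPodVolumes from a separate goroutine. The fragment below is a hypothetical caller written against the signatures shown here; it assumes it sits in the same package (for SchedulerVolumeBinder) with "fmt" and k8s.io/api/core/v1 imported, and is not the scheduler's actual wiring.

// scheduleOnePod sketches the order of calls described in the comment above.
func scheduleOnePod(binder SchedulerVolumeBinder, pod *v1.Pod, node *v1.Node) error {
	// a. Predicate phase: can this node satisfy the pod's bound and unbound PVCs?
	unboundOK, boundOK, err := binder.FindPodVolumes(pod, node)
	if err != nil {
		return err
	}
	if !unboundOK || !boundOK {
		return fmt.Errorf("node %q cannot satisfy volumes of pod %q", node.Name, pod.Name)
	}

	// d. Assume phase: cache binding/provisioning decisions in memory only.
	allBound, err := binder.AssumePodVolumes(pod, node.Name)
	if err != nil {
		return err
	}

	// e. Bind phase: only needed when some PVCs are not yet fully bound;
	// the real scheduler runs this asynchronously.
	if !allBound {
		go func() {
			if err := binder.BindPodVolumes(pod); err != nil {
				// On failure the pod goes back through scheduling.
				fmt.Printf("binding volumes for pod %q failed: %v\n", pod.Name, err)
			}
		}()
	}
	return nil
}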
@@ -102,6 +105,9 @@ type volumeBinder struct {
// Stores binding decisions that were made in FindPodVolumes for use in AssumePodVolumes.
// AssumePodVolumes modifies the bindings again for use in BindPodVolumes.
podBindingCache PodBindingCache
// Amount of time to wait for the bind operation to succeed
bindTimeout time.Duration
}
// NewVolumeBinder sets up all the caches needed for the scheduler to make volume binding decisions.
@@ -109,7 +115,8 @@ func NewVolumeBinder(
kubeClient clientset.Interface,
pvcInformer coreinformers.PersistentVolumeClaimInformer,
pvInformer coreinformers.PersistentVolumeInformer,
storageClassInformer storageinformers.StorageClassInformer) SchedulerVolumeBinder {
storageClassInformer storageinformers.StorageClassInformer,
bindTimeout time.Duration) SchedulerVolumeBinder {
// TODO: find better way...
ctrl := &PersistentVolumeController{
@@ -122,6 +129,7 @@ func NewVolumeBinder(
pvcCache: NewPVCAssumeCache(pvcInformer.Informer()),
pvCache: NewPVAssumeCache(pvInformer.Informer()),
podBindingCache: NewPodBindingCache(),
bindTimeout: bindTimeout,
}
return b
@@ -151,7 +159,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
// Immediate claims should be bound
if len(unboundClaimsImmediate) > 0 {
return false, false, fmt.Errorf("pod has unbound PersistentVolumeClaims")
return false, false, fmt.Errorf("pod has unbound immediate PersistentVolumeClaims")
}
// Check PV node affinity on bound volumes
@@ -169,13 +177,11 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
return false, false, err
}
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
// Try to provision for unbound volumes
if !unboundVolumesSatisfied {
unboundVolumesSatisfied, err = b.checkVolumeProvisions(pod, claimsToProvision, node)
if err != nil {
return false, false, err
}
// Try to provision for unbound volumes
if !unboundVolumesSatisfied {
unboundVolumesSatisfied, err = b.checkVolumeProvisions(pod, claimsToProvision, node)
if err != nil {
return false, false, err
}
}
}
@@ -187,22 +193,24 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
// in podBindingCache for the chosen node, and:
// 1. Update the pvCache with the new prebound PV.
// 2. Update the pvcCache with the new PVCs with annotations set
// It will update podBindingCache again with the PVs and PVCs that need an API update.
func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound, bindingRequired bool, err error) {
// 3. Update podBindingCache again with cached API updates for PVs and PVCs.
func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, err error) {
podName := getPodName(assumedPod)
glog.V(4).Infof("AssumePodVolumes for pod %q, node %q", podName, nodeName)
if allBound := b.arePodVolumesBound(assumedPod); allBound {
glog.V(4).Infof("AssumePodVolumes for pod %q, node %q: all PVCs bound and nothing to do", podName, nodeName)
return true, false, nil
return true, nil
}
assumedPod.Spec.NodeName = nodeName
// Assume PV
claimsToBind := b.podBindingCache.GetBindings(assumedPod, nodeName)
newBindings := []*bindingInfo{}
claimsToBind := b.podBindingCache.GetBindings(assumedPod, nodeName)
claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, nodeName)
// Assume PV
newBindings := []*bindingInfo{}
for _, binding := range claimsToBind {
newPV, dirty, err := b.ctrl.getBindVolumeToClaim(binding.pv, binding.pvc)
glog.V(5).Infof("AssumePodVolumes: getBindVolumeToClaim for pod %q, PV %q, PVC %q. newPV %p, dirty %v, err: %v",
@@ -214,29 +222,20 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al
err)
if err != nil {
b.revertAssumedPVs(newBindings)
return false, true, err
return false, err
}
// TODO: can we assume everytime?
if dirty {
err = b.pvCache.Assume(newPV)
if err != nil {
b.revertAssumedPVs(newBindings)
return false, true, err
return false, err
}
newBindings = append(newBindings, &bindingInfo{pv: newPV, pvc: binding.pvc})
}
}
// Don't update cached bindings if no API updates are needed. This can happen if we
// previously updated the PV object and are waiting for the PV controller to finish binding.
if len(newBindings) != 0 {
bindingRequired = true
b.podBindingCache.UpdateBindings(assumedPod, nodeName, newBindings)
newBindings = append(newBindings, &bindingInfo{pv: newPV, pvc: binding.pvc})
}
// Assume PVCs
claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, nodeName)
newProvisionedPVCs := []*v1.PersistentVolumeClaim{}
for _, claim := range claimsToProvision {
// The claims from method args can be pointing to watcher cache. We must not
@@ -253,50 +252,37 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al
newProvisionedPVCs = append(newProvisionedPVCs, claimClone)
}
if len(newProvisionedPVCs) != 0 {
bindingRequired = true
b.podBindingCache.UpdateProvisionedPVCs(assumedPod, nodeName, newProvisionedPVCs)
}
// Update cache with the assumed pvcs and pvs
// Even if length is zero, update the cache with an empty slice to indicate that no
// operations are needed
b.podBindingCache.UpdateBindings(assumedPod, nodeName, newBindings)
b.podBindingCache.UpdateProvisionedPVCs(assumedPod, nodeName, newProvisionedPVCs)
return
}
// BindPodVolumes gets the cached bindings and PVCs to provision in podBindingCache
// and makes the API update for those PVs/PVCs.
// BindPodVolumes gets the cached bindings and PVCs to provision in podBindingCache,
// makes the API update for those PVs/PVCs, and waits for the PVCs to be completely bound
// by the PV controller.
func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod) error {
podName := getPodName(assumedPod)
glog.V(4).Infof("BindPodVolumes for pod %q", podName)
glog.V(4).Infof("BindPodVolumes for pod %q, node %q", podName, assumedPod.Spec.NodeName)
bindings := b.podBindingCache.GetBindings(assumedPod, assumedPod.Spec.NodeName)
claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, assumedPod.Spec.NodeName)
// Do the actual prebinding. Let the PV controller take care of the rest
// There is no API rollback if the actual binding fails
for i, bindingInfo := range bindings {
glog.V(5).Infof("BindPodVolumes: Pod %q, binding PV %q to PVC %q", podName, bindingInfo.pv.Name, bindingInfo.pvc.Name)
_, err := b.ctrl.updateBindVolumeToClaim(bindingInfo.pv, bindingInfo.pvc, false)
if err != nil {
// only revert assumed cached updates for volumes we haven't successfully bound
b.revertAssumedPVs(bindings[i:])
// Revert all of the assumed cached updates for claims,
// since no actual API update will be done
b.revertAssumedPVCs(claimsToProvision)
return err
}
// Start API operations
err := b.bindAPIUpdate(podName, bindings, claimsToProvision)
if err != nil {
return err
}
// Update claims objects to trigger volume provisioning. Let the PV controller take care of the rest
// PV controller is expect to signal back by removing related annotations if actual provisioning fails
for i, claim := range claimsToProvision {
if _, err := b.ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim); err != nil {
glog.V(4).Infof("updating PersistentVolumeClaim[%s] failed: %v", getPVCName(claim), err)
// only revert assumed cached updates for claims we haven't successfully updated
b.revertAssumedPVCs(claimsToProvision[i:])
return err
}
}
return nil
return wait.Poll(time.Second, b.bindTimeout, func() (bool, error) {
// Get cached values every time in case the pod gets deleted
bindings = b.podBindingCache.GetBindings(assumedPod, assumedPod.Spec.NodeName)
claimsToProvision = b.podBindingCache.GetProvisionedPVCs(assumedPod, assumedPod.Spec.NodeName)
return b.checkBindings(assumedPod, bindings, claimsToProvision)
})
}
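
With this change BindPodVolumes issues the API updates once through bindAPIUpdate and then simply re-reads the binding cache and re-runs checkBindings every second until the PV controller reports everything bound or bindTimeout expires. A self-contained sketch of that wait.Poll shape, with a fake readiness check standing in for checkBindings:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	bindTimeout := 10 * time.Second
	start := time.Now()

	// allBound stands in for checkBindings: true once the (pretend) PV
	// controller has finished; an error would abort and trigger rescheduling.
	allBound := func() (bool, error) {
		return time.Since(start) > 2*time.Second, nil
	}

	// Same shape as the wait in BindPodVolumes: re-evaluate every second
	// until bound, an error, or the timeout.
	err := wait.Poll(time.Second, bindTimeout, func() (bool, error) {
		return allBound()
	})
	fmt.Println("bind wait finished, err:", err)
}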
func getPodName(pod *v1.Pod) string {
@@ -307,12 +293,131 @@ func getPVCName(pvc *v1.PersistentVolumeClaim) string {
return pvc.Namespace + "/" + pvc.Name
}
func (b *volumeBinder) isVolumeBound(namespace string, vol *v1.Volume, checkFullyBound bool) (bool, *v1.PersistentVolumeClaim, error) {
// bindAPIUpdate gets the cached bindings and PVCs to provision in podBindingCache
// and makes the API update for those PVs/PVCs.
func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) error {
if bindings == nil {
return fmt.Errorf("failed to get cached bindings for pod %q", podName)
}
if claimsToProvision == nil {
return fmt.Errorf("failed to get cached claims to provision for pod %q", podName)
}
lastProcessedBinding := 0
lastProcessedProvisioning := 0
defer func() {
// only revert assumed cached updates for volumes we haven't successfully bound
if lastProcessedBinding < len(bindings) {
b.revertAssumedPVs(bindings[lastProcessedBinding:])
}
// only revert assumed cached updates for claims we haven't updated,
if lastProcessedProvisioning < len(claimsToProvision) {
b.revertAssumedPVCs(claimsToProvision[lastProcessedProvisioning:])
}
}()
var (
binding *bindingInfo
claim *v1.PersistentVolumeClaim
)
// Do the actual prebinding. Let the PV controller take care of the rest
// There is no API rollback if the actual binding fails
for _, binding = range bindings {
glog.V(5).Infof("bindAPIUpdate: Pod %q, binding PV %q to PVC %q", podName, binding.pv.Name, binding.pvc.Name)
// TODO: does it hurt if we make an api call and nothing needs to be updated?
if _, err := b.ctrl.updateBindVolumeToClaim(binding.pv, binding.pvc, false); err != nil {
return err
}
lastProcessedBinding++
}
// Update claims objects to trigger volume provisioning. Let the PV controller take care of the rest
// PV controller is expected to signal back by removing related annotations if actual provisioning fails
for _, claim = range claimsToProvision {
glog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim))
if _, err := b.ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim); err != nil {
return err
}
lastProcessedProvisioning++
}
return nil
}
// checkBindings runs through all the PVCs in the Pod and checks:
// * if the PVC is fully bound
// * if there are any conditions that require binding to fail and be retried
//
// It returns true when all of the Pod's PVCs are fully bound, and error if
// binding (and scheduling) needs to be retried
func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*bindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) (bool, error) {
podName := getPodName(pod)
if bindings == nil {
return false, fmt.Errorf("failed to get cached bindings for pod %q", podName)
}
if claimsToProvision == nil {
return false, fmt.Errorf("failed to get cached claims to provision for pod %q", podName)
}
for _, binding := range bindings {
// Check for any conditions that might require scheduling retry
// Check if pv still exists
pv, err := b.pvCache.GetPV(binding.pv.Name)
if err != nil || pv == nil {
return false, fmt.Errorf("failed to check pv binding: %v", err)
}
// Check if pv.ClaimRef got dropped by unbindVolume()
if pv.Spec.ClaimRef == nil || pv.Spec.ClaimRef.UID == "" {
return false, fmt.Errorf("ClaimRef got reset for pv %q", pv.Name)
}
// Check if pvc is fully bound
if isBound, _, err := b.isPVCBound(binding.pvc.Namespace, binding.pvc.Name); !isBound || err != nil {
return false, err
}
// TODO: what if pvc is bound to the wrong pv? It means our assume cache should be reverted.
// Or will pv controller cleanup the pv.ClaimRef?
}
for _, claim := range claimsToProvision {
bound, pvc, err := b.isPVCBound(claim.Namespace, claim.Name)
if err != nil || pvc == nil {
return false, fmt.Errorf("failed to check pvc binding: %v", err)
}
// Check if selectedNode annotation is still set
if pvc.Annotations == nil {
return false, fmt.Errorf("selectedNode annotation reset for PVC %q", pvc.Name)
}
selectedNode := pvc.Annotations[annSelectedNode]
if selectedNode != pod.Spec.NodeName {
return false, fmt.Errorf("selectedNode annotation value %q not set to scheduled node %q", selectedNode, pod.Spec.NodeName)
}
if !bound {
return false, nil
}
}
// All pvs and pvcs that we operated on are bound
glog.V(4).Infof("All PVCs for pod %q are bound", podName)
return true, nil
}
func (b *volumeBinder) isVolumeBound(namespace string, vol *v1.Volume) (bool, *v1.PersistentVolumeClaim, error) {
if vol.PersistentVolumeClaim == nil {
return true, nil, nil
}
pvcName := vol.PersistentVolumeClaim.ClaimName
return b.isPVCBound(namespace, pvcName)
}
func (b *volumeBinder) isPVCBound(namespace, pvcName string) (bool, *v1.PersistentVolumeClaim, error) {
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: pvcName,
@@ -326,17 +431,13 @@ func (b *volumeBinder) isVolumeBound(namespace string, vol *v1.Volume, checkFull
pvName := pvc.Spec.VolumeName
if pvName != "" {
if checkFullyBound {
if metav1.HasAnnotation(pvc.ObjectMeta, annBindCompleted) {
glog.V(5).Infof("PVC %q is fully bound to PV %q", getPVCName(pvc), pvName)
return true, pvc, nil
} else {
glog.V(5).Infof("PVC %q is not fully bound to PV %q", getPVCName(pvc), pvName)
return false, pvc, nil
}
if metav1.HasAnnotation(pvc.ObjectMeta, annBindCompleted) {
glog.V(5).Infof("PVC %q is fully bound to PV %q", getPVCName(pvc), pvName)
return true, pvc, nil
} else {
glog.V(5).Infof("PVC %q is not fully bound to PV %q", getPVCName(pvc), pvName)
return false, pvc, nil
}
glog.V(5).Infof("PVC %q is bound or prebound to PV %q", getPVCName(pvc), pvName)
return true, pvc, nil
}
glog.V(5).Infof("PVC %q is not bound", getPVCName(pvc))
@@ -346,7 +447,7 @@ func (b *volumeBinder) isVolumeBound(namespace string, vol *v1.Volume, checkFull
// arePodVolumesBound returns true if all volumes are fully bound
func (b *volumeBinder) arePodVolumesBound(pod *v1.Pod) bool {
for _, vol := range pod.Spec.Volumes {
if isBound, _, _ := b.isVolumeBound(pod.Namespace, &vol, true); !isBound {
if isBound, _, _ := b.isVolumeBound(pod.Namespace, &vol); !isBound {
// Pod has at least one PVC that needs binding
return false
}
@@ -362,7 +463,7 @@ func (b *volumeBinder) getPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentV
unboundClaims = []*bindingInfo{}
for _, vol := range pod.Spec.Volumes {
volumeBound, pvc, err := b.isVolumeBound(pod.Namespace, &vol, false)
volumeBound, pvc, err := b.isVolumeBound(pod.Namespace, &vol)
if err != nil {
return nil, nil, nil, err
}
@@ -376,7 +477,8 @@ func (b *volumeBinder) getPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentV
if err != nil {
return nil, nil, nil, err
}
if delayBinding {
// Prebound PVCs are treated as unbound immediate binding
if delayBinding && pvc.Spec.VolumeName == "" {
// Scheduler path
unboundClaims = append(unboundClaims, &bindingInfo{pvc: pvc})
} else {
@@ -522,19 +624,6 @@ type bindingInfo struct {
pv *v1.PersistentVolume
}
// Used in unit test errors
func (b bindingInfo) String() string {
pvcName := ""
pvName := ""
if b.pvc != nil {
pvcName = getPVCName(b.pvc)
}
if b.pv != nil {
pvName = b.pv.Name
}
return fmt.Sprintf("[PVC %q, PV %q]", pvcName, pvName)
}
type byPVCSize []*bindingInfo
func (a byPVCSize) Len() int {

View File

@@ -31,6 +31,8 @@ type PodBindingCache interface {
UpdateBindings(pod *v1.Pod, node string, bindings []*bindingInfo)
// GetBindings will return the cached bindings for the given pod and node.
// A nil return value means that the entry was not found. An empty slice
// means that no binding operations are needed.
GetBindings(pod *v1.Pod, node string) []*bindingInfo
// UpdateProvisionedPVCs will update the cache with the given provisioning decisions
@@ -38,6 +40,8 @@ type PodBindingCache interface {
UpdateProvisionedPVCs(pod *v1.Pod, node string, provisionings []*v1.PersistentVolumeClaim)
// GetProvisionedPVCs will return the cached provisioning decisions for the given pod and node.
// A nil return value means that the entry was not found. An empty slice
// means that no provisioning operations are needed.
GetProvisionedPVCs(pod *v1.Pod, node string) []*v1.PersistentVolumeClaim
// DeleteBindings will remove all cached bindings and provisionings for the given pod.
@@ -46,7 +50,8 @@ type PodBindingCache interface {
}
type podBindingCache struct {
mutex sync.Mutex
// synchronizes bindingDecisions
rwMutex sync.RWMutex
// Key = pod name
// Value = nodeDecisions
@@ -68,16 +73,16 @@ func NewPodBindingCache() PodBindingCache {
}
func (c *podBindingCache) DeleteBindings(pod *v1.Pod) {
c.mutex.Lock()
defer c.mutex.Unlock()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
podName := getPodName(pod)
delete(c.bindingDecisions, podName)
}
func (c *podBindingCache) UpdateBindings(pod *v1.Pod, node string, bindings []*bindingInfo) {
c.mutex.Lock()
defer c.mutex.Unlock()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
podName := getPodName(pod)
decisions, ok := c.bindingDecisions[podName]
@@ -97,8 +102,8 @@ func (c *podBindingCache) UpdateBindings(pod *v1.Pod, node string, bindings []*b
}
func (c *podBindingCache) GetBindings(pod *v1.Pod, node string) []*bindingInfo {
c.mutex.Lock()
defer c.mutex.Unlock()
c.rwMutex.RLock()
defer c.rwMutex.RUnlock()
podName := getPodName(pod)
decisions, ok := c.bindingDecisions[podName]
@@ -113,8 +118,8 @@ func (c *podBindingCache) GetBindings(pod *v1.Pod, node string) []*bindingInfo {
}
func (c *podBindingCache) UpdateProvisionedPVCs(pod *v1.Pod, node string, pvcs []*v1.PersistentVolumeClaim) {
c.mutex.Lock()
defer c.mutex.Unlock()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
podName := getPodName(pod)
decisions, ok := c.bindingDecisions[podName]
@@ -134,8 +139,8 @@ func (c *podBindingCache) UpdateProvisionedPVCs(pod *v1.Pod, node string, pvcs [
}
func (c *podBindingCache) GetProvisionedPVCs(pod *v1.Pod, node string) []*v1.PersistentVolumeClaim {
c.mutex.Lock()
defer c.mutex.Unlock()
c.rwMutex.RLock()
defer c.rwMutex.RUnlock()
podName := getPodName(pod)
decisions, ok := c.bindingDecisions[podName]

View File

@@ -16,18 +16,15 @@ limitations under the License.
package persistentvolume
import (
"k8s.io/api/core/v1"
)
import "k8s.io/api/core/v1"
type FakeVolumeBinderConfig struct {
AllBound bool
FindUnboundSatsified bool
FindBoundSatsified bool
FindErr error
AssumeBindingRequired bool
AssumeErr error
BindErr error
AllBound bool
FindUnboundSatsified bool
FindBoundSatsified bool
FindErr error
AssumeErr error
BindErr error
}
// NewVolumeBinder sets up all the caches needed for the scheduler to make
@@ -48,9 +45,9 @@ func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVo
return b.config.FindUnboundSatsified, b.config.FindBoundSatsified, b.config.FindErr
}
func (b *FakeVolumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (bool, bool, error) {
func (b *FakeVolumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (bool, error) {
b.AssumeCalled = true
return b.config.AllBound, b.config.AssumeBindingRequired, b.config.AssumeErr
return b.config.AllBound, b.config.AssumeErr
}
func (b *FakeVolumeBinder) BindPodVolumes(assumedPod *v1.Pod) error {

View File

@@ -20,6 +20,7 @@ import (
"fmt"
"reflect"
"testing"
"time"
"github.com/golang/glog"
@@ -38,20 +39,30 @@ import (
)
var (
unboundPVC = makeTestPVC("unbound-pvc", "1G", pvcUnbound, "", "1", &waitClass)
unboundPVC2 = makeTestPVC("unbound-pvc2", "5G", pvcUnbound, "", "1", &waitClass)
preboundPVC = makeTestPVC("prebound-pvc", "1G", pvcPrebound, "pv-node1a", "1", &waitClass)
boundPVC = makeTestPVC("bound-pvc", "1G", pvcBound, "pv-bound", "1", &waitClass)
boundPVC2 = makeTestPVC("bound-pvc2", "1G", pvcBound, "pv-bound2", "1", &waitClass)
badPVC = makeBadPVC()
immediateUnboundPVC = makeTestPVC("immediate-unbound-pvc", "1G", pvcUnbound, "", "1", &immediateClass)
immediateBoundPVC = makeTestPVC("immediate-bound-pvc", "1G", pvcBound, "pv-bound-immediate", "1", &immediateClass)
provisionedPVC = makeTestPVC("provisioned-pvc", "1Gi", pvcUnbound, "", "1", &waitClass)
provisionedPVC2 = makeTestPVC("provisioned-pvc2", "1Gi", pvcUnbound, "", "1", &waitClass)
provisionedPVCHigherVersion = makeTestPVC("provisioned-pvc2", "1Gi", pvcUnbound, "", "2", &waitClass)
noProvisionerPVC = makeTestPVC("no-provisioner-pvc", "1Gi", pvcUnbound, "", "1", &provisionNotSupportClass)
topoMismatchPVC = makeTestPVC("topo-mismatch-pvc", "1Gi", pvcUnbound, "", "1", &topoMismatchClass)
// PVCs for manual binding
// TODO: clean up all of these
unboundPVC = makeTestPVC("unbound-pvc", "1G", "", pvcUnbound, "", "1", &waitClass)
unboundPVC2 = makeTestPVC("unbound-pvc2", "5G", "", pvcUnbound, "", "1", &waitClass)
preboundPVC = makeTestPVC("prebound-pvc", "1G", "", pvcPrebound, "pv-node1a", "1", &waitClass)
preboundPVCNode1a = makeTestPVC("unbound-pvc", "1G", "", pvcPrebound, "pv-node1a", "1", &waitClass)
boundPVC = makeTestPVC("bound-pvc", "1G", "", pvcBound, "pv-bound", "1", &waitClass)
boundPVC2 = makeTestPVC("bound-pvc2", "1G", "", pvcBound, "pv-bound2", "1", &waitClass)
boundPVCNode1a = makeTestPVC("unbound-pvc", "1G", "", pvcBound, "pv-node1a", "1", &waitClass)
badPVC = makeBadPVC()
immediateUnboundPVC = makeTestPVC("immediate-unbound-pvc", "1G", "", pvcUnbound, "", "1", &immediateClass)
immediateBoundPVC = makeTestPVC("immediate-bound-pvc", "1G", "", pvcBound, "pv-bound-immediate", "1", &immediateClass)
// PVCs for dynamic provisioning
provisionedPVC = makeTestPVC("provisioned-pvc", "1Gi", "", pvcUnbound, "", "1", &waitClassWithProvisioner)
provisionedPVC2 = makeTestPVC("provisioned-pvc2", "1Gi", "", pvcUnbound, "", "1", &waitClassWithProvisioner)
provisionedPVCHigherVersion = makeTestPVC("provisioned-pvc2", "1Gi", "", pvcUnbound, "", "2", &waitClassWithProvisioner)
provisionedPVCBound = makeTestPVC("provisioned-pvc", "1Gi", "", pvcBound, "some-pv", "1", &waitClassWithProvisioner)
noProvisionerPVC = makeTestPVC("no-provisioner-pvc", "1Gi", "", pvcUnbound, "", "1", &waitClass)
topoMismatchPVC = makeTestPVC("topo-mismatch-pvc", "1Gi", "", pvcUnbound, "", "1", &topoMismatchClass)
selectedNodePVC = makeTestPVC("provisioned-pvc", "1Gi", nodeLabelValue, pvcSelectedNode, "", "1", &waitClassWithProvisioner)
// PVs for manual binding
pvNoNode = makeTestPV("pv-no-node", "", "1G", "1", nil, waitClass)
pvNode1a = makeTestPV("pv-node1a", "node1", "5G", "1", nil, waitClass)
pvNode1b = makeTestPV("pv-node1b", "node1", "10G", "1", nil, waitClass)
@@ -59,12 +70,13 @@ var (
pvNode2 = makeTestPV("pv-node2", "node2", "1G", "1", nil, waitClass)
pvPrebound = makeTestPV("pv-prebound", "node1", "1G", "1", unboundPVC, waitClass)
pvBound = makeTestPV("pv-bound", "node1", "1G", "1", boundPVC, waitClass)
pvNode1aBound = makeTestPV("pv-node1a", "node1", "1G", "1", unboundPVC, waitClass)
pvNode1bBound = makeTestPV("pv-node1b", "node1", "5G", "1", unboundPVC2, waitClass)
pvNode1bBoundHigherVersion = makeTestPV("pv-node1b", "node1", "5G", "2", unboundPVC2, waitClass)
pvNode1aBound = makeTestPV("pv-node1a", "node1", "5G", "1", unboundPVC, waitClass)
pvNode1bBound = makeTestPV("pv-node1b", "node1", "10G", "1", unboundPVC2, waitClass)
pvNode1bBoundHigherVersion = makeTestPV("pv-node1b", "node1", "10G", "2", unboundPVC2, waitClass)
pvBoundImmediate = makeTestPV("pv-bound-immediate", "node1", "1G", "1", immediateBoundPVC, immediateClass)
pvBoundImmediateNode2 = makeTestPV("pv-bound-immediate", "node2", "1G", "1", immediateBoundPVC, immediateClass)
// PVC/PV bindings for manual binding
binding1a = makeBinding(unboundPVC, pvNode1a)
binding1b = makeBinding(unboundPVC2, pvNode1b)
bindingNoNode = makeBinding(unboundPVC, pvNoNode)
@@ -72,11 +84,13 @@ var (
binding1aBound = makeBinding(unboundPVC, pvNode1aBound)
binding1bBound = makeBinding(unboundPVC2, pvNode1bBound)
// storage class names
waitClass = "waitClass"
immediateClass = "immediateClass"
provisionNotSupportClass = "provisionNotSupportedClass"
waitClassWithProvisioner = "waitClassWithProvisioner"
topoMismatchClass = "topoMismatchClass"
// node topology
nodeLabelKey = "nodeKey"
nodeLabelValue = "node1"
)
@@ -102,7 +116,8 @@ func newTestBinder(t *testing.T) *testEnv {
client,
pvcInformer,
informerFactory.Core().V1().PersistentVolumes(),
classInformer)
classInformer,
10*time.Second)
// Add storageclasses
waitMode := storagev1.VolumeBindingWaitForFirstConsumer
@@ -110,7 +125,7 @@ func newTestBinder(t *testing.T) *testEnv {
classes := []*storagev1.StorageClass{
{
ObjectMeta: metav1.ObjectMeta{
Name: waitClass,
Name: waitClassWithProvisioner,
},
VolumeBindingMode: &waitMode,
Provisioner: "test-provisioner",
@@ -133,7 +148,7 @@ func newTestBinder(t *testing.T) *testEnv {
},
{
ObjectMeta: metav1.ObjectMeta{
Name: provisionNotSupportClass,
Name: waitClass,
},
VolumeBindingMode: &waitMode,
Provisioner: "kubernetes.io/no-provisioner",
@@ -247,17 +262,44 @@ func (env *testEnv) initPodCache(pod *v1.Pod, node string, bindings []*bindingIn
func (env *testEnv) validatePodCache(t *testing.T, name, node string, pod *v1.Pod, expectedBindings []*bindingInfo, expectedProvisionings []*v1.PersistentVolumeClaim) {
cache := env.internalBinder.podBindingCache
bindings := cache.GetBindings(pod, node)
if aLen, eLen := len(bindings), len(expectedBindings); aLen != eLen {
t.Errorf("Test %q failed. expected %v bindings, got %v", name, eLen, aLen)
} else if expectedBindings == nil && bindings != nil {
// nil and empty are different
t.Errorf("Test %q failed. expected nil bindings, got empty", name)
} else if expectedBindings != nil && bindings == nil {
// nil and empty are different
t.Errorf("Test %q failed. expected empty bindings, got nil", name)
} else {
for i := 0; i < aLen; i++ {
// Validate PV
if !reflect.DeepEqual(expectedBindings[i].pv, bindings[i].pv) {
t.Errorf("Test %q failed. binding.pv doesn't match [A-expected, B-got]: %s", name, diff.ObjectDiff(expectedBindings[i].pv, bindings[i].pv))
}
if !reflect.DeepEqual(expectedBindings, bindings) {
t.Errorf("Test %q failed: Expected bindings %+v, got %+v", name, expectedBindings, bindings)
// Validate PVC
if !reflect.DeepEqual(expectedBindings[i].pvc, bindings[i].pvc) {
t.Errorf("Test %q failed. binding.pvc doesn't match [A-expected, B-got]: %s", name, diff.ObjectDiff(expectedBindings[i].pvc, bindings[i].pvc))
}
}
}
provisionedClaims := cache.GetProvisionedPVCs(pod, node)
if !reflect.DeepEqual(expectedProvisionings, provisionedClaims) {
t.Errorf("Test %q failed: Expected provisionings %+v, got %+v", name, expectedProvisionings, provisionedClaims)
if aLen, eLen := len(provisionedClaims), len(expectedProvisionings); aLen != eLen {
t.Errorf("Test %q failed. expected %v provisioned claims, got %v", name, eLen, aLen)
} else if expectedProvisionings == nil && provisionedClaims != nil {
// nil and empty are different
t.Errorf("Test %q failed. expected nil provisionings, got empty", name)
} else if expectedProvisionings != nil && provisionedClaims == nil {
// nil and empty are different
t.Errorf("Test %q failed. expected empty provisionings, got nil", name)
} else {
for i := 0; i < aLen; i++ {
if !reflect.DeepEqual(expectedProvisionings[i], provisionedClaims[i]) {
t.Errorf("Test %q failed. provisioned claims doesn't match [A-expected, B-got]: %s", name, diff.ObjectDiff(expectedProvisionings[i], provisionedClaims[i]))
}
}
}
}
func (env *testEnv) getPodBindings(t *testing.T, name, node string, pod *v1.Pod) []*bindingInfo {
@@ -266,8 +308,6 @@ func (env *testEnv) getPodBindings(t *testing.T, name, node string, pod *v1.Pod)
}
func (env *testEnv) validateAssume(t *testing.T, name string, pod *v1.Pod, bindings []*bindingInfo, provisionings []*v1.PersistentVolumeClaim) {
// TODO: Check binding cache
// Check pv cache
pvCache := env.internalBinder.pvCache
for _, b := range bindings {
@@ -383,17 +423,21 @@ const (
pvcUnbound = iota
pvcPrebound
pvcBound
pvcSelectedNode
)
func makeTestPVC(name, size string, pvcBoundState int, pvName, resourceVersion string, className *string) *v1.PersistentVolumeClaim {
func makeTestPVC(name, size, node string, pvcBoundState int, pvName, resourceVersion string, className *string) *v1.PersistentVolumeClaim {
pvc := &v1.PersistentVolumeClaim{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: "testns",
UID: types.UID("pvc-uid"),
ResourceVersion: resourceVersion,
SelfLink: testapi.Default.SelfLink("pvc", name),
Annotations: map[string]string{},
},
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.ResourceRequirements{
@@ -406,6 +450,9 @@ func makeTestPVC(name, size string, pvcBoundState int, pvName, resourceVersion s
}
switch pvcBoundState {
case pvcSelectedNode:
metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, annSelectedNode, node)
// don't fallthrough
case pvcBound:
metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, annBindCompleted, "yes")
fallthrough
@@ -454,9 +501,12 @@ func makeTestPV(name, node, capacity, version string, boundToPVC *v1.PersistentV
if boundToPVC != nil {
pv.Spec.ClaimRef = &v1.ObjectReference{
Name: boundToPVC.Name,
Namespace: boundToPVC.Namespace,
UID: boundToPVC.UID,
Kind: boundToPVC.Kind,
APIVersion: boundToPVC.APIVersion,
ResourceVersion: boundToPVC.ResourceVersion,
Name: boundToPVC.Name,
Namespace: boundToPVC.Namespace,
UID: boundToPVC.UID,
}
metav1.SetMetaDataAnnotation(&pv.ObjectMeta, annBoundByController, "yes")
}
@@ -464,6 +514,24 @@ func makeTestPV(name, node, capacity, version string, boundToPVC *v1.PersistentV
return pv
}
func pvcSetSelectedNode(pvc *v1.PersistentVolumeClaim, node string) *v1.PersistentVolumeClaim {
newPVC := pvc.DeepCopy()
metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, annSelectedNode, node)
return newPVC
}
func pvcSetEmptyAnnotations(pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
newPVC := pvc.DeepCopy()
newPVC.Annotations = map[string]string{}
return newPVC
}
func pvRemoveClaimUID(pv *v1.PersistentVolume) *v1.PersistentVolume {
newPV := pv.DeepCopy()
newPV.Spec.ClaimRef.UID = ""
return newPV
}
func makePod(pvcs []*v1.PersistentVolumeClaim) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@@ -515,7 +583,7 @@ func makeBinding(pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) *bindin
func addProvisionAnn(pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
res := pvc.DeepCopy()
// Add provision related annotations
res.Annotations[annSelectedNode] = nodeLabelValue
metav1.SetMetaDataAnnotation(&res.ObjectMeta, annSelectedNode, nodeLabelValue)
return res
}
@@ -568,10 +636,9 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
shouldFail: true,
},
"prebound-pvc": {
podPVCs: []*v1.PersistentVolumeClaim{preboundPVC},
pvs: []*v1.PersistentVolume{pvNode1aBound},
expectedUnbound: true,
expectedBound: true,
podPVCs: []*v1.PersistentVolumeClaim{preboundPVC},
pvs: []*v1.PersistentVolume{pvNode1aBound},
shouldFail: true,
},
"unbound-pvc,pv-same-node": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
@@ -621,11 +688,9 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
expectedBound: true,
},
"one-prebound,one-unbound": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, preboundPVC},
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
expectedBindings: []*bindingInfo{binding1a},
expectedUnbound: true,
expectedBound: true,
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, preboundPVC},
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
shouldFail: true,
},
"immediate-bound-pvc": {
podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC},
@@ -776,9 +841,9 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
},
}
// Set VolumeScheduling and DynamicProvisioningScheduling feature gate
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true,DynamicProvisioningScheduling=true")
defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false,DynamicProvisioningScheduling=false")
// Set VolumeScheduling feature gate
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true")
defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false")
testNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
@@ -834,69 +899,64 @@ func TestAssumePodVolumes(t *testing.T) {
provisionedPVCs []*v1.PersistentVolumeClaim
// Expected return values
shouldFail bool
expectedBindingRequired bool
expectedAllBound bool
shouldFail bool
expectedAllBound bool
// if nil, use bindings
expectedBindings []*bindingInfo
expectedBindings []*bindingInfo
expectedProvisionings []*v1.PersistentVolumeClaim
}{
"all-bound": {
podPVCs: []*v1.PersistentVolumeClaim{boundPVC},
pvs: []*v1.PersistentVolume{pvBound},
expectedAllBound: true,
},
"prebound-pvc": {
podPVCs: []*v1.PersistentVolumeClaim{preboundPVC},
pvs: []*v1.PersistentVolume{pvNode1a},
},
"one-binding": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
bindings: []*bindingInfo{binding1a},
pvs: []*v1.PersistentVolume{pvNode1a},
expectedBindingRequired: true,
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
bindings: []*bindingInfo{binding1a},
pvs: []*v1.PersistentVolume{pvNode1a},
expectedBindings: []*bindingInfo{binding1aBound},
expectedProvisionings: []*v1.PersistentVolumeClaim{},
},
"two-bindings": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2},
bindings: []*bindingInfo{binding1a, binding1b},
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
expectedBindingRequired: true,
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2},
bindings: []*bindingInfo{binding1a, binding1b},
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
expectedBindings: []*bindingInfo{binding1aBound, binding1bBound},
expectedProvisionings: []*v1.PersistentVolumeClaim{},
},
"pv-already-bound": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
bindings: []*bindingInfo{binding1aBound},
pvs: []*v1.PersistentVolume{pvNode1aBound},
expectedBindingRequired: false,
expectedBindings: []*bindingInfo{},
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
bindings: []*bindingInfo{binding1aBound},
pvs: []*v1.PersistentVolume{pvNode1aBound},
expectedBindings: []*bindingInfo{binding1aBound},
expectedProvisionings: []*v1.PersistentVolumeClaim{},
},
"claimref-failed": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
bindings: []*bindingInfo{binding1a, bindingBad},
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
shouldFail: true,
expectedBindingRequired: true,
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
bindings: []*bindingInfo{binding1a, bindingBad},
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
shouldFail: true,
},
"tmpupdate-failed": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
bindings: []*bindingInfo{binding1a, binding1b},
pvs: []*v1.PersistentVolume{pvNode1a},
shouldFail: true,
expectedBindingRequired: true,
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
bindings: []*bindingInfo{binding1a, binding1b},
pvs: []*v1.PersistentVolume{pvNode1a},
shouldFail: true,
},
"one-binding, one-pvc-provisioned": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC},
bindings: []*bindingInfo{binding1a},
pvs: []*v1.PersistentVolume{pvNode1a},
provisionedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC},
expectedBindingRequired: true,
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC},
bindings: []*bindingInfo{binding1a},
pvs: []*v1.PersistentVolume{pvNode1a},
provisionedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC},
expectedBindings: []*bindingInfo{binding1aBound},
expectedProvisionings: []*v1.PersistentVolumeClaim{selectedNodePVC},
},
"one-binding, one-provision-tmpupdate-failed": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVCHigherVersion},
bindings: []*bindingInfo{binding1a},
pvs: []*v1.PersistentVolume{pvNode1a},
provisionedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC2},
shouldFail: true,
expectedBindingRequired: true,
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVCHigherVersion},
bindings: []*bindingInfo{binding1a},
pvs: []*v1.PersistentVolume{pvNode1a},
provisionedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC2},
shouldFail: true,
},
}
@@ -911,7 +971,7 @@ func TestAssumePodVolumes(t *testing.T) {
testEnv.initVolumes(scenario.pvs, scenario.pvs)
// Execute
allBound, bindingRequired, err := testEnv.binder.AssumePodVolumes(pod, "node1")
allBound, err := testEnv.binder.AssumePodVolumes(pod, "node1")
// Validate
if !scenario.shouldFail && err != nil {
@@ -920,24 +980,25 @@ func TestAssumePodVolumes(t *testing.T) {
if scenario.shouldFail && err == nil {
t.Errorf("Test %q failed: returned success but expected error", name)
}
if scenario.expectedBindingRequired != bindingRequired {
t.Errorf("Test %q failed: returned unexpected bindingRequired: %v", name, bindingRequired)
}
if scenario.expectedAllBound != allBound {
t.Errorf("Test %q failed: returned unexpected allBound: %v", name, allBound)
}
if scenario.expectedBindings == nil {
scenario.expectedBindings = scenario.bindings
}
if scenario.shouldFail {
testEnv.validateFailedAssume(t, name, pod, scenario.expectedBindings, scenario.provisionedPVCs)
} else {
testEnv.validateAssume(t, name, pod, scenario.expectedBindings, scenario.provisionedPVCs)
if scenario.expectedProvisionings == nil {
scenario.expectedProvisionings = scenario.provisionedPVCs
}
if scenario.shouldFail {
testEnv.validateFailedAssume(t, name, pod, scenario.expectedBindings, scenario.expectedProvisionings)
} else {
testEnv.validateAssume(t, name, pod, scenario.expectedBindings, scenario.expectedProvisionings)
}
testEnv.validatePodCache(t, name, pod.Spec.NodeName, pod, scenario.expectedBindings, scenario.expectedProvisionings)
}
}
func TestBindPodVolumes(t *testing.T) {
func TestBindAPIUpdate(t *testing.T) {
scenarios := map[string]struct {
// Inputs
bindings []*bindingInfo
@@ -960,34 +1021,56 @@ func TestBindPodVolumes(t *testing.T) {
// if nil, use expectedPVCs
expectedAPIPVCs []*v1.PersistentVolumeClaim
}{
"all-bound": {},
"not-fully-bound": {
bindings: []*bindingInfo{},
"nothing-to-bind-nil": {
shouldFail: true,
},
"nothing-to-bind-bindings-nil": {
provisionedPVCs: []*v1.PersistentVolumeClaim{},
shouldFail: true,
},
"nothing-to-bind-provisionings-nil": {
bindings: []*bindingInfo{},
shouldFail: true,
},
"nothing-to-bind-empty": {
bindings: []*bindingInfo{},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
},
"one-binding": {
bindings: []*bindingInfo{binding1aBound},
cachedPVs: []*v1.PersistentVolume{pvNode1a},
expectedPVs: []*v1.PersistentVolume{pvNode1aBound},
bindings: []*bindingInfo{binding1aBound},
cachedPVs: []*v1.PersistentVolume{pvNode1a},
expectedPVs: []*v1.PersistentVolume{pvNode1aBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
},
"two-bindings": {
bindings: []*bindingInfo{binding1aBound, binding1bBound},
cachedPVs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
expectedPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1bBound},
bindings: []*bindingInfo{binding1aBound, binding1bBound},
cachedPVs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
expectedPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1bBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
},
"api-already-updated": {
bindings: []*bindingInfo{binding1aBound},
cachedPVs: []*v1.PersistentVolume{pvNode1aBound},
expectedPVs: []*v1.PersistentVolume{pvNode1aBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
},
"api-update-failed": {
bindings: []*bindingInfo{binding1aBound, binding1bBound},
cachedPVs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
apiPVs: []*v1.PersistentVolume{pvNode1a, pvNode1bBoundHigherVersion},
expectedPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1b},
expectedAPIPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1bBoundHigherVersion},
shouldFail: true,
bindings: []*bindingInfo{binding1aBound, binding1bBound},
cachedPVs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
apiPVs: []*v1.PersistentVolume{pvNode1a, pvNode1bBoundHigherVersion},
expectedPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1b},
expectedAPIPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1bBoundHigherVersion},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
shouldFail: true,
},
"one-provisioned-pvc": {
bindings: []*bindingInfo{},
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
cachedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC},
expectedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
},
"provision-api-update-failed": {
bindings: []*bindingInfo{},
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC), addProvisionAnn(provisionedPVC2)},
cachedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC, provisionedPVC2},
apiPVCs: []*v1.PersistentVolumeClaim{provisionedPVC, provisionedPVCHigherVersion},
@@ -995,7 +1078,7 @@ func TestBindPodVolumes(t *testing.T) {
expectedAPIPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC), provisionedPVCHigherVersion},
shouldFail: true,
},
"bingding-succeed, provision-api-update-failed": {
"binding-succeed, provision-api-update-failed": {
bindings: []*bindingInfo{binding1aBound},
cachedPVs: []*v1.PersistentVolume{pvNode1a},
expectedPVs: []*v1.PersistentVolume{pvNode1aBound},
@@ -1008,7 +1091,7 @@ func TestBindPodVolumes(t *testing.T) {
},
}
for name, scenario := range scenarios {
glog.V(5).Infof("Running test case %q", name)
glog.V(4).Infof("Running test case %q", name)
// Setup
testEnv := newTestBinder(t)
@@ -1024,7 +1107,7 @@ func TestBindPodVolumes(t *testing.T) {
testEnv.assumeVolumes(t, name, "node1", pod, scenario.bindings, scenario.provisionedPVCs)
// Execute
err := testEnv.binder.BindPodVolumes(pod)
err := testEnv.internalBinder.bindAPIUpdate(pod.Name, scenario.bindings, scenario.provisionedPVCs)
// Validate
if !scenario.shouldFail && err != nil {
@@ -1044,6 +1127,301 @@ func TestBindPodVolumes(t *testing.T) {
}
}
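// TestCheckBindings drives the internal checkBindings helper directly, covering the cache
// states it can observe after an assume: fully bound PVs and PVCs, still-unbound or prebound
// claims, missing objects, and provisioned PVCs whose selected-node annotation was dropped
// or points at the wrong node.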
func TestCheckBindings(t *testing.T) {
scenarios := map[string]struct {
// Inputs
bindings []*bindingInfo
cachedPVs []*v1.PersistentVolume
provisionedPVCs []*v1.PersistentVolumeClaim
cachedPVCs []*v1.PersistentVolumeClaim
// Expected return values
shouldFail bool
expectedBound bool
}{
"nothing-to-bind-nil": {
shouldFail: true,
},
"nothing-to-bind-bindings-nil": {
provisionedPVCs: []*v1.PersistentVolumeClaim{},
shouldFail: true,
},
"nothing-to-bind-provisionings-nil": {
bindings: []*bindingInfo{},
shouldFail: true,
},
"nothing-to-bind": {
bindings: []*bindingInfo{},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
expectedBound: true,
},
"binding-bound": {
bindings: []*bindingInfo{binding1aBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
cachedPVs: []*v1.PersistentVolume{pvNode1aBound},
cachedPVCs: []*v1.PersistentVolumeClaim{boundPVCNode1a},
expectedBound: true,
},
"binding-prebound": {
bindings: []*bindingInfo{binding1aBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
cachedPVs: []*v1.PersistentVolume{pvNode1aBound},
cachedPVCs: []*v1.PersistentVolumeClaim{preboundPVCNode1a},
},
"binding-unbound": {
bindings: []*bindingInfo{binding1aBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
cachedPVs: []*v1.PersistentVolume{pvNode1aBound},
cachedPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
},
"binding-pvc-not-exists": {
bindings: []*bindingInfo{binding1aBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
cachedPVs: []*v1.PersistentVolume{pvNode1aBound},
shouldFail: true,
},
"binding-pv-not-exists": {
bindings: []*bindingInfo{binding1aBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
cachedPVCs: []*v1.PersistentVolumeClaim{boundPVCNode1a},
shouldFail: true,
},
"binding-claimref-nil": {
bindings: []*bindingInfo{binding1aBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
cachedPVs: []*v1.PersistentVolume{pvNode1a},
cachedPVCs: []*v1.PersistentVolumeClaim{boundPVCNode1a},
shouldFail: true,
},
"binding-claimref-uid-empty": {
bindings: []*bindingInfo{binding1aBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
cachedPVs: []*v1.PersistentVolume{pvRemoveClaimUID(pvNode1aBound)},
cachedPVCs: []*v1.PersistentVolumeClaim{boundPVCNode1a},
shouldFail: true,
},
"binding-one-bound,one-unbound": {
bindings: []*bindingInfo{binding1aBound, binding1bBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
cachedPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1bBound},
cachedPVCs: []*v1.PersistentVolumeClaim{boundPVCNode1a, unboundPVC2},
},
"provisioning-pvc-bound": {
bindings: []*bindingInfo{},
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
cachedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVCBound)},
expectedBound: true,
},
"provisioning-pvc-unbound": {
bindings: []*bindingInfo{},
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
cachedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
},
"provisioning-pvc-not-exists": {
bindings: []*bindingInfo{},
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
shouldFail: true,
},
"provisioning-pvc-annotations-nil": {
bindings: []*bindingInfo{},
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
cachedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC},
shouldFail: true,
},
"provisioning-pvc-selected-node-dropped": {
bindings: []*bindingInfo{},
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
cachedPVCs: []*v1.PersistentVolumeClaim{pvcSetEmptyAnnotations(provisionedPVC)},
shouldFail: true,
},
"provisioning-pvc-selected-node-wrong-node": {
bindings: []*bindingInfo{},
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
cachedPVCs: []*v1.PersistentVolumeClaim{pvcSetSelectedNode(provisionedPVC, "wrong-node")},
shouldFail: true,
},
"binding-bound-provisioning-unbound": {
bindings: []*bindingInfo{binding1aBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
cachedPVs: []*v1.PersistentVolume{pvNode1aBound},
cachedPVCs: []*v1.PersistentVolumeClaim{boundPVCNode1a, addProvisionAnn(provisionedPVC)},
},
}
for name, scenario := range scenarios {
glog.V(4).Infof("Running test case %q", name)
// Setup
pod := makePod(nil)
testEnv := newTestBinder(t)
testEnv.initVolumes(scenario.cachedPVs, nil)
testEnv.initClaims(scenario.cachedPVCs, nil)
// Execute
allBound, err := testEnv.internalBinder.checkBindings(pod, scenario.bindings, scenario.provisionedPVCs)
// Validate
if !scenario.shouldFail && err != nil {
t.Errorf("Test %q failed: returned error: %v", name, err)
}
if scenario.shouldFail && err == nil {
t.Errorf("Test %q failed: returned success but expected error", name)
}
if scenario.expectedBound != allBound {
t.Errorf("Test %q failed: returned bound %v", name, allBound)
}
}
}
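// TestBindPodVolumes covers the full BindPodVolumes wait loop. Each delayFunc fires after
// roughly five seconds, simulating the asynchronous cache updates (binding completing, the
// pod or PVC disappearing) that informers would normally deliver.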
func TestBindPodVolumes(t *testing.T) {
scenarios := map[string]struct {
// Inputs
// These tests only support a single pv and pvc and static binding
bindingsNil bool // Pass in nil bindings slice
binding *bindingInfo
cachedPV *v1.PersistentVolume
cachedPVC *v1.PersistentVolumeClaim
apiPV *v1.PersistentVolume
// This function runs with a delay of 5 seconds
delayFunc func(*testing.T, *testEnv, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim)
// Expected return values
shouldFail bool
}{
"nothing-to-bind-nil": {
bindingsNil: true,
shouldFail: true,
},
"nothing-to-bind-empty": {},
"already-bound": {
binding: binding1aBound,
cachedPV: pvNode1aBound,
cachedPVC: boundPVCNode1a,
},
"binding-succeeds-after-time": {
binding: binding1aBound,
cachedPV: pvNode1a,
cachedPVC: unboundPVC,
delayFunc: func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
// Update PVC to be fully bound to PV
newPVC := pvc.DeepCopy()
newPVC.ResourceVersion = "100"
newPVC.Spec.VolumeName = pv.Name
metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, annBindCompleted, "yes")
// Update pvc cache, fake client doesn't invoke informers
internalBinder, ok := testEnv.binder.(*volumeBinder)
if !ok {
t.Fatalf("Failed to convert to internal binder")
}
pvcCache := internalBinder.pvcCache
internalPVCCache, ok := pvcCache.(*pvcAssumeCache)
if !ok {
t.Fatalf("Failed to convert to internal PVC cache")
}
internalPVCCache.add(newPVC)
},
},
"pod-deleted-after-time": {
binding: binding1aBound,
cachedPV: pvNode1a,
cachedPVC: unboundPVC,
delayFunc: func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
bindingsCache := testEnv.binder.GetBindingsCache()
if bindingsCache == nil {
t.Fatalf("Failed to get bindings cache")
}
// Delete the pod from the cache
bindingsCache.DeleteBindings(pod)
// Check that it's deleted
bindings := bindingsCache.GetBindings(pod, "node1")
if bindings != nil {
t.Fatalf("Failed to delete bindings")
}
},
shouldFail: true,
},
"binding-times-out": {
binding: binding1aBound,
cachedPV: pvNode1a,
cachedPVC: unboundPVC,
shouldFail: true,
},
"binding-fails": {
binding: binding1bBound,
cachedPV: pvNode1b,
apiPV: pvNode1bBoundHigherVersion,
cachedPVC: unboundPVC2,
shouldFail: true,
},
"check-fails": {
binding: binding1aBound,
cachedPV: pvNode1a,
cachedPVC: unboundPVC,
delayFunc: func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
// Delete PVC
// Update pvc cache, fake client doesn't invoke informers
internalBinder, ok := testEnv.binder.(*volumeBinder)
if !ok {
t.Fatalf("Failed to convert to internal binder")
}
pvcCache := internalBinder.pvcCache
internalPVCCache, ok := pvcCache.(*pvcAssumeCache)
if !ok {
t.Fatalf("Failed to convert to internal PVC cache")
}
internalPVCCache.delete(pvc)
},
shouldFail: true,
},
}
for name, scenario := range scenarios {
glog.V(4).Infof("Running test case %q", name)
// Setup
pod := makePod(nil)
if scenario.apiPV == nil {
scenario.apiPV = scenario.cachedPV
}
testEnv := newTestBinder(t)
if !scenario.bindingsNil {
if scenario.binding != nil {
testEnv.initVolumes([]*v1.PersistentVolume{scenario.cachedPV}, []*v1.PersistentVolume{scenario.apiPV})
testEnv.initClaims([]*v1.PersistentVolumeClaim{scenario.cachedPVC}, nil)
testEnv.assumeVolumes(t, name, "node1", pod, []*bindingInfo{scenario.binding}, []*v1.PersistentVolumeClaim{})
} else {
testEnv.assumeVolumes(t, name, "node1", pod, []*bindingInfo{}, []*v1.PersistentVolumeClaim{})
}
}
if scenario.delayFunc != nil {
go func() {
time.Sleep(5 * time.Second)
glog.V(5).Infof("Running delay function")
scenario.delayFunc(t, testEnv, pod, scenario.binding.pv, scenario.binding.pvc)
}()
}
// Execute
err := testEnv.binder.BindPodVolumes(pod)
// Validate
if !scenario.shouldFail && err != nil {
t.Errorf("Test %q failed: returned error: %v", name, err)
}
if scenario.shouldFail && err == nil {
t.Errorf("Test %q failed: returned success but expected error", name)
}
}
}
func TestFindAssumeVolumes(t *testing.T) {
// Set feature gate
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true")
@@ -1080,17 +1458,15 @@ func TestFindAssumeVolumes(t *testing.T) {
expectedBindings := testEnv.getPodBindings(t, "before-assume", testNode.Name, pod)
// 2. Assume matches
allBound, bindingRequired, err := testEnv.binder.AssumePodVolumes(pod, testNode.Name)
allBound, err := testEnv.binder.AssumePodVolumes(pod, testNode.Name)
if err != nil {
t.Errorf("Test failed: AssumePodVolumes returned error: %v", err)
}
if allBound {
t.Errorf("Test failed: detected unbound volumes as bound")
}
if !bindingRequired {
t.Errorf("Test failed: binding not required")
}
testEnv.validateAssume(t, "assume", pod, expectedBindings, nil)
// After assume, claimref should be set on pv
expectedBindings = testEnv.getPodBindings(t, "after-assume", testNode.Name, pod)
@@ -1106,6 +1482,6 @@ func TestFindAssumeVolumes(t *testing.T) {
if !unboundSatisfied {
t.Errorf("Test failed: couldn't find PVs for all PVCs")
}
testEnv.validatePodCache(t, "after-assume", testNode.Name, pod, expectedBindings, nil)
testEnv.validatePodCache(t, "after-assume", testNode.Name, pod, expectedBindings, []*v1.PersistentVolumeClaim{})
}
}

View File

@@ -25,8 +25,8 @@ import (
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/mount"
vol "k8s.io/kubernetes/pkg/volume"
)
@@ -79,10 +79,6 @@ func (ctrl *PersistentVolumeController) GetMounter(pluginName string) mount.Inte
return nil
}
func (ctrl *PersistentVolumeController) GetWriter() io.Writer {
return nil
}
func (ctrl *PersistentVolumeController) GetHostName() string {
return ""
}
@@ -128,3 +124,8 @@ func (ctrl *PersistentVolumeController) GetNodeName() types.NodeName {
func (ctrl *PersistentVolumeController) GetEventRecorder() record.EventRecorder {
return ctrl.eventRecorder
}
func (ctrl *PersistentVolumeController) GetCSIClient() csiclientset.Interface {
// No volume plugin needs csi.storage.k8s.io client in PV controller.
return nil
}

View File

@@ -10,17 +10,17 @@ go_library(
"//pkg/util/metrics:go_default_library",
"//pkg/util/slice:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
@@ -31,17 +31,17 @@ go_test(
deps = [
"//pkg/controller:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
],
)

View File

@@ -221,11 +221,6 @@ func (c *Controller) isBeingUsed(pvc *v1.PersistentVolumeClaim) (bool, error) {
glog.V(4).Infof("Skipping unscheduled pod %s when checking PVC %s/%s", pod.Name, pvc.Namespace, pvc.Name)
continue
}
if volumeutil.IsPodTerminated(pod, pod.Status) {
// This pod is being unmounted/detached or is already
// unmounted/detached. It does not block the PVC from deletion.
continue
}
for _, volume := range pod.Spec.Volumes {
if volume.PersistentVolumeClaim == nil {
continue

View File

@@ -264,14 +264,12 @@ func TestPVCProtectionController(t *testing.T) {
storageObjectInUseProtectionEnabled: true,
},
{
name: "deleted PVC with finalizer + pods with the PVC and is finished -> finalizer is removed",
name: "deleted PVC with finalizer + pods with the PVC finished but is not deleted -> finalizer is not removed",
initialObjects: []runtime.Object{
withStatus(v1.PodFailed, withPVC(defaultPVCName, pod())),
},
updatedPVC: deleted(withProtectionFinalizer(pvc())),
expectedActions: []clienttesting.Action{
clienttesting.NewUpdateAction(pvcVer, defaultNS, deleted(pvc())),
},
updatedPVC: deleted(withProtectionFinalizer(pvc())),
expectedActions: []clienttesting.Action{},
storageObjectInUseProtectionEnabled: true,
},
//
@@ -287,14 +285,12 @@ func TestPVCProtectionController(t *testing.T) {
storageObjectInUseProtectionEnabled: true,
},
{
name: "updated finished Pod -> finalizer is removed",
name: "updated finished Pod -> finalizer is not removed",
initialObjects: []runtime.Object{
deleted(withProtectionFinalizer(pvc())),
},
updatedPod: withStatus(v1.PodSucceeded, withPVC(defaultPVCName, pod())),
expectedActions: []clienttesting.Action{
clienttesting.NewUpdateAction(pvcVer, defaultNS, deleted(pvc())),
},
updatedPod: withStatus(v1.PodSucceeded, withPVC(defaultPVCName, pod())),
expectedActions: []clienttesting.Action{},
storageObjectInUseProtectionEnabled: true,
},
{

View File

@@ -10,16 +10,16 @@ go_library(
"//pkg/util/metrics:go_default_library",
"//pkg/util/slice:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
@@ -44,16 +44,16 @@ go_test(
deps = [
"//pkg/controller:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
],
)

View File

@@ -163,7 +163,7 @@ func (c *Controller) addFinalizer(pv *v1.PersistentVolume) error {
pvClone.ObjectMeta.Finalizers = append(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer)
_, err := c.client.CoreV1().PersistentVolumes().Update(pvClone)
if err != nil {
glog.V(3).Infof("Error adding protection finalizer to PV %s: %v", pv.Name)
glog.V(3).Infof("Error adding protection finalizer to PV %s: %v", pv.Name, err)
return err
}
glog.V(3).Infof("Added protection finalizer to PV %s", pv.Name)