Bumping k8s dependencies to 1.13
92 vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/BUILD (generated, vendored)
@@ -28,36 +28,36 @@ go_library(
        "//pkg/features:go_default_library",
        "//pkg/util/goroutinemap:go_default_library",
        "//pkg/util/goroutinemap/exponentialbackoff:go_default_library",
        "//pkg/util/io:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/util/recyclerclient:go_default_library",
        "//staging/src/k8s.io/api/authentication/v1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/informers/storage/v1:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
        "//staging/src/k8s.io/client-go/tools/reference:go_default_library",
        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
        "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/authentication/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/storage/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/informers/storage/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/listers/storage/v1:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
        "//vendor/k8s.io/client-go/tools/reference:go_default_library",
        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
    ],
)

@@ -83,28 +83,28 @@ go_test(
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/util/recyclerclient:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
        "//staging/src/k8s.io/client-go/testing:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
        "//staging/src/k8s.io/client-go/tools/reference:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/storage/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/listers/storage/v1:go_default_library",
        "//vendor/k8s.io/client-go/testing:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
        "//vendor/k8s.io/client-go/tools/reference:go_default_library",
    ],
)
6 vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/OWNERS (generated, vendored)
@@ -3,3 +3,9 @@ approvers:
- saad-ali
- thockin
- msau42 # for volume scheduling
reviewers:
- jsafrane
- saad-ali
- thockin
- msau42 # for volume scheduling
- lichuqiang # for volume scheduling
2 vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics/BUILD (generated, vendored)
@@ -10,9 +10,9 @@ go_library(
    srcs = ["metrics.go"],
    importpath = "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics",
    deps = [
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
    ],
)
27 vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics/metrics.go (generated, vendored)
@@ -56,6 +56,8 @@ type PVCLister interface {
func Register(pvLister PVLister, pvcLister PVCLister) {
    registerMetrics.Do(func() {
        prometheus.MustRegister(newPVAndPVCCountCollector(pvLister, pvcLister))
        prometheus.MustRegister(volumeOperationMetric)
        prometheus.MustRegister(volumeOperationErrorsMetric)
    })
}

@@ -89,6 +91,19 @@ var (
        prometheus.BuildFQName("", pvControllerSubsystem, unboundPVCKey),
        "Gauge measuring number of persistent volume claim currently unbound",
        []string{namespaceLabel}, nil)

    volumeOperationMetric = prometheus.NewHistogramVec(
        prometheus.HistogramOpts{
            Name: "volume_operation_total_seconds",
            Help: "Total volume operation time",
        },
        []string{"plugin_name", "operation_name"})
    volumeOperationErrorsMetric = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "volume_operation_total_errors",
            Help: "Total volume operation errors",
        },
        []string{"plugin_name", "operation_name"})
)

func (collector *pvAndPVCCountCollector) Describe(ch chan<- *prometheus.Desc) {

@@ -182,3 +197,15 @@ func (collector *pvAndPVCCountCollector) pvcCollect(ch chan<- prometheus.Metric)
        ch <- metric
    }
}

// RecordVolumeOperationMetric records the latency and errors of volume operations.
func RecordVolumeOperationMetric(pluginName, opName string, timeTaken float64, err error) {
    if pluginName == "" {
        pluginName = "N/A"
    }
    if err != nil {
        volumeOperationErrorsMetric.WithLabelValues(pluginName, opName).Inc()
        return
    }
    volumeOperationMetric.WithLabelValues(pluginName, opName).Observe(timeTaken)
}
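The new histogram/counter pair above is stock prometheus client usage, and RecordVolumeOperationMetric simply routes failures to the counter and successes to the histogram. A runnable, self-contained sketch of the same wiring (every name in it is illustrative, not part of the vendored code):

package main

import (
    "errors"
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

var (
    opDuration = prometheus.NewHistogramVec(
        prometheus.HistogramOpts{Name: "example_operation_seconds", Help: "Operation latency"},
        []string{"plugin_name", "operation_name"})
    opErrors = prometheus.NewCounterVec(
        prometheus.CounterOpts{Name: "example_operation_errors", Help: "Operation errors"},
        []string{"plugin_name", "operation_name"})
)

// record mirrors RecordVolumeOperationMetric: an error bumps the counter and
// suppresses the latency observation; success records the measured duration.
func record(plugin, op string, seconds float64, err error) {
    if plugin == "" {
        plugin = "N/A"
    }
    if err != nil {
        opErrors.WithLabelValues(plugin, op).Inc()
        return
    }
    opDuration.WithLabelValues(plugin, op).Observe(seconds)
}

func main() {
    prometheus.MustRegister(opDuration, opErrors)

    start := time.Now()
    err := errors.New("simulated delete failure") // stand-in for a real volume operation
    record("example-plugin", "delete", time.Since(start).Seconds(), err)
}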
150 vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go (generated, vendored)
@@ -40,6 +40,7 @@ import (
    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
    "k8s.io/kubernetes/pkg/cloudprovider"
    "k8s.io/kubernetes/pkg/controller/volume/events"
    "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics"
    "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/util/goroutinemap"
    "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"

@@ -122,6 +123,8 @@ const annBindCompleted = "pv.kubernetes.io/bind-completed"
// the binding (PV->PVC or PVC->PV) was installed by the controller. The
// absence of this annotation means the binding was done by the user (i.e.
// pre-bound). Value of this annotation does not matter.
// External PV binders must bind PV the same way as PV controller, otherwise PV
// controller may not handle it correctly.
const annBoundByController = "pv.kubernetes.io/bound-by-controller"

// This annotation is added to a PV that has been dynamically provisioned by

@@ -137,7 +140,7 @@ const annStorageProvisioner = "volume.beta.kubernetes.io/storage-provisioner"

// This annotation is added to a PVC that has been triggered by scheduler to
// be dynamically provisioned. Its value is the name of the selected node.
const annSelectedNode = "volume.alpha.kubernetes.io/selected-node"
const annSelectedNode = "volume.kubernetes.io/selected-node"

// If the provisioner name in a storage class is set to "kubernetes.io/no-provisioner",
// then dynamic provisioning is not supported by the storage.

@@ -264,7 +267,7 @@ func checkVolumeSatisfyClaim(volume *v1.PersistentVolume, claim *v1.PersistentVo

    requestedClass := v1helper.GetPersistentVolumeClaimClass(claim)
    if v1helper.GetPersistentVolumeClass(volume) != requestedClass {
        return fmt.Errorf("storageClasseName does not match")
        return fmt.Errorf("storageClassName does not match")
    }

    isMisMatch, err := checkVolumeModeMisMatches(&claim.Spec, &volume.Spec)

@@ -287,14 +290,12 @@ func (ctrl *PersistentVolumeController) shouldDelayBinding(claim *v1.PersistentV
        return false, nil
    }

    if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
        // When the DynamicProvisioningScheduling feature is enabled, the
        // scheduler signals the PV controller to start dynamic provisioning
        // by setting the annSelectedNode annotation on the PVC
        if _, ok := claim.Annotations[annSelectedNode]; ok {
            return false, nil
        }
    // When the VolumeScheduling feature is enabled, the scheduler signals the
    // PV controller to start dynamic provisioning by setting the
    // annSelectedNode annotation on the PVC
    if _, ok := claim.Annotations[annSelectedNode]; ok {
        return false, nil
    }

    className := v1helper.GetPersistentVolumeClaimClass(claim)
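The comment pair above spells out the contract: the scheduler tells the PV controller to start provisioning by stamping the selected node onto the PVC. A hypothetical sketch of that scheduler-side signal (the client, claim, and helper name are assumptions for illustration, not code from this commit):

import (
    "k8s.io/api/core/v1"
    "k8s.io/client-go/kubernetes"
)

// signalSelectedNode writes the annotation that shouldDelayBinding checks for.
// "client" and "claim" are assumed inputs.
func signalSelectedNode(client kubernetes.Interface, claim *v1.PersistentVolumeClaim, nodeName string) error {
    clone := claim.DeepCopy()
    if clone.Annotations == nil {
        clone.Annotations = map[string]string{}
    }
    clone.Annotations["volume.kubernetes.io/selected-node"] = nodeName // annSelectedNode
    _, err := client.CoreV1().PersistentVolumeClaims(clone.Namespace).Update(clone)
    return err
}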
@@ -544,6 +545,30 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume)
    if err != nil {
        return err
    }
    if !found && metav1.HasAnnotation(volume.ObjectMeta, annBoundByController) {
        // If PV is bound by external PV binder (e.g. kube-scheduler), it's
        // possible on heavy load that corresponding PVC is not synced to
        // controller local cache yet. So we need to double-check PVC in
        // 1) informer cache
        // 2) apiserver if not found in informer cache
        // to make sure we will not reclaim a PV wrongly.
        // Note that only non-released and non-failed volumes will be
        // updated to Released state when PVC does not exist.
        if volume.Status.Phase != v1.VolumeReleased && volume.Status.Phase != v1.VolumeFailed {
            obj, err = ctrl.claimLister.PersistentVolumeClaims(volume.Spec.ClaimRef.Namespace).Get(volume.Spec.ClaimRef.Name)
            if err != nil && !apierrs.IsNotFound(err) {
                return err
            }
            found = !apierrs.IsNotFound(err)
            if !found {
                obj, err = ctrl.kubeClient.CoreV1().PersistentVolumeClaims(volume.Spec.ClaimRef.Namespace).Get(volume.Spec.ClaimRef.Name, metav1.GetOptions{})
                if err != nil && !apierrs.IsNotFound(err) {
                    return err
                }
                found = !apierrs.IsNotFound(err)
            }
        }
    }
    if !found {
        glog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s not found", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef))
        // Fall through with claim = nil

@@ -1058,8 +1083,12 @@ func (ctrl *PersistentVolumeController) reclaimVolume(volume *v1.PersistentVolum
    case v1.PersistentVolumeReclaimDelete:
        glog.V(4).Infof("reclaimVolume[%s]: policy is Delete", volume.Name)
        opName := fmt.Sprintf("delete-%s[%s]", volume.Name, string(volume.UID))
        startTime := time.Now()
        ctrl.scheduleOperation(opName, func() error {
            return ctrl.deleteVolumeOperation(volume)
            pluginName, err := ctrl.deleteVolumeOperation(volume)
            timeTaken := time.Since(startTime).Seconds()
            metrics.RecordVolumeOperationMetric(pluginName, "delete", timeTaken, err)
            return err
        })

    default:

@@ -1157,7 +1186,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis

// deleteVolumeOperation deletes a volume. This method is running in standalone
// goroutine and already has all necessary locks.
func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.PersistentVolume) error {
func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.PersistentVolume) (string, error) {
    glog.V(4).Infof("deleteVolumeOperation [%s] started", volume.Name)

    // This method may have been waiting for a volume lock for some time.

@@ -1166,19 +1195,19 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.Persist
    newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{})
    if err != nil {
        glog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err)
        return nil
        return "", nil
    }
    needsReclaim, err := ctrl.isVolumeReleased(newVolume)
    if err != nil {
        glog.V(3).Infof("error reading claim for volume %q: %v", volume.Name, err)
        return nil
        return "", nil
    }
    if !needsReclaim {
        glog.V(3).Infof("volume %q no longer needs deletion, skipping", volume.Name)
        return nil
        return "", nil
    }

    deleted, err := ctrl.doDeleteVolume(volume)
    pluginName, deleted, err := ctrl.doDeleteVolume(volume)
    if err != nil {
        // Delete failed, update the volume and emit an event.
        glog.V(3).Infof("deletion of volume %q failed: %v", volume.Name, err)

@@ -1192,17 +1221,17 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.Persist
            if _, err := ctrl.updateVolumePhaseWithEvent(volume, v1.VolumeFailed, v1.EventTypeWarning, events.VolumeFailedDelete, err.Error()); err != nil {
                glog.V(4).Infof("deleteVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err)
                // Save failed, retry on the next deletion attempt
                return err
                return pluginName, err
            }
        }

        // Despite the volume being Failed, the controller will retry deleting
        // the volume in every syncVolume() call.
        return err
        return pluginName, err
    }
    if !deleted {
        // The volume waits for deletion by an external plugin. Do nothing.
        return nil
        return pluginName, nil
    }

    glog.V(4).Infof("deleteVolumeOperation [%s]: success", volume.Name)

@@ -1213,9 +1242,9 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.Persist
        // cache of "recently deleted volumes" and avoid unnecessary deletion,
        // this is left out as future optimization.
        glog.V(3).Infof("failed to delete volume %q from database: %v", volume.Name, err)
        return nil
        return pluginName, nil
    }
    return nil
    return pluginName, nil
}

// isVolumeReleased returns true if given volume is released and can be recycled

@@ -1292,43 +1321,44 @@ func (ctrl *PersistentVolumeController) isVolumeUsed(pv *v1.PersistentVolume) ([
    return podNames.List(), podNames.Len() != 0, nil
}

// doDeleteVolume finds appropriate delete plugin and deletes given volume. It
// returns 'true', when the volume was deleted and 'false' when the volume
// cannot be deleted because the deleter is external. No error should be
// reported in this case.
func (ctrl *PersistentVolumeController) doDeleteVolume(volume *v1.PersistentVolume) (bool, error) {
// doDeleteVolume finds appropriate delete plugin and deletes given volume, returning
// the volume plugin name. Also, it returns 'true', when the volume was deleted and
// 'false' when the volume cannot be deleted because the deleter is external. No
// error should be reported in this case.
func (ctrl *PersistentVolumeController) doDeleteVolume(volume *v1.PersistentVolume) (string, bool, error) {
    glog.V(4).Infof("doDeleteVolume [%s]", volume.Name)
    var err error

    plugin, err := ctrl.findDeletablePlugin(volume)
    if err != nil {
        return false, err
        return "", false, err
    }
    if plugin == nil {
        // External deleter is requested, do nothing
        glog.V(3).Infof("external deleter for volume %q requested, ignoring", volume.Name)
        return false, nil
        return "", false, nil
    }

    // Plugin found
    glog.V(5).Infof("found a deleter plugin %q for volume %q", plugin.GetPluginName(), volume.Name)
    pluginName := plugin.GetPluginName()
    glog.V(5).Infof("found a deleter plugin %q for volume %q", pluginName, volume.Name)
    spec := vol.NewSpecFromPersistentVolume(volume, false)
    deleter, err := plugin.NewDeleter(spec)
    if err != nil {
        // Cannot create deleter
        return false, fmt.Errorf("Failed to create deleter for volume %q: %v", volume.Name, err)
        return pluginName, false, fmt.Errorf("Failed to create deleter for volume %q: %v", volume.Name, err)
    }

    opComplete := util.OperationCompleteHook(plugin.GetPluginName(), "volume_delete")
    opComplete := util.OperationCompleteHook(pluginName, "volume_delete")
    err = deleter.Delete()
    opComplete(&err)
    if err != nil {
        // Deleter failed
        return false, err
        return pluginName, false, err
    }

    glog.V(2).Infof("volume %q deleted", volume.Name)
    return true, nil
    return pluginName, true, nil
}

// provisionClaim starts new asynchronous operation to provision a claim if

@@ -1339,16 +1369,19 @@ func (ctrl *PersistentVolumeController) provisionClaim(claim *v1.PersistentVolum
    }
    glog.V(4).Infof("provisionClaim[%s]: started", claimToClaimKey(claim))
    opName := fmt.Sprintf("provision-%s[%s]", claimToClaimKey(claim), string(claim.UID))
    startTime := time.Now()
    ctrl.scheduleOperation(opName, func() error {
        ctrl.provisionClaimOperation(claim)
        return nil
        pluginName, err := ctrl.provisionClaimOperation(claim)
        timeTaken := time.Since(startTime).Seconds()
        metrics.RecordVolumeOperationMetric(pluginName, "provision", timeTaken, err)
        return err
    })
    return nil
}

// provisionClaimOperation provisions a volume. This method is running in
// standalone goroutine and already has all necessary locks.
func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.PersistentVolumeClaim) {
func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.PersistentVolumeClaim) (string, error) {
    claimClass := v1helper.GetPersistentVolumeClaimClass(claim)
    glog.V(4).Infof("provisionClaimOperation [%s] started, class: %q", claimToClaimKey(claim), claimClass)

@@ -1358,7 +1391,12 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
        glog.V(2).Infof("error finding provisioning plugin for claim %s: %v", claimToClaimKey(claim), err)
        // The controller will retry provisioning the volume in every
        // syncVolume() call.
        return
        return "", err
    }

    var pluginName string
    if plugin != nil {
        pluginName = plugin.GetPluginName()
    }

    // Add provisioner annotation so external provisioners know when to start

@@ -1366,7 +1404,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
    if err != nil {
        // Save failed, the controller will retry in the next sync
        glog.V(2).Infof("error saving claim %s: %v", claimToClaimKey(claim), err)
        return
        return pluginName, err
    }
    claim = newClaim

@@ -1377,7 +1415,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
        msg := fmt.Sprintf("waiting for a volume to be created, either by external provisioner %q or manually created by system administrator", storageClass.Provisioner)
        ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, events.ExternalProvisioning, msg)
        glog.V(3).Infof("provisioning claim %q: %s", claimToClaimKey(claim), msg)
        return
        return pluginName, nil
    }

    // internal provisioning

@@ -1391,7 +1429,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
    if err == nil && volume != nil {
        // Volume has been already provisioned, nothing to do.
        glog.V(4).Infof("provisionClaimOperation [%s]: volume already exists, skipping", claimToClaimKey(claim))
        return
        return pluginName, err
    }

    // Prepare a claimRef to the claim early (to fail before a volume is

@@ -1399,7 +1437,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
    claimRef, err := ref.GetReference(scheme.Scheme, claim)
    if err != nil {
        glog.V(3).Infof("unexpected error getting claim reference: %v", err)
        return
        return pluginName, err
    }

    // Gather provisioning options

@@ -1424,7 +1462,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
        strerr := fmt.Sprintf("Mount options are not supported by the provisioner but StorageClass %q has mount options %v", storageClass.Name, options.MountOptions)
        glog.V(2).Infof("Mount options are not supported by the provisioner but claim %q's StorageClass %q has mount options %v", claimToClaimKey(claim), storageClass.Name, options.MountOptions)
        ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
        return
        return pluginName, fmt.Errorf("provisioner %q doesn't support mount options", plugin.GetPluginName())
    }

    // Provision the volume

@@ -1433,29 +1471,26 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
        strerr := fmt.Sprintf("Failed to create provisioner: %v", err)
        glog.V(2).Infof("failed to create provisioner for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err)
        ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
        return
        return pluginName, err
    }

    var selectedNode *v1.Node = nil
    var allowedTopologies []v1.TopologySelectorTerm = nil
    if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
        if nodeName, ok := claim.Annotations[annSelectedNode]; ok {
            selectedNode, err = ctrl.NodeLister.Get(nodeName)
            if err != nil {
                strerr := fmt.Sprintf("Failed to get target node: %v", err)
                glog.V(3).Infof("unexpected error getting target node %q for claim %q: %v", nodeName, claimToClaimKey(claim), err)
                ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
                return
            }
    if nodeName, ok := claim.Annotations[annSelectedNode]; ok {
        selectedNode, err = ctrl.NodeLister.Get(nodeName)
        if err != nil {
            strerr := fmt.Sprintf("Failed to get target node: %v", err)
            glog.V(3).Infof("unexpected error getting target node %q for claim %q: %v", nodeName, claimToClaimKey(claim), err)
            ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
            return pluginName, err
        }
        allowedTopologies = storageClass.AllowedTopologies
    }
    allowedTopologies := storageClass.AllowedTopologies

    opComplete := util.OperationCompleteHook(plugin.GetPluginName(), "volume_provision")
    volume, err = provisioner.Provision(selectedNode, allowedTopologies)
    opComplete(&err)
    if err != nil {
        // Other places of failure have nothing to do with DynamicProvisioningScheduling,
        // Other places of failure have nothing to do with VolumeScheduling,
        // so just let controller retry in the next sync. We'll only call func
        // rescheduleProvisioning here when the underlying provisioning actually failed.
        ctrl.rescheduleProvisioning(claim)

@@ -1463,7 +1498,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
        strerr := fmt.Sprintf("Failed to provision volume with StorageClass %q: %v", storageClass.Name, err)
        glog.V(2).Infof("failed to provision volume for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err)
        ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
        return
        return pluginName, err
    }

    glog.V(3).Infof("volume %q for claim %q created", volume.Name, claimToClaimKey(claim))

@@ -1518,7 +1553,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
        var deleteErr error
        var deleted bool
        for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ {
            deleted, deleteErr = ctrl.doDeleteVolume(volume)
            _, deleted, deleteErr = ctrl.doDeleteVolume(volume)
            if deleteErr == nil && deleted {
                // Delete succeeded
                glog.V(4).Infof("provisionClaimOperation [%s]: cleaning volume %s succeeded", claimToClaimKey(claim), volume.Name)

@@ -1548,6 +1583,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
        msg := fmt.Sprintf("Successfully provisioned volume %s using %s", volume.Name, plugin.GetPluginName())
        ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, events.ProvisioningSucceeded, msg)
    }
    return pluginName, nil
}

// rescheduleProvisioning signals back to the scheduler to retry dynamic provisioning
68 vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_test.go (generated, vendored)
@@ -237,12 +237,21 @@ func addVolumeAnnotation(volume *v1.PersistentVolume, annName, annValue string)
    return volume
}

func makePVCClass(scName *string) *v1.PersistentVolumeClaim {
    return &v1.PersistentVolumeClaim{
func makePVCClass(scName *string, hasSelectNodeAnno bool) *v1.PersistentVolumeClaim {
    claim := &v1.PersistentVolumeClaim{
        ObjectMeta: metav1.ObjectMeta{
            Annotations: map[string]string{},
        },
        Spec: v1.PersistentVolumeClaimSpec{
            StorageClassName: scName,
        },
    }

    if hasSelectNodeAnno {
        claim.Annotations[annSelectedNode] = "node-name"
    }

    return claim
}

func makeStorageClass(scName string, mode *storagev1.VolumeBindingMode) *storagev1.StorageClass {

@@ -271,26 +280,30 @@ func TestDelayBinding(t *testing.T) {
        shouldFail bool
    }{
        "nil-class": {
            pvc: makePVCClass(nil),
            pvc: makePVCClass(nil, false),
            shouldDelay: false,
        },
        "class-not-found": {
            pvc: makePVCClass(&classNotHere),
            pvc: makePVCClass(&classNotHere, false),
            shouldDelay: false,
        },
        "no-mode-class": {
            pvc: makePVCClass(&classNoMode),
            pvc: makePVCClass(&classNoMode, false),
            shouldDelay: false,
            shouldFail: true,
        },
        "immediate-mode-class": {
            pvc: makePVCClass(&classImmediateMode),
            pvc: makePVCClass(&classImmediateMode, false),
            shouldDelay: false,
        },
        "wait-mode-class": {
            pvc: makePVCClass(&classWaitMode),
            pvc: makePVCClass(&classWaitMode, false),
            shouldDelay: true,
        },
        "wait-mode-class-with-selectedNode": {
            pvc: makePVCClass(&classWaitMode, true),
            shouldDelay: false,
        },
    }

    classes := []*storagev1.StorageClass{

@@ -314,7 +327,7 @@ func TestDelayBinding(t *testing.T) {

    // When volumeScheduling feature gate is disabled, should always be delayed
    name := "volumeScheduling-feature-disabled"
    shouldDelay, err := ctrl.shouldDelayBinding(makePVCClass(&classWaitMode))
    shouldDelay, err := ctrl.shouldDelayBinding(makePVCClass(&classWaitMode, false))
    if err != nil {
        t.Errorf("Test %q returned error: %v", name, err)
    }

@@ -338,43 +351,4 @@ func TestDelayBinding(t *testing.T) {
            t.Errorf("Test %q returned unexpected %v", name, test.shouldDelay)
        }
    }

    // When dynamicProvisioningScheduling feature gate is disabled, should be delayed,
    // even if the pvc has selectedNode annotation.
    provisionedClaim := makePVCClass(&classWaitMode)
    provisionedClaim.Annotations = map[string]string{annSelectedNode: "node-name"}
    name = "dynamicProvisioningScheduling-feature-disabled"
    shouldDelay, err = ctrl.shouldDelayBinding(provisionedClaim)
    if err != nil {
        t.Errorf("Test %q returned error: %v", name, err)
    }
    if !shouldDelay {
        t.Errorf("Test %q returned false, expected true", name)
    }

    // Enable DynamicProvisioningScheduling feature gate
    utilfeature.DefaultFeatureGate.Set("DynamicProvisioningScheduling=true")
    defer utilfeature.DefaultFeatureGate.Set("DynamicProvisioningScheduling=false")

    // When the pvc does not have selectedNode annotation, should be delayed,
    // even if dynamicProvisioningScheduling feature gate is enabled.
    name = "dynamicProvisioningScheduling-feature-enabled, selectedNode-annotation-not-set"
    shouldDelay, err = ctrl.shouldDelayBinding(makePVCClass(&classWaitMode))
    if err != nil {
        t.Errorf("Test %q returned error: %v", name, err)
    }
    if !shouldDelay {
        t.Errorf("Test %q returned false, expected true", name)
    }

    // Should not be delayed when dynamicProvisioningScheduling feature gate is enabled,
    // and the pvc has selectedNode annotation.
    name = "dynamicProvisioningScheduling-feature-enabled, selectedNode-annotation-set"
    shouldDelay, err = ctrl.shouldDelayBinding(provisionedClaim)
    if err != nil {
        t.Errorf("Test %q returned error: %v", name, err)
    }
    if shouldDelay {
        t.Errorf("Test %q returned true, expected false", name)
    }
}
@@ -83,7 +83,8 @@ func (e *errObjectName) Error() string {
// Restore() sets the latest object pointer back to the informer object.
// Get/List() always returns the latest object pointer.
type assumeCache struct {
    mutex sync.Mutex
    // Synchronizes updates to store
    rwMutex sync.RWMutex

    // describes the object stored
    description string

@@ -155,8 +156,8 @@ func (c *assumeCache) add(obj interface{}) {
        return
    }

    c.mutex.Lock()
    defer c.mutex.Unlock()
    c.rwMutex.Lock()
    defer c.rwMutex.Unlock()

    if objInfo, _ := c.getObjInfo(name); objInfo != nil {
        newVersion, err := c.getObjVersion(name, obj)

@@ -199,8 +200,8 @@ func (c *assumeCache) delete(obj interface{}) {
        return
    }

    c.mutex.Lock()
    defer c.mutex.Unlock()
    c.rwMutex.Lock()
    defer c.rwMutex.Unlock()

    objInfo := &objInfo{name: name}
    err = c.store.Delete(objInfo)

@@ -239,8 +240,8 @@ func (c *assumeCache) getObjInfo(name string) (*objInfo, error) {
}

func (c *assumeCache) Get(objName string) (interface{}, error) {
    c.mutex.Lock()
    defer c.mutex.Unlock()
    c.rwMutex.RLock()
    defer c.rwMutex.RUnlock()

    objInfo, err := c.getObjInfo(objName)
    if err != nil {

@@ -250,8 +251,8 @@ func (c *assumeCache) Get(objName string) (interface{}, error) {
}

func (c *assumeCache) List(indexObj interface{}) []interface{} {
    c.mutex.Lock()
    defer c.mutex.Unlock()
    c.rwMutex.RLock()
    defer c.rwMutex.RUnlock()

    allObjs := []interface{}{}
    objs, err := c.store.Index(c.indexName, &objInfo{latestObj: indexObj})

@@ -277,8 +278,8 @@ func (c *assumeCache) Assume(obj interface{}) error {
        return &errObjectName{err}
    }

    c.mutex.Lock()
    defer c.mutex.Unlock()
    c.rwMutex.Lock()
    defer c.rwMutex.Unlock()

    objInfo, err := c.getObjInfo(name)
    if err != nil {

@@ -306,8 +307,8 @@ func (c *assumeCache) Assume(obj interface{}) error {
}

func (c *assumeCache) Restore(objName string) {
    c.mutex.Lock()
    defer c.mutex.Unlock()
    c.rwMutex.Lock()
    defer c.rwMutex.Unlock()

    objInfo, err := c.getObjInfo(objName)
    if err != nil {
@@ -456,7 +456,7 @@ func TestAssumeUpdatePVCCache(t *testing.T) {

    // Assume PVC
    newPVC := pvc.DeepCopy()
    newPVC.Annotations["volume.alpha.kubernetes.io/selected-node"] = "test-node"
    newPVC.Annotations[annSelectedNode] = "test-node"
    if err := cache.Assume(newPVC); err != nil {
        t.Fatalf("failed to assume PVC: %v", err)
    }
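The assume-cache hunks above swap the plain sync.Mutex for a sync.RWMutex: mutating paths (add, delete, Assume, Restore) take the exclusive lock, while Get and List drop to shared read locks so concurrent readers no longer serialize. A self-contained toy version of the pattern (all names illustrative):

package main

import (
    "fmt"
    "sync"
)

type miniCache struct {
    rwMutex sync.RWMutex
    data    map[string]string
}

func (c *miniCache) add(key, value string) {
    c.rwMutex.Lock() // writer: exclusive
    defer c.rwMutex.Unlock()
    c.data[key] = value
}

func (c *miniCache) get(key string) (string, bool) {
    c.rwMutex.RLock() // reader: shared, may run concurrently with other readers
    defer c.rwMutex.RUnlock()
    value, ok := c.data[key]
    return value, ok
}

func main() {
    c := &miniCache{data: map[string]string{}}
    c.add("pv-1", "assumed")
    fmt.Println(c.get("pv-1"))
}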
291 vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder.go (generated, vendored)
@@ -19,18 +19,18 @@ package persistentvolume
import (
    "fmt"
    "sort"
    "time"

    "github.com/golang/glog"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/apimachinery/pkg/util/wait"
    coreinformers "k8s.io/client-go/informers/core/v1"
    storageinformers "k8s.io/client-go/informers/storage/v1"
    clientset "k8s.io/client-go/kubernetes"
    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
    "k8s.io/kubernetes/pkg/features"
    volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

@@ -44,16 +44,19 @@ import (
// a. Invokes all predicate functions, parallelized across nodes. FindPodVolumes() is invoked here.
// b. Invokes all priority functions. Future/TBD
// c. Selects the best node for the Pod.
// d. Cache the node selection for the Pod. (Assume phase)
// d. Cache the node selection for the Pod. AssumePodVolumes() is invoked here.
//    i. If PVC binding is required, cache in-memory only:
//       * Updated PV objects for prebinding to the corresponding PVCs.
//       * For the pod, which PVs need API updates.
//       AssumePodVolumes() is invoked here. Then BindPodVolumes() is called asynchronously by the
//       scheduler. After BindPodVolumes() is complete, the Pod is added back to the scheduler queue
//       to be processed again until all PVCs are bound.
//    ii. If PVC binding is not required, cache the Pod->Node binding in the scheduler's pod cache,
//       and asynchronously bind the Pod to the Node. This is handled in the scheduler and not here.
// 2. Once the assume operation is done, the scheduler processes the next Pod in the scheduler queue
//       * For manual binding: update PV objects for prebinding to the corresponding PVCs.
//       * For dynamic provisioning: update PVC object with a selected node from c)
//       * For the pod, which PVCs and PVs need API updates.
//    ii. Afterwards, the main scheduler caches the Pod->Node binding in the scheduler's pod cache,
//       This is handled in the scheduler and not here.
// e. Asynchronously bind volumes and pod in a separate goroutine
//    i. BindPodVolumes() is called first. It makes all the necessary API updates and waits for
//       PV controller to fully bind and provision the PVCs. If binding fails, the Pod is sent
//       back through the scheduler.
//    ii. After BindPodVolumes() is complete, then the scheduler does the final Pod->Node binding.
// 2. Once all the assume operations are done in d), the scheduler processes the next Pod in the scheduler queue
//    while the actual binding operation occurs in the background.
type SchedulerVolumeBinder interface {
    // FindPodVolumes checks if all of a Pod's PVCs can be satisfied by the node.

@@ -73,18 +76,18 @@ type SchedulerVolumeBinder interface {
    // 2. Take the PVCs that need provisioning and update the PVC cache with related
    //    annotations set.
    //
    // It returns true if all volumes are fully bound, and returns true if any volume binding/provisioning
    // API operation needs to be done afterwards.
    // It returns true if all volumes are fully bound
    //
    // This function will modify assumedPod with the node name.
    // This function is called serially.
    AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, bindingRequired bool, err error)
    AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, err error)

    // BindPodVolumes will:
    // 1. Initiate the volume binding by making the API call to prebind the PV
    //    to its matching PVC.
    // 2. Trigger the volume provisioning by making the API call to set related
    //    annotations on the PVC
    // 3. Wait for PVCs to be completely bound by the PV controller
    //
    // This function can be called in parallel.
    BindPodVolumes(assumedPod *v1.Pod) error
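Read together, the workflow comment and the interface describe a three-phase protocol. A hypothetical sketch of the scheduler-side sequence (only the three interface calls are taken from this file; the surrounding function, inputs, and error handling are invented for illustration):

// Assumes this file's imports plus "fmt"; binder, pod, and node come from the scheduler.
func scheduleVolumes(binder SchedulerVolumeBinder, pod *v1.Pod, node *v1.Node) error {
    // Predicate phase (1a): can this node satisfy the pod's PVCs?
    unboundSatisfied, boundSatisfied, err := binder.FindPodVolumes(pod, node)
    if err != nil || !unboundSatisfied || !boundSatisfied {
        return fmt.Errorf("node %q cannot satisfy pod volumes: %v", node.Name, err)
    }

    // Assume phase (1d): cache the binding decisions in memory only.
    allBound, err := binder.AssumePodVolumes(pod, node.Name)
    if err != nil {
        return err
    }

    // Bind phase (1e): API updates and the wait happen off the scheduling path.
    if !allBound {
        go func() {
            if bindErr := binder.BindPodVolumes(pod); bindErr != nil {
                glog.V(4).Infof("binding failed, pod %q goes back through the scheduler: %v", getPodName(pod), bindErr)
            }
        }()
    }
    return nil
}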
@@ -102,6 +105,9 @@ type volumeBinder struct {
    // Stores binding decisions that were made in FindPodVolumes for use in AssumePodVolumes.
    // AssumePodVolumes modifies the bindings again for use in BindPodVolumes.
    podBindingCache PodBindingCache

    // Amount of time to wait for the bind operation to succeed
    bindTimeout time.Duration
}

// NewVolumeBinder sets up all the caches needed for the scheduler to make volume binding decisions.

@@ -109,7 +115,8 @@ func NewVolumeBinder(
    kubeClient clientset.Interface,
    pvcInformer coreinformers.PersistentVolumeClaimInformer,
    pvInformer coreinformers.PersistentVolumeInformer,
    storageClassInformer storageinformers.StorageClassInformer) SchedulerVolumeBinder {
    storageClassInformer storageinformers.StorageClassInformer,
    bindTimeout time.Duration) SchedulerVolumeBinder {

    // TODO: find better way...
    ctrl := &PersistentVolumeController{

@@ -122,6 +129,7 @@ func NewVolumeBinder(
        pvcCache: NewPVCAssumeCache(pvcInformer.Informer()),
        pvCache: NewPVAssumeCache(pvInformer.Informer()),
        podBindingCache: NewPodBindingCache(),
        bindTimeout: bindTimeout,
    }

    return b

@@ -151,7 +159,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume

    // Immediate claims should be bound
    if len(unboundClaimsImmediate) > 0 {
        return false, false, fmt.Errorf("pod has unbound PersistentVolumeClaims")
        return false, false, fmt.Errorf("pod has unbound immediate PersistentVolumeClaims")
    }

    // Check PV node affinity on bound volumes

@@ -169,13 +177,11 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
        return false, false, err
    }

    if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
        // Try to provision for unbound volumes
        if !unboundVolumesSatisfied {
            unboundVolumesSatisfied, err = b.checkVolumeProvisions(pod, claimsToProvision, node)
            if err != nil {
                return false, false, err
            }
    // Try to provision for unbound volumes
    if !unboundVolumesSatisfied {
        unboundVolumesSatisfied, err = b.checkVolumeProvisions(pod, claimsToProvision, node)
        if err != nil {
            return false, false, err
        }
    }

@@ -187,22 +193,24 @@
// in podBindingCache for the chosen node, and:
// 1. Update the pvCache with the new prebound PV.
// 2. Update the pvcCache with the new PVCs with annotations set
// It will update podBindingCache again with the PVs and PVCs that need an API update.
func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound, bindingRequired bool, err error) {
// 3. Update podBindingCache again with cached API updates for PVs and PVCs.
func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, err error) {
    podName := getPodName(assumedPod)

    glog.V(4).Infof("AssumePodVolumes for pod %q, node %q", podName, nodeName)

    if allBound := b.arePodVolumesBound(assumedPod); allBound {
        glog.V(4).Infof("AssumePodVolumes for pod %q, node %q: all PVCs bound and nothing to do", podName, nodeName)
        return true, false, nil
        return true, nil
    }

    assumedPod.Spec.NodeName = nodeName
    // Assume PV
    claimsToBind := b.podBindingCache.GetBindings(assumedPod, nodeName)
    newBindings := []*bindingInfo{}

    claimsToBind := b.podBindingCache.GetBindings(assumedPod, nodeName)
    claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, nodeName)

    // Assume PV
    newBindings := []*bindingInfo{}
    for _, binding := range claimsToBind {
        newPV, dirty, err := b.ctrl.getBindVolumeToClaim(binding.pv, binding.pvc)
        glog.V(5).Infof("AssumePodVolumes: getBindVolumeToClaim for pod %q, PV %q, PVC %q. newPV %p, dirty %v, err: %v",

@@ -214,29 +222,20 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al
            err)
        if err != nil {
            b.revertAssumedPVs(newBindings)
            return false, true, err
            return false, err
        }
        // TODO: can we assume every time?
        if dirty {
            err = b.pvCache.Assume(newPV)
            if err != nil {
                b.revertAssumedPVs(newBindings)
                return false, true, err
                return false, err
            }

            newBindings = append(newBindings, &bindingInfo{pv: newPV, pvc: binding.pvc})
        }
    }

    // Don't update cached bindings if no API updates are needed. This can happen if we
    // previously updated the PV object and are waiting for the PV controller to finish binding.
    if len(newBindings) != 0 {
        bindingRequired = true
        b.podBindingCache.UpdateBindings(assumedPod, nodeName, newBindings)
        newBindings = append(newBindings, &bindingInfo{pv: newPV, pvc: binding.pvc})
    }

    // Assume PVCs
    claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, nodeName)

    newProvisionedPVCs := []*v1.PersistentVolumeClaim{}
    for _, claim := range claimsToProvision {
        // The claims from method args can be pointing to watcher cache. We must not

@@ -253,50 +252,37 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al
        newProvisionedPVCs = append(newProvisionedPVCs, claimClone)
    }

    if len(newProvisionedPVCs) != 0 {
        bindingRequired = true
        b.podBindingCache.UpdateProvisionedPVCs(assumedPod, nodeName, newProvisionedPVCs)
    }
    // Update cache with the assumed pvcs and pvs
    // Even if length is zero, update the cache with an empty slice to indicate that no
    // operations are needed
    b.podBindingCache.UpdateBindings(assumedPod, nodeName, newBindings)
    b.podBindingCache.UpdateProvisionedPVCs(assumedPod, nodeName, newProvisionedPVCs)

    return
}

// BindPodVolumes gets the cached bindings and PVCs to provision in podBindingCache
// and makes the API update for those PVs/PVCs.
// BindPodVolumes gets the cached bindings and PVCs to provision in podBindingCache,
// makes the API update for those PVs/PVCs, and waits for the PVCs to be completely bound
// by the PV controller.
func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod) error {
    podName := getPodName(assumedPod)
    glog.V(4).Infof("BindPodVolumes for pod %q", podName)
    glog.V(4).Infof("BindPodVolumes for pod %q, node %q", podName, assumedPod.Spec.NodeName)

    bindings := b.podBindingCache.GetBindings(assumedPod, assumedPod.Spec.NodeName)
    claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, assumedPod.Spec.NodeName)

    // Do the actual prebinding. Let the PV controller take care of the rest
    // There is no API rollback if the actual binding fails
    for i, bindingInfo := range bindings {
        glog.V(5).Infof("BindPodVolumes: Pod %q, binding PV %q to PVC %q", podName, bindingInfo.pv.Name, bindingInfo.pvc.Name)
        _, err := b.ctrl.updateBindVolumeToClaim(bindingInfo.pv, bindingInfo.pvc, false)
        if err != nil {
            // only revert assumed cached updates for volumes we haven't successfully bound
            b.revertAssumedPVs(bindings[i:])
            // Revert all of the assumed cached updates for claims,
            // since no actual API update will be done
            b.revertAssumedPVCs(claimsToProvision)
            return err
        }
    // Start API operations
    err := b.bindAPIUpdate(podName, bindings, claimsToProvision)
    if err != nil {
        return err
    }

    // Update claims objects to trigger volume provisioning. Let the PV controller take care of the rest
    // PV controller is expected to signal back by removing related annotations if actual provisioning fails
    for i, claim := range claimsToProvision {
        if _, err := b.ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim); err != nil {
            glog.V(4).Infof("updating PersistentVolumeClaim[%s] failed: %v", getPVCName(claim), err)
            // only revert assumed cached updates for claims we haven't successfully updated
            b.revertAssumedPVCs(claimsToProvision[i:])
            return err
        }
    }

    return nil
    return wait.Poll(time.Second, b.bindTimeout, func() (bool, error) {
        // Get cached values every time in case the pod gets deleted
        bindings = b.podBindingCache.GetBindings(assumedPod, assumedPod.Spec.NodeName)
        claimsToProvision = b.podBindingCache.GetProvisionedPVCs(assumedPod, assumedPod.Spec.NodeName)
        return b.checkBindings(assumedPod, bindings, claimsToProvision)
    })
}
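The rewritten BindPodVolumes above now blocks on wait.Poll from k8s.io/apimachinery until checkBindings reports the PVCs fully bound or the configured bindTimeout elapses. A minimal runnable sketch of that polling contract (the interval, timeout, and condition body are illustrative):

package main

import (
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

func main() {
    checks := 0
    // wait.Poll calls the condition once per interval: (true, nil) stops with
    // success, (false, nil) keeps waiting, and a non-nil error aborts at once,
    // which is what sends a pod back through scheduling in the binder above.
    err := wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
        checks++
        return checks >= 3, nil // pretend the PVCs become fully bound on the third check
    })
    fmt.Printf("done after %d checks, err: %v\n", checks, err)
}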
|
||||
|
||||
func getPodName(pod *v1.Pod) string {
|
||||
@@ -307,12 +293,131 @@ func getPVCName(pvc *v1.PersistentVolumeClaim) string {
|
||||
return pvc.Namespace + "/" + pvc.Name
|
||||
}
|
||||
|
||||
func (b *volumeBinder) isVolumeBound(namespace string, vol *v1.Volume, checkFullyBound bool) (bool, *v1.PersistentVolumeClaim, error) {
|
||||
// bindAPIUpdate gets the cached bindings and PVCs to provision in podBindingCache
|
||||
// and makes the API update for those PVs/PVCs.
|
||||
func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) error {
|
||||
if bindings == nil {
|
||||
return fmt.Errorf("failed to get cached bindings for pod %q", podName)
|
||||
}
|
||||
if claimsToProvision == nil {
|
||||
return fmt.Errorf("failed to get cached claims to provision for pod %q", podName)
|
||||
}
|
||||
|
||||
lastProcessedBinding := 0
|
||||
lastProcessedProvisioning := 0
|
||||
defer func() {
|
||||
// only revert assumed cached updates for volumes we haven't successfully bound
|
||||
if lastProcessedBinding < len(bindings) {
|
||||
b.revertAssumedPVs(bindings[lastProcessedBinding:])
|
||||
}
|
||||
// only revert assumed cached updates for claims we haven't updated,
|
||||
if lastProcessedProvisioning < len(claimsToProvision) {
|
||||
b.revertAssumedPVCs(claimsToProvision[lastProcessedProvisioning:])
|
||||
}
|
||||
}()
|
||||
|
||||
var (
|
||||
binding *bindingInfo
|
||||
claim *v1.PersistentVolumeClaim
|
||||
)
|
||||
|
||||
// Do the actual prebinding. Let the PV controller take care of the rest
|
||||
// There is no API rollback if the actual binding fails
|
||||
for _, binding = range bindings {
|
||||
glog.V(5).Infof("bindAPIUpdate: Pod %q, binding PV %q to PVC %q", podName, binding.pv.Name, binding.pvc.Name)
|
||||
// TODO: does it hurt if we make an api call and nothing needs to be updated?
|
||||
if _, err := b.ctrl.updateBindVolumeToClaim(binding.pv, binding.pvc, false); err != nil {
|
||||
return err
|
||||
}
|
||||
lastProcessedBinding++
|
||||
}
|
||||
|
||||
// Update claims objects to trigger volume provisioning. Let the PV controller take care of the rest
|
||||
// PV controller is expect to signal back by removing related annotations if actual provisioning fails
|
||||
for _, claim = range claimsToProvision {
|
||||
glog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim))
|
||||
if _, err := b.ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim); err != nil {
|
||||
return err
|
||||
}
|
||||
lastProcessedProvisioning++
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkBindings runs through all the PVCs in the Pod and checks:
|
||||
// * if the PVC is fully bound
|
||||
// * if there are any conditions that require binding to fail and be retried
|
||||
//
|
||||
// It returns true when all of the Pod's PVCs are fully bound, and error if
|
||||
// binding (and scheduling) needs to be retried
|
||||
func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*bindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) (bool, error) {
|
||||
podName := getPodName(pod)
|
||||
if bindings == nil {
|
||||
return false, fmt.Errorf("failed to get cached bindings for pod %q", podName)
|
||||
}
|
||||
if claimsToProvision == nil {
|
||||
return false, fmt.Errorf("failed to get cached claims to provision for pod %q", podName)
|
||||
}
|
||||
|
||||
for _, binding := range bindings {
|
||||
// Check for any conditions that might require scheduling retry
|
||||
|
||||
// Check if pv still exists
|
||||
pv, err := b.pvCache.GetPV(binding.pv.Name)
|
||||
if err != nil || pv == nil {
|
||||
return false, fmt.Errorf("failed to check pv binding: %v", err)
|
||||
}
|
||||
|
||||
// Check if pv.ClaimRef got dropped by unbindVolume()
|
||||
if pv.Spec.ClaimRef == nil || pv.Spec.ClaimRef.UID == "" {
|
||||
return false, fmt.Errorf("ClaimRef got reset for pv %q", pv.Name)
|
||||
}
|
||||
|
||||
// Check if pvc is fully bound
|
||||
if isBound, _, err := b.isPVCBound(binding.pvc.Namespace, binding.pvc.Name); !isBound || err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// TODO; what if pvc is bound to the wrong pv? It means our assume cache should be reverted.
|
||||
// Or will pv controller cleanup the pv.ClaimRef?
|
||||
}
|
||||
|
||||
for _, claim := range claimsToProvision {
|
||||
bound, pvc, err := b.isPVCBound(claim.Namespace, claim.Name)
|
||||
if err != nil || pvc == nil {
|
||||
return false, fmt.Errorf("failed to check pvc binding: %v", err)
|
||||
}
|
||||
|
||||
// Check if selectedNode annotation is still set
|
||||
if pvc.Annotations == nil {
|
||||
return false, fmt.Errorf("selectedNode annotation reset for PVC %q", pvc.Name)
|
||||
}
|
||||
selectedNode := pvc.Annotations[annSelectedNode]
|
||||
if selectedNode != pod.Spec.NodeName {
|
||||
return false, fmt.Errorf("selectedNode annotation value %q not set to scheduled node %q", selectedNode, pod.Spec.NodeName)
|
||||
}
|
||||
|
||||
if !bound {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// All pvs and pvcs that we operated on are bound
|
||||
glog.V(4).Infof("All PVCs for pod %q are bound", podName)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (b *volumeBinder) isVolumeBound(namespace string, vol *v1.Volume) (bool, *v1.PersistentVolumeClaim, error) {
	if vol.PersistentVolumeClaim == nil {
		return true, nil, nil
	}

	pvcName := vol.PersistentVolumeClaim.ClaimName
	return b.isPVCBound(namespace, pvcName)
}

func (b *volumeBinder) isPVCBound(namespace, pvcName string) (bool, *v1.PersistentVolumeClaim, error) {
	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name: pvcName,
@@ -326,17 +431,13 @@ func (b *volumeBinder) isVolumeBound(namespace string, vol *v1.Volume, checkFull

	pvName := pvc.Spec.VolumeName
	if pvName != "" {
		if checkFullyBound {
			if metav1.HasAnnotation(pvc.ObjectMeta, annBindCompleted) {
				glog.V(5).Infof("PVC %q is fully bound to PV %q", getPVCName(pvc), pvName)
				return true, pvc, nil
			} else {
				glog.V(5).Infof("PVC %q is not fully bound to PV %q", getPVCName(pvc), pvName)
				return false, pvc, nil
			}
		if metav1.HasAnnotation(pvc.ObjectMeta, annBindCompleted) {
			glog.V(5).Infof("PVC %q is fully bound to PV %q", getPVCName(pvc), pvName)
			return true, pvc, nil
		} else {
			glog.V(5).Infof("PVC %q is not fully bound to PV %q", getPVCName(pvc), pvName)
			return false, pvc, nil
		}
		glog.V(5).Infof("PVC %q is bound or prebound to PV %q", getPVCName(pvc), pvName)
		return true, pvc, nil
	}

	glog.V(5).Infof("PVC %q is not bound", getPVCName(pvc))
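To make the bind-completed check concrete: a PVC that this function reports as fully bound carries both the volume reference and the PV controller's completion annotation. A hedged example object (names are placeholders):

	fullyBound := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "example-pvc",
			Annotations: map[string]string{annBindCompleted: "yes"},
		},
		// VolumeName is filled in by the PV controller once binding succeeds.
		Spec: v1.PersistentVolumeClaimSpec{VolumeName: "example-pv"},
	}
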
@@ -346,7 +447,7 @@ func (b *volumeBinder) isVolumeBound(namespace string, vol *v1.Volume, checkFull
// arePodVolumesBound returns true if all volumes are fully bound
func (b *volumeBinder) arePodVolumesBound(pod *v1.Pod) bool {
	for _, vol := range pod.Spec.Volumes {
		if isBound, _, _ := b.isVolumeBound(pod.Namespace, &vol, true); !isBound {
		if isBound, _, _ := b.isVolumeBound(pod.Namespace, &vol); !isBound {
			// Pod has at least one PVC that needs binding
			return false
		}
@@ -362,7 +463,7 @@ func (b *volumeBinder) getPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentV
	unboundClaims = []*bindingInfo{}

	for _, vol := range pod.Spec.Volumes {
		volumeBound, pvc, err := b.isVolumeBound(pod.Namespace, &vol, false)
		volumeBound, pvc, err := b.isVolumeBound(pod.Namespace, &vol)
		if err != nil {
			return nil, nil, nil, err
		}
@@ -376,7 +477,8 @@ func (b *volumeBinder) getPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentV
		if err != nil {
			return nil, nil, nil, err
		}
		if delayBinding {
		// Prebound PVCs are treated as unbound immediate binding
		if delayBinding && pvc.Spec.VolumeName == "" {
			// Scheduler path
			unboundClaims = append(unboundClaims, &bindingInfo{pvc: pvc})
		} else {
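The tightened condition means a pre-bound PVC (Spec.VolumeName already set) no longer takes the scheduler path even under a WaitForFirstConsumer class; it is validated like an immediate-binding claim. A sketch of a claim that now skips the scheduler path (values hypothetical):

	preboundClaim := &v1.PersistentVolumeClaim{
		Spec: v1.PersistentVolumeClaimSpec{
			// Pre-set by a user or external controller; with the change above,
			// delayBinding && VolumeName == "" is false, so this claim lands
			// on the immediate-binding branch instead of the scheduler path.
			VolumeName: "pv-node1a",
		},
	}
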
@@ -522,19 +624,6 @@ type bindingInfo struct {
	pv *v1.PersistentVolume
}

// Used in unit test errors
func (b bindingInfo) String() string {
	pvcName := ""
	pvName := ""
	if b.pvc != nil {
		pvcName = getPVCName(b.pvc)
	}
	if b.pv != nil {
		pvName = b.pv.Name
	}
	return fmt.Sprintf("[PVC %q, PV %q]", pvcName, pvName)
}

type byPVCSize []*bindingInfo

func (a byPVCSize) Len() int {
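The listing truncates at the file boundary here, so for context the remainder of byPVCSize implements sort.Interface by requested storage, roughly as follows (a reconstruction from the surrounding definitions, not lines of this diff):

	func (a byPVCSize) Len() int {
		return len(a)
	}

	func (a byPVCSize) Swap(i, j int) {
		a[i], a[j] = a[j], a[i]
	}

	func (a byPVCSize) Less(i, j int) bool {
		iSize := a[i].pvc.Spec.Resources.Requests[v1.ResourceStorage]
		jSize := a[j].pvc.Spec.Resources.Requests[v1.ResourceStorage]
		// Sort claims smallest-first so they match the smallest fitting PVs.
		return iSize.Cmp(jSize) == -1
	}
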
@@ -31,6 +31,8 @@ type PodBindingCache interface {
	UpdateBindings(pod *v1.Pod, node string, bindings []*bindingInfo)

	// GetBindings will return the cached bindings for the given pod and node.
	// A nil return value means that the entry was not found. An empty slice
	// means that no binding operations are needed.
	GetBindings(pod *v1.Pod, node string) []*bindingInfo

	// UpdateProvisionedPVCs will update the cache with the given provisioning decisions
@@ -38,6 +40,8 @@ type PodBindingCache interface {
	UpdateProvisionedPVCs(pod *v1.Pod, node string, provisionings []*v1.PersistentVolumeClaim)

	// GetProvisionedPVCs will return the cached provisioning decisions for the given pod and node.
	// A nil return value means that the entry was not found. An empty slice
	// means that no provisioning operations are needed.
	GetProvisionedPVCs(pod *v1.Pod, node string) []*v1.PersistentVolumeClaim

	// DeleteBindings will remove all cached bindings and provisionings for the given pod.
@@ -46,7 +50,8 @@ type PodBindingCache interface {
}

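The nil-versus-empty convention above is exactly what bindAPIUpdate and checkBindings rely on: nil means the pod was never assumed (fail and retry scheduling), while an empty slice means there is simply no work to do. A short sketch of a conforming caller:

	bindings := cache.GetBindings(pod, node)
	if bindings == nil {
		// Entry missing from the cache: binding must be retried.
		return fmt.Errorf("failed to get cached bindings for pod %q", getPodName(pod))
	}
	// An empty (non-nil) slice is valid and simply yields no API updates.
	for _, binding := range bindings {
		_ = binding // update the corresponding PV/PVC objects here
	}
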
type podBindingCache struct {
	mutex sync.Mutex
	// synchronizes bindingDecisions
	rwMutex sync.RWMutex

	// Key = pod name
	// Value = nodeDecisions
@@ -68,16 +73,16 @@ func NewPodBindingCache() PodBindingCache {
}

func (c *podBindingCache) DeleteBindings(pod *v1.Pod) {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.rwMutex.Lock()
	defer c.rwMutex.Unlock()

	podName := getPodName(pod)
	delete(c.bindingDecisions, podName)
}

func (c *podBindingCache) UpdateBindings(pod *v1.Pod, node string, bindings []*bindingInfo) {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.rwMutex.Lock()
	defer c.rwMutex.Unlock()

	podName := getPodName(pod)
	decisions, ok := c.bindingDecisions[podName]
@@ -97,8 +102,8 @@ func (c *podBindingCache) UpdateBindings(pod *v1.Pod, node string, bindings []*b
}

func (c *podBindingCache) GetBindings(pod *v1.Pod, node string) []*bindingInfo {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.rwMutex.RLock()
	defer c.rwMutex.RUnlock()

	podName := getPodName(pod)
	decisions, ok := c.bindingDecisions[podName]
@@ -113,8 +118,8 @@ func (c *podBindingCache) GetBindings(pod *v1.Pod, node string) []*bindingInfo {
}

func (c *podBindingCache) UpdateProvisionedPVCs(pod *v1.Pod, node string, pvcs []*v1.PersistentVolumeClaim) {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.rwMutex.Lock()
	defer c.rwMutex.Unlock()

	podName := getPodName(pod)
	decisions, ok := c.bindingDecisions[podName]
@@ -134,8 +139,8 @@ func (c *podBindingCache) UpdateProvisionedPVCs(pod *v1.Pod, node string, pvcs [
}

func (c *podBindingCache) GetProvisionedPVCs(pod *v1.Pod, node string) []*v1.PersistentVolumeClaim {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.rwMutex.RLock()
	defer c.rwMutex.RUnlock()

	podName := getPodName(pod)
	decisions, ok := c.bindingDecisions[podName]

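The switch from sync.Mutex to sync.RWMutex lets the two read-only accessors (GetBindings, GetProvisionedPVCs) run concurrently while updates and deletes still serialize. The locking split, in miniature (illustrative only):

	var rwMutex sync.RWMutex

	// Readers share the lock:
	rwMutex.RLock()
	// ... look up bindingDecisions ...
	rwMutex.RUnlock()

	// Writers take it exclusively:
	rwMutex.Lock()
	// ... mutate bindingDecisions ...
	rwMutex.Unlock()
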
21
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder_fake.go
generated
vendored
@@ -16,18 +16,15 @@ limitations under the License.

package persistentvolume

import (
	"k8s.io/api/core/v1"
)
import "k8s.io/api/core/v1"

type FakeVolumeBinderConfig struct {
	AllBound              bool
	FindUnboundSatsified  bool
	FindBoundSatsified    bool
	FindErr               error
	AssumeBindingRequired bool
	AssumeErr             error
	BindErr               error
	AllBound             bool
	FindUnboundSatsified bool
	FindBoundSatsified   bool
	FindErr              error
	AssumeErr            error
	BindErr              error
}

// NewVolumeBinder sets up all the caches needed for the scheduler to make
@@ -48,9 +45,9 @@ func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVo
	return b.config.FindUnboundSatsified, b.config.FindBoundSatsified, b.config.FindErr
}

func (b *FakeVolumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (bool, bool, error) {
func (b *FakeVolumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (bool, error) {
	b.AssumeCalled = true
	return b.config.AllBound, b.config.AssumeBindingRequired, b.config.AssumeErr
	return b.config.AllBound, b.config.AssumeErr
}

func (b *FakeVolumeBinder) BindPodVolumes(assumedPod *v1.Pod) error {

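With AssumePodVolumes narrowed from (bool, bool, error) to (bool, error), callers of both the fake and the real binder drop the old bindingRequired result. A hedged sketch of an updated call site:

	allBound, err := binder.AssumePodVolumes(pod, "node1")
	if err != nil {
		return err
	}
	if !allBound {
		// BindPodVolumes now always performs (and waits for) the API updates;
		// there is no separate bindingRequired signal left to consult.
	}
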
618
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder_test.go
generated
vendored
@@ -20,6 +20,7 @@ import (
	"fmt"
	"reflect"
	"testing"
	"time"

	"github.com/golang/glog"

@@ -38,20 +39,30 @@ import (
)

var (
	unboundPVC                  = makeTestPVC("unbound-pvc", "1G", pvcUnbound, "", "1", &waitClass)
	unboundPVC2                 = makeTestPVC("unbound-pvc2", "5G", pvcUnbound, "", "1", &waitClass)
	preboundPVC                 = makeTestPVC("prebound-pvc", "1G", pvcPrebound, "pv-node1a", "1", &waitClass)
	boundPVC                    = makeTestPVC("bound-pvc", "1G", pvcBound, "pv-bound", "1", &waitClass)
	boundPVC2                   = makeTestPVC("bound-pvc2", "1G", pvcBound, "pv-bound2", "1", &waitClass)
	badPVC                      = makeBadPVC()
	immediateUnboundPVC         = makeTestPVC("immediate-unbound-pvc", "1G", pvcUnbound, "", "1", &immediateClass)
	immediateBoundPVC           = makeTestPVC("immediate-bound-pvc", "1G", pvcBound, "pv-bound-immediate", "1", &immediateClass)
	provisionedPVC              = makeTestPVC("provisioned-pvc", "1Gi", pvcUnbound, "", "1", &waitClass)
	provisionedPVC2             = makeTestPVC("provisioned-pvc2", "1Gi", pvcUnbound, "", "1", &waitClass)
	provisionedPVCHigherVersion = makeTestPVC("provisioned-pvc2", "1Gi", pvcUnbound, "", "2", &waitClass)
	noProvisionerPVC            = makeTestPVC("no-provisioner-pvc", "1Gi", pvcUnbound, "", "1", &provisionNotSupportClass)
	topoMismatchPVC             = makeTestPVC("topo-mismatch-pvc", "1Gi", pvcUnbound, "", "1", &topoMismatchClass)
	// PVCs for manual binding
	// TODO: clean up all of these
	unboundPVC          = makeTestPVC("unbound-pvc", "1G", "", pvcUnbound, "", "1", &waitClass)
	unboundPVC2         = makeTestPVC("unbound-pvc2", "5G", "", pvcUnbound, "", "1", &waitClass)
	preboundPVC         = makeTestPVC("prebound-pvc", "1G", "", pvcPrebound, "pv-node1a", "1", &waitClass)
	preboundPVCNode1a   = makeTestPVC("unbound-pvc", "1G", "", pvcPrebound, "pv-node1a", "1", &waitClass)
	boundPVC            = makeTestPVC("bound-pvc", "1G", "", pvcBound, "pv-bound", "1", &waitClass)
	boundPVC2           = makeTestPVC("bound-pvc2", "1G", "", pvcBound, "pv-bound2", "1", &waitClass)
	boundPVCNode1a      = makeTestPVC("unbound-pvc", "1G", "", pvcBound, "pv-node1a", "1", &waitClass)
	badPVC              = makeBadPVC()
	immediateUnboundPVC = makeTestPVC("immediate-unbound-pvc", "1G", "", pvcUnbound, "", "1", &immediateClass)
	immediateBoundPVC   = makeTestPVC("immediate-bound-pvc", "1G", "", pvcBound, "pv-bound-immediate", "1", &immediateClass)

	// PVCs for dynamic provisioning
	provisionedPVC              = makeTestPVC("provisioned-pvc", "1Gi", "", pvcUnbound, "", "1", &waitClassWithProvisioner)
	provisionedPVC2             = makeTestPVC("provisioned-pvc2", "1Gi", "", pvcUnbound, "", "1", &waitClassWithProvisioner)
	provisionedPVCHigherVersion = makeTestPVC("provisioned-pvc2", "1Gi", "", pvcUnbound, "", "2", &waitClassWithProvisioner)
	provisionedPVCBound         = makeTestPVC("provisioned-pvc", "1Gi", "", pvcBound, "some-pv", "1", &waitClassWithProvisioner)
	noProvisionerPVC            = makeTestPVC("no-provisioner-pvc", "1Gi", "", pvcUnbound, "", "1", &waitClass)
	topoMismatchPVC             = makeTestPVC("topo-mismatch-pvc", "1Gi", "", pvcUnbound, "", "1", &topoMismatchClass)

	selectedNodePVC = makeTestPVC("provisioned-pvc", "1Gi", nodeLabelValue, pvcSelectedNode, "", "1", &waitClassWithProvisioner)

	// PVs for manual binding
	pvNoNode = makeTestPV("pv-no-node", "", "1G", "1", nil, waitClass)
	pvNode1a = makeTestPV("pv-node1a", "node1", "5G", "1", nil, waitClass)
	pvNode1b = makeTestPV("pv-node1b", "node1", "10G", "1", nil, waitClass)
@@ -59,12 +70,13 @@ var (
	pvNode2    = makeTestPV("pv-node2", "node2", "1G", "1", nil, waitClass)
	pvPrebound = makeTestPV("pv-prebound", "node1", "1G", "1", unboundPVC, waitClass)
	pvBound    = makeTestPV("pv-bound", "node1", "1G", "1", boundPVC, waitClass)
	pvNode1aBound              = makeTestPV("pv-node1a", "node1", "1G", "1", unboundPVC, waitClass)
	pvNode1bBound              = makeTestPV("pv-node1b", "node1", "5G", "1", unboundPVC2, waitClass)
	pvNode1bBoundHigherVersion = makeTestPV("pv-node1b", "node1", "5G", "2", unboundPVC2, waitClass)
	pvNode1aBound              = makeTestPV("pv-node1a", "node1", "5G", "1", unboundPVC, waitClass)
	pvNode1bBound              = makeTestPV("pv-node1b", "node1", "10G", "1", unboundPVC2, waitClass)
	pvNode1bBoundHigherVersion = makeTestPV("pv-node1b", "node1", "10G", "2", unboundPVC2, waitClass)
	pvBoundImmediate           = makeTestPV("pv-bound-immediate", "node1", "1G", "1", immediateBoundPVC, immediateClass)
	pvBoundImmediateNode2      = makeTestPV("pv-bound-immediate", "node2", "1G", "1", immediateBoundPVC, immediateClass)

	// PVC/PV bindings for manual binding
	binding1a     = makeBinding(unboundPVC, pvNode1a)
	binding1b     = makeBinding(unboundPVC2, pvNode1b)
	bindingNoNode = makeBinding(unboundPVC, pvNoNode)
@@ -72,11 +84,13 @@ var (
	binding1aBound = makeBinding(unboundPVC, pvNode1aBound)
	binding1bBound = makeBinding(unboundPVC2, pvNode1bBound)

	// storage class names
	waitClass                = "waitClass"
	immediateClass           = "immediateClass"
	provisionNotSupportClass = "provisionNotSupportedClass"
	waitClassWithProvisioner = "waitClassWithProvisioner"
	topoMismatchClass        = "topoMismatchClass"

	// node topology
	nodeLabelKey   = "nodeKey"
	nodeLabelValue = "node1"
)
@@ -102,7 +116,8 @@ func newTestBinder(t *testing.T) *testEnv {
		client,
		pvcInformer,
		informerFactory.Core().V1().PersistentVolumes(),
		classInformer)
		classInformer,
		10*time.Second)

	// Add storageclasses
	waitMode := storagev1.VolumeBindingWaitForFirstConsumer
@@ -110,7 +125,7 @@ func newTestBinder(t *testing.T) *testEnv {
	classes := []*storagev1.StorageClass{
		{
			ObjectMeta: metav1.ObjectMeta{
				Name: waitClass,
				Name: waitClassWithProvisioner,
			},
			VolumeBindingMode: &waitMode,
			Provisioner:       "test-provisioner",
@@ -133,7 +148,7 @@ func newTestBinder(t *testing.T) *testEnv {
		},
		{
			ObjectMeta: metav1.ObjectMeta{
				Name: provisionNotSupportClass,
				Name: waitClass,
			},
			VolumeBindingMode: &waitMode,
			Provisioner:       "kubernetes.io/no-provisioner",
@@ -247,17 +262,44 @@ func (env *testEnv) initPodCache(pod *v1.Pod, node string, bindings []*bindingIn
func (env *testEnv) validatePodCache(t *testing.T, name, node string, pod *v1.Pod, expectedBindings []*bindingInfo, expectedProvisionings []*v1.PersistentVolumeClaim) {
	cache := env.internalBinder.podBindingCache
	bindings := cache.GetBindings(pod, node)
	if aLen, eLen := len(bindings), len(expectedBindings); aLen != eLen {
		t.Errorf("Test %q failed. expected %v bindings, got %v", name, eLen, aLen)
	} else if expectedBindings == nil && bindings != nil {
		// nil and empty are different
		t.Errorf("Test %q failed. expected nil bindings, got empty", name)
	} else if expectedBindings != nil && bindings == nil {
		// nil and empty are different
		t.Errorf("Test %q failed. expected empty bindings, got nil", name)
	} else {
		for i := 0; i < aLen; i++ {
			// Validate PV
			if !reflect.DeepEqual(expectedBindings[i].pv, bindings[i].pv) {
				t.Errorf("Test %q failed. binding.pv doesn't match [A-expected, B-got]: %s", name, diff.ObjectDiff(expectedBindings[i].pv, bindings[i].pv))
			}

	if !reflect.DeepEqual(expectedBindings, bindings) {
		t.Errorf("Test %q failed: Expected bindings %+v, got %+v", name, expectedBindings, bindings)
			// Validate PVC
			if !reflect.DeepEqual(expectedBindings[i].pvc, bindings[i].pvc) {
				t.Errorf("Test %q failed. binding.pvc doesn't match [A-expected, B-got]: %s", name, diff.ObjectDiff(expectedBindings[i].pvc, bindings[i].pvc))
			}
		}
	}

	provisionedClaims := cache.GetProvisionedPVCs(pod, node)

	if !reflect.DeepEqual(expectedProvisionings, provisionedClaims) {
		t.Errorf("Test %q failed: Expected provisionings %+v, got %+v", name, expectedProvisionings, provisionedClaims)
	if aLen, eLen := len(provisionedClaims), len(expectedProvisionings); aLen != eLen {
		t.Errorf("Test %q failed. expected %v provisioned claims, got %v", name, eLen, aLen)
	} else if expectedProvisionings == nil && provisionedClaims != nil {
		// nil and empty are different
		t.Errorf("Test %q failed. expected nil provisionings, got empty", name)
	} else if expectedProvisionings != nil && provisionedClaims == nil {
		// nil and empty are different
		t.Errorf("Test %q failed. expected empty provisionings, got nil", name)
	} else {
		for i := 0; i < aLen; i++ {
			if !reflect.DeepEqual(expectedProvisionings[i], provisionedClaims[i]) {
				t.Errorf("Test %q failed. provisioned claims doesn't match [A-expected, B-got]: %s", name, diff.ObjectDiff(expectedProvisionings[i], provisionedClaims[i]))
			}
		}
	}

}

func (env *testEnv) getPodBindings(t *testing.T, name, node string, pod *v1.Pod) []*bindingInfo {
@@ -266,8 +308,6 @@ func (env *testEnv) getPodBindings(t *testing.T, name, node string, pod *v1.Pod)
}

func (env *testEnv) validateAssume(t *testing.T, name string, pod *v1.Pod, bindings []*bindingInfo, provisionings []*v1.PersistentVolumeClaim) {
	// TODO: Check binding cache

	// Check pv cache
	pvCache := env.internalBinder.pvCache
	for _, b := range bindings {
@@ -383,17 +423,21 @@ const (
	pvcUnbound = iota
	pvcPrebound
	pvcBound
	pvcSelectedNode
)

func makeTestPVC(name, size string, pvcBoundState int, pvName, resourceVersion string, className *string) *v1.PersistentVolumeClaim {
func makeTestPVC(name, size, node string, pvcBoundState int, pvName, resourceVersion string, className *string) *v1.PersistentVolumeClaim {
	pvc := &v1.PersistentVolumeClaim{
		TypeMeta: metav1.TypeMeta{
			Kind:       "PersistentVolumeClaim",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:            name,
			Namespace:       "testns",
			UID:             types.UID("pvc-uid"),
			ResourceVersion: resourceVersion,
			SelfLink:        testapi.Default.SelfLink("pvc", name),
			Annotations:     map[string]string{},
		},
		Spec: v1.PersistentVolumeClaimSpec{
			Resources: v1.ResourceRequirements{
@@ -406,6 +450,9 @@ func makeTestPVC(name, size string, pvcBoundState int, pvName, resourceVersion s
	}

	switch pvcBoundState {
	case pvcSelectedNode:
		metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, annSelectedNode, node)
		// don't fallthrough
	case pvcBound:
		metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, annBindCompleted, "yes")
		fallthrough
@@ -454,9 +501,12 @@ func makeTestPV(name, node, capacity, version string, boundToPVC *v1.PersistentV

	if boundToPVC != nil {
		pv.Spec.ClaimRef = &v1.ObjectReference{
			Name:      boundToPVC.Name,
			Namespace: boundToPVC.Namespace,
			UID:       boundToPVC.UID,
			Kind:            boundToPVC.Kind,
			APIVersion:      boundToPVC.APIVersion,
			ResourceVersion: boundToPVC.ResourceVersion,
			Name:            boundToPVC.Name,
			Namespace:       boundToPVC.Namespace,
			UID:             boundToPVC.UID,
		}
		metav1.SetMetaDataAnnotation(&pv.ObjectMeta, annBoundByController, "yes")
	}
@@ -464,6 +514,24 @@ func makeTestPV(name, node, capacity, version string, boundToPVC *v1.PersistentV
	return pv
}

func pvcSetSelectedNode(pvc *v1.PersistentVolumeClaim, node string) *v1.PersistentVolumeClaim {
	newPVC := pvc.DeepCopy()
	// Annotate the copy, not the input, so the shared test fixture stays unmodified.
	metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, annSelectedNode, node)
	return newPVC
}

func pvcSetEmptyAnnotations(pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
	newPVC := pvc.DeepCopy()
	newPVC.Annotations = map[string]string{}
	return newPVC
}

func pvRemoveClaimUID(pv *v1.PersistentVolume) *v1.PersistentVolume {
	newPV := pv.DeepCopy()
	newPV.Spec.ClaimRef.UID = ""
	return newPV
}

func makePod(pvcs []*v1.PersistentVolumeClaim) *v1.Pod {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
@@ -515,7 +583,7 @@ func makeBinding(pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) *bindin
func addProvisionAnn(pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
	res := pvc.DeepCopy()
	// Add provision related annotations
	res.Annotations[annSelectedNode] = nodeLabelValue
	metav1.SetMetaDataAnnotation(&res.ObjectMeta, annSelectedNode, nodeLabelValue)

	return res
}
@@ -568,10 +636,9 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
			shouldFail: true,
		},
		"prebound-pvc": {
			podPVCs:         []*v1.PersistentVolumeClaim{preboundPVC},
			pvs:             []*v1.PersistentVolume{pvNode1aBound},
			expectedUnbound: true,
			expectedBound:   true,
			podPVCs:    []*v1.PersistentVolumeClaim{preboundPVC},
			pvs:        []*v1.PersistentVolume{pvNode1aBound},
			shouldFail: true,
		},
		"unbound-pvc,pv-same-node": {
			podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
@@ -621,11 +688,9 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
			expectedBound: true,
		},
		"one-prebound,one-unbound": {
			podPVCs:          []*v1.PersistentVolumeClaim{unboundPVC, preboundPVC},
			pvs:              []*v1.PersistentVolume{pvNode1a, pvNode1b},
			expectedBindings: []*bindingInfo{binding1a},
			expectedUnbound:  true,
			expectedBound:    true,
			podPVCs:    []*v1.PersistentVolumeClaim{unboundPVC, preboundPVC},
			pvs:        []*v1.PersistentVolume{pvNode1a, pvNode1b},
			shouldFail: true,
		},
		"immediate-bound-pvc": {
			podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC},
@@ -776,9 +841,9 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
		},
	}

	// Set VolumeScheduling and DynamicProvisioningScheduling feature gate
	utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true,DynamicProvisioningScheduling=true")
	defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false,DynamicProvisioningScheduling=false")
	// Set VolumeScheduling feature gate
	utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true")
	defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false")

	testNode := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
@@ -834,69 +899,64 @@ func TestAssumePodVolumes(t *testing.T) {
		provisionedPVCs []*v1.PersistentVolumeClaim

		// Expected return values
		shouldFail              bool
		expectedBindingRequired bool
		expectedAllBound        bool
		shouldFail       bool
		expectedAllBound bool

		// if nil, use bindings
		expectedBindings []*bindingInfo
		expectedBindings      []*bindingInfo
		expectedProvisionings []*v1.PersistentVolumeClaim
	}{
		"all-bound": {
			podPVCs:          []*v1.PersistentVolumeClaim{boundPVC},
			pvs:              []*v1.PersistentVolume{pvBound},
			expectedAllBound: true,
		},
		"prebound-pvc": {
			podPVCs: []*v1.PersistentVolumeClaim{preboundPVC},
			pvs:     []*v1.PersistentVolume{pvNode1a},
		},
		"one-binding": {
			podPVCs:                 []*v1.PersistentVolumeClaim{unboundPVC},
			bindings:                []*bindingInfo{binding1a},
			pvs:                     []*v1.PersistentVolume{pvNode1a},
			expectedBindingRequired: true,
			podPVCs:               []*v1.PersistentVolumeClaim{unboundPVC},
			bindings:              []*bindingInfo{binding1a},
			pvs:                   []*v1.PersistentVolume{pvNode1a},
			expectedBindings:      []*bindingInfo{binding1aBound},
			expectedProvisionings: []*v1.PersistentVolumeClaim{},
		},
		"two-bindings": {
			podPVCs:                 []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2},
			bindings:                []*bindingInfo{binding1a, binding1b},
			pvs:                     []*v1.PersistentVolume{pvNode1a, pvNode1b},
			expectedBindingRequired: true,
			podPVCs:               []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2},
			bindings:              []*bindingInfo{binding1a, binding1b},
			pvs:                   []*v1.PersistentVolume{pvNode1a, pvNode1b},
			expectedBindings:      []*bindingInfo{binding1aBound, binding1bBound},
			expectedProvisionings: []*v1.PersistentVolumeClaim{},
		},
		"pv-already-bound": {
			podPVCs:                 []*v1.PersistentVolumeClaim{unboundPVC},
			bindings:                []*bindingInfo{binding1aBound},
			pvs:                     []*v1.PersistentVolume{pvNode1aBound},
			expectedBindingRequired: false,
			expectedBindings:        []*bindingInfo{},
			podPVCs:               []*v1.PersistentVolumeClaim{unboundPVC},
			bindings:              []*bindingInfo{binding1aBound},
			pvs:                   []*v1.PersistentVolume{pvNode1aBound},
			expectedBindings:      []*bindingInfo{binding1aBound},
			expectedProvisionings: []*v1.PersistentVolumeClaim{},
		},
		"claimref-failed": {
			podPVCs:                 []*v1.PersistentVolumeClaim{unboundPVC},
			bindings:                []*bindingInfo{binding1a, bindingBad},
			pvs:                     []*v1.PersistentVolume{pvNode1a, pvNode1b},
			shouldFail:              true,
			expectedBindingRequired: true,
			podPVCs:    []*v1.PersistentVolumeClaim{unboundPVC},
			bindings:   []*bindingInfo{binding1a, bindingBad},
			pvs:        []*v1.PersistentVolume{pvNode1a, pvNode1b},
			shouldFail: true,
		},
		"tmpupdate-failed": {
			podPVCs:                 []*v1.PersistentVolumeClaim{unboundPVC},
			bindings:                []*bindingInfo{binding1a, binding1b},
			pvs:                     []*v1.PersistentVolume{pvNode1a},
			shouldFail:              true,
			expectedBindingRequired: true,
			podPVCs:    []*v1.PersistentVolumeClaim{unboundPVC},
			bindings:   []*bindingInfo{binding1a, binding1b},
			pvs:        []*v1.PersistentVolume{pvNode1a},
			shouldFail: true,
		},
		"one-binding, one-pvc-provisioned": {
			podPVCs:                 []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC},
			bindings:                []*bindingInfo{binding1a},
			pvs:                     []*v1.PersistentVolume{pvNode1a},
			provisionedPVCs:         []*v1.PersistentVolumeClaim{provisionedPVC},
			expectedBindingRequired: true,
			podPVCs:               []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC},
			bindings:              []*bindingInfo{binding1a},
			pvs:                   []*v1.PersistentVolume{pvNode1a},
			provisionedPVCs:       []*v1.PersistentVolumeClaim{provisionedPVC},
			expectedBindings:      []*bindingInfo{binding1aBound},
			expectedProvisionings: []*v1.PersistentVolumeClaim{selectedNodePVC},
		},
		"one-binding, one-provision-tmpupdate-failed": {
			podPVCs:                 []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVCHigherVersion},
			bindings:                []*bindingInfo{binding1a},
			pvs:                     []*v1.PersistentVolume{pvNode1a},
			provisionedPVCs:         []*v1.PersistentVolumeClaim{provisionedPVC2},
			shouldFail:              true,
			expectedBindingRequired: true,
			podPVCs:         []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVCHigherVersion},
			bindings:        []*bindingInfo{binding1a},
			pvs:             []*v1.PersistentVolume{pvNode1a},
			provisionedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC2},
			shouldFail:      true,
		},
	}

@@ -911,7 +971,7 @@ func TestAssumePodVolumes(t *testing.T) {
		testEnv.initVolumes(scenario.pvs, scenario.pvs)

		// Execute
		allBound, bindingRequired, err := testEnv.binder.AssumePodVolumes(pod, "node1")
		allBound, err := testEnv.binder.AssumePodVolumes(pod, "node1")

		// Validate
		if !scenario.shouldFail && err != nil {
@@ -920,24 +980,25 @@ func TestAssumePodVolumes(t *testing.T) {
		if scenario.shouldFail && err == nil {
			t.Errorf("Test %q failed: returned success but expected error", name)
		}
		if scenario.expectedBindingRequired != bindingRequired {
			t.Errorf("Test %q failed: returned unexpected bindingRequired: %v", name, bindingRequired)
		}
		if scenario.expectedAllBound != allBound {
			t.Errorf("Test %q failed: returned unexpected allBound: %v", name, allBound)
		}
		if scenario.expectedBindings == nil {
			scenario.expectedBindings = scenario.bindings
		}
		if scenario.shouldFail {
			testEnv.validateFailedAssume(t, name, pod, scenario.expectedBindings, scenario.provisionedPVCs)
		} else {
			testEnv.validateAssume(t, name, pod, scenario.expectedBindings, scenario.provisionedPVCs)
		if scenario.expectedProvisionings == nil {
			scenario.expectedProvisionings = scenario.provisionedPVCs
		}
		if scenario.shouldFail {
			testEnv.validateFailedAssume(t, name, pod, scenario.expectedBindings, scenario.expectedProvisionings)
		} else {
			testEnv.validateAssume(t, name, pod, scenario.expectedBindings, scenario.expectedProvisionings)
		}
		testEnv.validatePodCache(t, name, pod.Spec.NodeName, pod, scenario.expectedBindings, scenario.expectedProvisionings)
	}
}

func TestBindPodVolumes(t *testing.T) {
func TestBindAPIUpdate(t *testing.T) {
	scenarios := map[string]struct {
		// Inputs
		bindings []*bindingInfo
@@ -960,34 +1021,56 @@ func TestBindPodVolumes(t *testing.T) {
		// if nil, use expectedPVCs
		expectedAPIPVCs []*v1.PersistentVolumeClaim
	}{
		"all-bound": {},
		"not-fully-bound": {
			bindings: []*bindingInfo{},
		"nothing-to-bind-nil": {
			shouldFail: true,
		},
		"nothing-to-bind-bindings-nil": {
			provisionedPVCs: []*v1.PersistentVolumeClaim{},
			shouldFail:      true,
		},
		"nothing-to-bind-provisionings-nil": {
			bindings:   []*bindingInfo{},
			shouldFail: true,
		},
		"nothing-to-bind-empty": {
			bindings:        []*bindingInfo{},
			provisionedPVCs: []*v1.PersistentVolumeClaim{},
		},
		"one-binding": {
			bindings:    []*bindingInfo{binding1aBound},
			cachedPVs:   []*v1.PersistentVolume{pvNode1a},
			expectedPVs: []*v1.PersistentVolume{pvNode1aBound},
			bindings:        []*bindingInfo{binding1aBound},
			cachedPVs:       []*v1.PersistentVolume{pvNode1a},
			expectedPVs:     []*v1.PersistentVolume{pvNode1aBound},
			provisionedPVCs: []*v1.PersistentVolumeClaim{},
		},
		"two-bindings": {
			bindings:    []*bindingInfo{binding1aBound, binding1bBound},
			cachedPVs:   []*v1.PersistentVolume{pvNode1a, pvNode1b},
			expectedPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1bBound},
			bindings:        []*bindingInfo{binding1aBound, binding1bBound},
			cachedPVs:       []*v1.PersistentVolume{pvNode1a, pvNode1b},
			expectedPVs:     []*v1.PersistentVolume{pvNode1aBound, pvNode1bBound},
			provisionedPVCs: []*v1.PersistentVolumeClaim{},
		},
		"api-already-updated": {
			bindings:        []*bindingInfo{binding1aBound},
			cachedPVs:       []*v1.PersistentVolume{pvNode1aBound},
			expectedPVs:     []*v1.PersistentVolume{pvNode1aBound},
			provisionedPVCs: []*v1.PersistentVolumeClaim{},
		},
		"api-update-failed": {
			bindings:       []*bindingInfo{binding1aBound, binding1bBound},
			cachedPVs:      []*v1.PersistentVolume{pvNode1a, pvNode1b},
			apiPVs:         []*v1.PersistentVolume{pvNode1a, pvNode1bBoundHigherVersion},
			expectedPVs:    []*v1.PersistentVolume{pvNode1aBound, pvNode1b},
			expectedAPIPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1bBoundHigherVersion},
			shouldFail:     true,
			bindings:        []*bindingInfo{binding1aBound, binding1bBound},
			cachedPVs:       []*v1.PersistentVolume{pvNode1a, pvNode1b},
			apiPVs:          []*v1.PersistentVolume{pvNode1a, pvNode1bBoundHigherVersion},
			expectedPVs:     []*v1.PersistentVolume{pvNode1aBound, pvNode1b},
			expectedAPIPVs:  []*v1.PersistentVolume{pvNode1aBound, pvNode1bBoundHigherVersion},
			provisionedPVCs: []*v1.PersistentVolumeClaim{},
			shouldFail:      true,
		},
		"one-provisioned-pvc": {
			bindings:        []*bindingInfo{},
			provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
			cachedPVCs:      []*v1.PersistentVolumeClaim{provisionedPVC},
			expectedPVCs:    []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
		},
		"provision-api-update-failed": {
			bindings:        []*bindingInfo{},
			provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC), addProvisionAnn(provisionedPVC2)},
			cachedPVCs:      []*v1.PersistentVolumeClaim{provisionedPVC, provisionedPVC2},
			apiPVCs:         []*v1.PersistentVolumeClaim{provisionedPVC, provisionedPVCHigherVersion},
@@ -995,7 +1078,7 @@ func TestBindPodVolumes(t *testing.T) {
			expectedAPIPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC), provisionedPVCHigherVersion},
			shouldFail:      true,
		},
		"bingding-succeed, provision-api-update-failed": {
		"binding-succeed, provision-api-update-failed": {
			bindings:    []*bindingInfo{binding1aBound},
			cachedPVs:   []*v1.PersistentVolume{pvNode1a},
			expectedPVs: []*v1.PersistentVolume{pvNode1aBound},
@@ -1008,7 +1091,7 @@ func TestBindPodVolumes(t *testing.T) {
		},
	}
	for name, scenario := range scenarios {
		glog.V(5).Infof("Running test case %q", name)
		glog.V(4).Infof("Running test case %q", name)

		// Setup
		testEnv := newTestBinder(t)
@@ -1024,7 +1107,7 @@ func TestBindPodVolumes(t *testing.T) {
		testEnv.assumeVolumes(t, name, "node1", pod, scenario.bindings, scenario.provisionedPVCs)

		// Execute
		err := testEnv.binder.BindPodVolumes(pod)
		err := testEnv.internalBinder.bindAPIUpdate(pod.Name, scenario.bindings, scenario.provisionedPVCs)

		// Validate
		if !scenario.shouldFail && err != nil {
@@ -1044,6 +1127,301 @@ func TestBindPodVolumes(t *testing.T) {
		}
	}

func TestCheckBindings(t *testing.T) {
	scenarios := map[string]struct {
		// Inputs
		bindings  []*bindingInfo
		cachedPVs []*v1.PersistentVolume

		provisionedPVCs []*v1.PersistentVolumeClaim
		cachedPVCs      []*v1.PersistentVolumeClaim

		// Expected return values
		shouldFail    bool
		expectedBound bool
	}{
		"nothing-to-bind-nil": {
			shouldFail: true,
		},
		"nothing-to-bind-bindings-nil": {
			provisionedPVCs: []*v1.PersistentVolumeClaim{},
			shouldFail:      true,
		},
		"nothing-to-bind-provisionings-nil": {
			bindings:   []*bindingInfo{},
			shouldFail: true,
		},
		"nothing-to-bind": {
			bindings:        []*bindingInfo{},
			provisionedPVCs: []*v1.PersistentVolumeClaim{},
			expectedBound:   true,
		},
		"binding-bound": {
			bindings:        []*bindingInfo{binding1aBound},
			provisionedPVCs: []*v1.PersistentVolumeClaim{},
			cachedPVs:       []*v1.PersistentVolume{pvNode1aBound},
			cachedPVCs:      []*v1.PersistentVolumeClaim{boundPVCNode1a},
			expectedBound:   true,
		},
		"binding-prebound": {
			bindings:        []*bindingInfo{binding1aBound},
			provisionedPVCs: []*v1.PersistentVolumeClaim{},
			cachedPVs:       []*v1.PersistentVolume{pvNode1aBound},
			cachedPVCs:      []*v1.PersistentVolumeClaim{preboundPVCNode1a},
		},
		"binding-unbound": {
			bindings:        []*bindingInfo{binding1aBound},
			provisionedPVCs: []*v1.PersistentVolumeClaim{},
			cachedPVs:       []*v1.PersistentVolume{pvNode1aBound},
			cachedPVCs:      []*v1.PersistentVolumeClaim{unboundPVC},
		},
		"binding-pvc-not-exists": {
			bindings:        []*bindingInfo{binding1aBound},
			provisionedPVCs: []*v1.PersistentVolumeClaim{},
			cachedPVs:       []*v1.PersistentVolume{pvNode1aBound},
			shouldFail:      true,
		},
		"binding-pv-not-exists": {
			bindings:        []*bindingInfo{binding1aBound},
			provisionedPVCs: []*v1.PersistentVolumeClaim{},
			cachedPVCs:      []*v1.PersistentVolumeClaim{boundPVCNode1a},
			shouldFail:      true,
		},
		"binding-claimref-nil": {
			bindings:        []*bindingInfo{binding1aBound},
			provisionedPVCs: []*v1.PersistentVolumeClaim{},
			cachedPVs:       []*v1.PersistentVolume{pvNode1a},
			cachedPVCs:      []*v1.PersistentVolumeClaim{boundPVCNode1a},
			shouldFail:      true,
		},
		"binding-claimref-uid-empty": {
			bindings:        []*bindingInfo{binding1aBound},
			provisionedPVCs: []*v1.PersistentVolumeClaim{},
			cachedPVs:       []*v1.PersistentVolume{pvRemoveClaimUID(pvNode1aBound)},
			cachedPVCs:      []*v1.PersistentVolumeClaim{boundPVCNode1a},
			shouldFail:      true,
		},
		"binding-one-bound,one-unbound": {
			bindings:        []*bindingInfo{binding1aBound, binding1bBound},
			provisionedPVCs: []*v1.PersistentVolumeClaim{},
			cachedPVs:       []*v1.PersistentVolume{pvNode1aBound, pvNode1bBound},
			cachedPVCs:      []*v1.PersistentVolumeClaim{boundPVCNode1a, unboundPVC2},
		},
		"provisioning-pvc-bound": {
			bindings:        []*bindingInfo{},
			provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
			cachedPVCs:      []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVCBound)},
			expectedBound:   true,
		},
		"provisioning-pvc-unbound": {
			bindings:        []*bindingInfo{},
			provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
			cachedPVCs:      []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
		},
		"provisioning-pvc-not-exists": {
			bindings:        []*bindingInfo{},
			provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
			shouldFail:      true,
		},
		"provisioning-pvc-annotations-nil": {
			bindings:        []*bindingInfo{},
			provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
			cachedPVCs:      []*v1.PersistentVolumeClaim{provisionedPVC},
			shouldFail:      true,
		},
		"provisioning-pvc-selected-node-dropped": {
			bindings:        []*bindingInfo{},
			provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
			cachedPVCs:      []*v1.PersistentVolumeClaim{pvcSetEmptyAnnotations(provisionedPVC)},
			shouldFail:      true,
		},
		"provisioning-pvc-selected-node-wrong-node": {
			bindings:        []*bindingInfo{},
			provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
			cachedPVCs:      []*v1.PersistentVolumeClaim{pvcSetSelectedNode(provisionedPVC, "wrong-node")},
			shouldFail:      true,
		},
		"binding-bound-provisioning-unbound": {
			bindings:        []*bindingInfo{binding1aBound},
			provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
			cachedPVs:       []*v1.PersistentVolume{pvNode1aBound},
			cachedPVCs:      []*v1.PersistentVolumeClaim{boundPVCNode1a, addProvisionAnn(provisionedPVC)},
		},
	}

	for name, scenario := range scenarios {
		glog.V(4).Infof("Running test case %q", name)

		// Setup
		pod := makePod(nil)
		testEnv := newTestBinder(t)
		testEnv.initVolumes(scenario.cachedPVs, nil)
		testEnv.initClaims(scenario.cachedPVCs, nil)

		// Execute
		allBound, err := testEnv.internalBinder.checkBindings(pod, scenario.bindings, scenario.provisionedPVCs)

		// Validate
		if !scenario.shouldFail && err != nil {
			t.Errorf("Test %q failed: returned error: %v", name, err)
		}
		if scenario.shouldFail && err == nil {
			t.Errorf("Test %q failed: returned success but expected error", name)
		}
		if scenario.expectedBound != allBound {
			t.Errorf("Test %q failed: returned bound %v", name, allBound)
		}
	}
}

func TestBindPodVolumes(t *testing.T) {
	scenarios := map[string]struct {
		// Inputs
		// These tests only support a single pv and pvc and static binding
		bindingsNil bool // Pass in nil bindings slice
		binding     *bindingInfo
		cachedPV    *v1.PersistentVolume
		cachedPVC   *v1.PersistentVolumeClaim
		apiPV       *v1.PersistentVolume

		// This function runs with a delay of 5 seconds
		delayFunc func(*testing.T, *testEnv, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim)

		// Expected return values
		shouldFail bool
	}{
		"nothing-to-bind-nil": {
			bindingsNil: true,
			shouldFail:  true,
		},
		"nothing-to-bind-empty": {},
		"already-bound": {
			binding:   binding1aBound,
			cachedPV:  pvNode1aBound,
			cachedPVC: boundPVCNode1a,
		},
		"binding-succeeds-after-time": {
			binding:   binding1aBound,
			cachedPV:  pvNode1a,
			cachedPVC: unboundPVC,
			delayFunc: func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
				// Update PVC to be fully bound to PV
				newPVC := pvc.DeepCopy()
				newPVC.ResourceVersion = "100"
				newPVC.Spec.VolumeName = pv.Name
				metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, annBindCompleted, "yes")

				// Update pvc cache, fake client doesn't invoke informers
				internalBinder, ok := testEnv.binder.(*volumeBinder)
				if !ok {
					t.Fatalf("Failed to convert to internal binder")
				}

				pvcCache := internalBinder.pvcCache
				internalPVCCache, ok := pvcCache.(*pvcAssumeCache)
				if !ok {
					t.Fatalf("Failed to convert to internal PVC cache")
				}
				internalPVCCache.add(newPVC)
			},
		},
		"pod-deleted-after-time": {
			binding:   binding1aBound,
			cachedPV:  pvNode1a,
			cachedPVC: unboundPVC,
			delayFunc: func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
				bindingsCache := testEnv.binder.GetBindingsCache()
				if bindingsCache == nil {
					t.Fatalf("Failed to get bindings cache")
				}

				// Delete the pod from the cache
				bindingsCache.DeleteBindings(pod)

				// Check that it's deleted
				bindings := bindingsCache.GetBindings(pod, "node1")
				if bindings != nil {
					t.Fatalf("Failed to delete bindings")
				}
			},
			shouldFail: true,
		},
		"binding-times-out": {
			binding:    binding1aBound,
			cachedPV:   pvNode1a,
			cachedPVC:  unboundPVC,
			shouldFail: true,
		},
		"binding-fails": {
			binding:    binding1bBound,
			cachedPV:   pvNode1b,
			apiPV:      pvNode1bBoundHigherVersion,
			cachedPVC:  unboundPVC2,
			shouldFail: true,
		},
		"check-fails": {
			binding:   binding1aBound,
			cachedPV:  pvNode1a,
			cachedPVC: unboundPVC,
			delayFunc: func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
				// Delete PVC
				// Update pvc cache, fake client doesn't invoke informers
				internalBinder, ok := testEnv.binder.(*volumeBinder)
				if !ok {
					t.Fatalf("Failed to convert to internal binder")
				}

				pvcCache := internalBinder.pvcCache
				internalPVCCache, ok := pvcCache.(*pvcAssumeCache)
				if !ok {
					t.Fatalf("Failed to convert to internal PVC cache")
				}
				internalPVCCache.delete(pvc)
			},
			shouldFail: true,
		},
	}

	for name, scenario := range scenarios {
		glog.V(4).Infof("Running test case %q", name)

		// Setup
		pod := makePod(nil)
		if scenario.apiPV == nil {
			scenario.apiPV = scenario.cachedPV
		}
		testEnv := newTestBinder(t)
		if !scenario.bindingsNil {
			if scenario.binding != nil {
				testEnv.initVolumes([]*v1.PersistentVolume{scenario.cachedPV}, []*v1.PersistentVolume{scenario.apiPV})
				testEnv.initClaims([]*v1.PersistentVolumeClaim{scenario.cachedPVC}, nil)
				testEnv.assumeVolumes(t, name, "node1", pod, []*bindingInfo{scenario.binding}, []*v1.PersistentVolumeClaim{})
			} else {
				testEnv.assumeVolumes(t, name, "node1", pod, []*bindingInfo{}, []*v1.PersistentVolumeClaim{})
			}
		}

		if scenario.delayFunc != nil {
			go func() {
				time.Sleep(5 * time.Second)
				glog.V(5).Infof("Running delay function")
				scenario.delayFunc(t, testEnv, pod, scenario.binding.pv, scenario.binding.pvc)
			}()
		}

		// Execute
		err := testEnv.binder.BindPodVolumes(pod)

		// Validate
		if !scenario.shouldFail && err != nil {
			t.Errorf("Test %q failed: returned error: %v", name, err)
		}
		if scenario.shouldFail && err == nil {
			t.Errorf("Test %q failed: returned success but expected error", name)
		}
	}
}

func TestFindAssumeVolumes(t *testing.T) {
	// Set feature gate
	utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true")
@@ -1080,17 +1458,15 @@ func TestFindAssumeVolumes(t *testing.T) {
	expectedBindings := testEnv.getPodBindings(t, "before-assume", testNode.Name, pod)

	// 2. Assume matches
	allBound, bindingRequired, err := testEnv.binder.AssumePodVolumes(pod, testNode.Name)
	allBound, err := testEnv.binder.AssumePodVolumes(pod, testNode.Name)
	if err != nil {
		t.Errorf("Test failed: AssumePodVolumes returned error: %v", err)
	}
	if allBound {
		t.Errorf("Test failed: detected unbound volumes as bound")
	}
	if !bindingRequired {
		t.Errorf("Test failed: binding not required")
	}
	testEnv.validateAssume(t, "assume", pod, expectedBindings, nil)

	// After assume, claimref should be set on pv
	expectedBindings = testEnv.getPodBindings(t, "after-assume", testNode.Name, pod)

@@ -1106,6 +1482,6 @@ func TestFindAssumeVolumes(t *testing.T) {
	if !unboundSatisfied {
		t.Errorf("Test failed: couldn't find PVs for all PVCs")
	}
	testEnv.validatePodCache(t, "after-assume", testNode.Name, pod, expectedBindings, nil)
	testEnv.validatePodCache(t, "after-assume", testNode.Name, pod, expectedBindings, []*v1.PersistentVolumeClaim{})
}
}
11
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/volume_host.go
generated
vendored
@@ -25,8 +25,8 @@ import (
	"k8s.io/apimachinery/pkg/types"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/record"
	csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned"
	"k8s.io/kubernetes/pkg/cloudprovider"
	"k8s.io/kubernetes/pkg/util/io"
	"k8s.io/kubernetes/pkg/util/mount"
	vol "k8s.io/kubernetes/pkg/volume"
)
@@ -79,10 +79,6 @@ func (ctrl *PersistentVolumeController) GetMounter(pluginName string) mount.Inte
	return nil
}

func (ctrl *PersistentVolumeController) GetWriter() io.Writer {
	return nil
}

func (ctrl *PersistentVolumeController) GetHostName() string {
	return ""
}
@@ -128,3 +124,8 @@ func (ctrl *PersistentVolumeController) GetNodeName() types.NodeName {
func (ctrl *PersistentVolumeController) GetEventRecorder() record.EventRecorder {
	return ctrl.eventRecorder
}

func (ctrl *PersistentVolumeController) GetCSIClient() csiclientset.Interface {
	// No volume plugin needs csi.storage.k8s.io client in PV controller.
	return nil
}