Add generated file
This PR adds generated files under pkg/client and the vendor folder.
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/BUILD (generated, vendored, new file, 60 lines)
@@ -0,0 +1,60 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = ["reconciler.go"],
    importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler",
    deps = [
        "//pkg/controller/volume/attachdetach/cache:go_default_library",
        "//pkg/controller/volume/attachdetach/statusupdater:go_default_library",
        "//pkg/kubelet/events:go_default_library",
        "//pkg/util/goroutinemap/exponentialbackoff:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util/operationexecutor:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["reconciler_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//pkg/controller:go_default_library",
        "//pkg/controller/volume/attachdetach/cache:go_default_library",
        "//pkg/controller/volume/attachdetach/statusupdater:go_default_library",
        "//pkg/controller/volume/attachdetach/testing:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//pkg/volume/util/operationexecutor:go_default_library",
        "//pkg/volume/util/types:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/reconciler.go (generated, vendored, new file, 373 lines)
@@ -0,0 +1,373 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package reconciler implements interfaces that attempt to reconcile the
// desired state of the world with the actual state of the world by triggering
// actions.
package reconciler

import (
    "fmt"
    "strings"
    "time"

    "github.com/golang/glog"
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/tools/record"
    "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
    "k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
    kevents "k8s.io/kubernetes/pkg/kubelet/events"
    "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
    "k8s.io/kubernetes/pkg/volume"
    "k8s.io/kubernetes/pkg/volume/util/operationexecutor"
)

// Reconciler runs a periodic loop to reconcile the desired state of the world with
// the actual state of the world by triggering attach detach operations.
// Note: This is distinct from the Reconciler implemented by the kubelet volume
// manager. This reconciles state for the attach/detach controller. That
// reconciles state for the kubelet volume manager.
type Reconciler interface {
    // Starts running the reconciliation loop which executes periodically, checks
    // if volumes that should be attached are attached and volumes that should
    // be detached are detached. If not, it will trigger attach/detach
    // operations to rectify.
    Run(stopCh <-chan struct{})
}

// NewReconciler returns a new instance of Reconciler that waits loopPeriod
// between successive executions.
// loopPeriod is the amount of time the reconciler loop waits between
// successive executions.
// maxWaitForUnmountDuration is the max amount of time the reconciler will wait
// for the volume to be safely unmounted, after this it will detach the volume
// anyway (to handle crashed/unavailable nodes). If during this time the volume
// becomes used by a new pod, the detach request will be aborted and the timer
// cleared.
func NewReconciler(
    loopPeriod time.Duration,
    maxWaitForUnmountDuration time.Duration,
    syncDuration time.Duration,
    disableReconciliationSync bool,
    desiredStateOfWorld cache.DesiredStateOfWorld,
    actualStateOfWorld cache.ActualStateOfWorld,
    attacherDetacher operationexecutor.OperationExecutor,
    nodeStatusUpdater statusupdater.NodeStatusUpdater,
    recorder record.EventRecorder) Reconciler {
    return &reconciler{
        loopPeriod:                loopPeriod,
        maxWaitForUnmountDuration: maxWaitForUnmountDuration,
        syncDuration:              syncDuration,
        disableReconciliationSync: disableReconciliationSync,
        desiredStateOfWorld:       desiredStateOfWorld,
        actualStateOfWorld:        actualStateOfWorld,
        attacherDetacher:          attacherDetacher,
        nodeStatusUpdater:         nodeStatusUpdater,
        timeOfLastSync:            time.Now(),
        recorder:                  recorder,
    }
}

type reconciler struct {
    loopPeriod                time.Duration
    maxWaitForUnmountDuration time.Duration
    syncDuration              time.Duration
    desiredStateOfWorld       cache.DesiredStateOfWorld
    actualStateOfWorld        cache.ActualStateOfWorld
    attacherDetacher          operationexecutor.OperationExecutor
    nodeStatusUpdater         statusupdater.NodeStatusUpdater
    timeOfLastSync            time.Time
    disableReconciliationSync bool
    recorder                  record.EventRecorder
}

func (rc *reconciler) Run(stopCh <-chan struct{}) {
    wait.Until(rc.reconciliationLoopFunc(), rc.loopPeriod, stopCh)
}

// reconciliationLoopFunc this can be disabled via cli option disableReconciliation.
// It periodically checks whether the attached volumes from actual state
// are still attached to the node and updates the status if they are not.
func (rc *reconciler) reconciliationLoopFunc() func() {
    return func() {

        rc.reconcile()

        if rc.disableReconciliationSync {
            glog.V(5).Info("Skipping reconciling attached volumes still attached since it is disabled via the command line.")
        } else if rc.syncDuration < time.Second {
            glog.V(5).Info("Skipping reconciling attached volumes still attached since it is set to less than one second via the command line.")
        } else if time.Since(rc.timeOfLastSync) > rc.syncDuration {
            glog.V(5).Info("Starting reconciling attached volumes still attached")
            rc.sync()
        }
    }
}

func (rc *reconciler) sync() {
    defer rc.updateSyncTime()
    rc.syncStates()
}

func (rc *reconciler) updateSyncTime() {
    rc.timeOfLastSync = time.Now()
}

func (rc *reconciler) syncStates() {
    volumesPerNode := rc.actualStateOfWorld.GetAttachedVolumesPerNode()
    rc.attacherDetacher.VerifyVolumesAreAttached(volumesPerNode, rc.actualStateOfWorld)
}

// isMultiAttachForbidden checks if attaching this volume to multiple nodes is definitely not allowed/possible.
// In its current form, this function can only reliably say for which volumes it's definitely forbidden. If it returns
// false, it is not guaranteed that multi-attach is actually supported by the volume type and we must rely on the
// attacher to fail fast in such cases.
// Please see https://github.com/kubernetes/kubernetes/issues/40669 and https://github.com/kubernetes/kubernetes/pull/40148#discussion_r98055047
func (rc *reconciler) isMultiAttachForbidden(volumeSpec *volume.Spec) bool {
    if volumeSpec.Volume != nil {
        // Check for volume types which are known to fail slow or cause trouble when trying to multi-attach
        if volumeSpec.Volume.AzureDisk != nil ||
            volumeSpec.Volume.Cinder != nil {
            return true
        }
    }

    // Only if this volume is a persistent volume, we have reliable information on whether it's allowed or not to
    // multi-attach. We trust in the individual volume implementations to not allow unsupported access modes
    if volumeSpec.PersistentVolume != nil {
        // Check for persistent volume types which do not fail when trying to multi-attach
        if volumeSpec.PersistentVolume.Spec.VsphereVolume != nil {
            return false
        }

        if len(volumeSpec.PersistentVolume.Spec.AccessModes) == 0 {
            // No access mode specified so we don't know for sure. Let the attacher fail if needed
            return false
        }

        // check if this volume is allowed to be attached to multiple PODs/nodes, if yes, return false
        for _, accessMode := range volumeSpec.PersistentVolume.Spec.AccessModes {
            if accessMode == v1.ReadWriteMany || accessMode == v1.ReadOnlyMany {
                return false
            }
        }
        return true
    }

    // we don't know if it's supported or not and let the attacher fail later in cases it's not supported
    return false
}

func (rc *reconciler) reconcile() {
    // Detaches are triggered before attaches so that volumes referenced by
    // pods that are rescheduled to a different node are detached first.

    // Ensure volumes that should be detached are detached.
    for _, attachedVolume := range rc.actualStateOfWorld.GetAttachedVolumes() {
        if !rc.desiredStateOfWorld.VolumeExists(
            attachedVolume.VolumeName, attachedVolume.NodeName) {

            // Don't even try to start an operation if there is already one running
            // This check must be done before we do any other checks, as otherwise the other checks
            // may pass while at the same time the volume leaves the pending state, resulting in
            // double detach attempts
            if rc.attacherDetacher.IsOperationPending(attachedVolume.VolumeName, "") {
                glog.V(10).Infof("Operation for volume %q is already running. Can't start detach for %q", attachedVolume.VolumeName, attachedVolume.NodeName)
                continue
            }

            // Set the detach request time
            elapsedTime, err := rc.actualStateOfWorld.SetDetachRequestTime(attachedVolume.VolumeName, attachedVolume.NodeName)
            if err != nil {
                glog.Errorf("Cannot trigger detach because it fails to set detach request time with error %v", err)
                continue
            }
            // Check whether timeout has reached the maximum waiting time
            timeout := elapsedTime > rc.maxWaitForUnmountDuration
            // Check whether volume is still mounted. Skip detach if it is still mounted unless timeout
            if attachedVolume.MountedByNode && !timeout {
                glog.V(12).Infof(attachedVolume.GenerateMsgDetailed("Cannot detach volume because it is still mounted", ""))
                continue
            }

            // Before triggering volume detach, mark volume as detached and update the node status
            // If it fails to update node status, skip detach volume
            err = rc.actualStateOfWorld.RemoveVolumeFromReportAsAttached(attachedVolume.VolumeName, attachedVolume.NodeName)
            if err != nil {
                glog.V(5).Infof("RemoveVolumeFromReportAsAttached failed while removing volume %q from node %q with: %v",
                    attachedVolume.VolumeName,
                    attachedVolume.NodeName,
                    err)
            }

            // Update Node Status to indicate volume is no longer safe to mount.
            err = rc.nodeStatusUpdater.UpdateNodeStatuses()
            if err != nil {
                // Skip detaching this volume if unable to update node status
                glog.Errorf(attachedVolume.GenerateErrorDetailed("UpdateNodeStatuses failed while attempting to report volume as attached", err).Error())
                continue
            }

            // Trigger detach volume which requires verifying safe to detach step
            // If timeout is true, skip verifySafeToDetach check
            glog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting attacherDetacher.DetachVolume", ""))
            verifySafeToDetach := !timeout
            err = rc.attacherDetacher.DetachVolume(attachedVolume.AttachedVolume, verifySafeToDetach, rc.actualStateOfWorld)
            if err == nil {
                if !timeout {
                    glog.Infof(attachedVolume.GenerateMsgDetailed("attacherDetacher.DetachVolume started", ""))
                } else {
                    glog.Warningf(attachedVolume.GenerateMsgDetailed("attacherDetacher.DetachVolume started", fmt.Sprintf("This volume is not safe to detach, but maxWaitForUnmountDuration %v expired, force detaching", rc.maxWaitForUnmountDuration)))
                }
            }
            if err != nil && !exponentialbackoff.IsExponentialBackoff(err) {
                // Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected.
                // Log all other errors.
                glog.Errorf(attachedVolume.GenerateErrorDetailed("attacherDetacher.DetachVolume failed to start", err).Error())
            }
        }
    }

    rc.attachDesiredVolumes()

    // Update Node Status
    err := rc.nodeStatusUpdater.UpdateNodeStatuses()
    if err != nil {
        glog.Warningf("UpdateNodeStatuses failed with: %v", err)
    }
}

func (rc *reconciler) attachDesiredVolumes() {
    // Ensure volumes that should be attached are attached.
    for _, volumeToAttach := range rc.desiredStateOfWorld.GetVolumesToAttach() {
        if rc.actualStateOfWorld.VolumeNodeExists(volumeToAttach.VolumeName, volumeToAttach.NodeName) {
            // Volume/Node exists, touch it to reset detachRequestedTime
            if glog.V(5) {
                glog.Infof(volumeToAttach.GenerateMsgDetailed("Volume attached--touching", ""))
            }
            rc.actualStateOfWorld.ResetDetachRequestTime(volumeToAttach.VolumeName, volumeToAttach.NodeName)
            continue
        }
        // Don't even try to start an operation if there is already one running
        if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "") {
            if glog.V(10) {
                glog.Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
            }
            continue
        }

        if rc.isMultiAttachForbidden(volumeToAttach.VolumeSpec) {
            nodes := rc.actualStateOfWorld.GetNodesForVolume(volumeToAttach.VolumeName)
            if len(nodes) > 0 {
                if !volumeToAttach.MultiAttachErrorReported {
                    rc.reportMultiAttachError(volumeToAttach, nodes)
                    rc.desiredStateOfWorld.SetMultiAttachError(volumeToAttach.VolumeName, volumeToAttach.NodeName)
                }
                continue
            }
        }

        // Volume/Node doesn't exist, spawn a goroutine to attach it
        if glog.V(5) {
            glog.Infof(volumeToAttach.GenerateMsgDetailed("Starting attacherDetacher.AttachVolume", ""))
        }
        err := rc.attacherDetacher.AttachVolume(volumeToAttach.VolumeToAttach, rc.actualStateOfWorld)
        if err == nil {
            glog.Infof(volumeToAttach.GenerateMsgDetailed("attacherDetacher.AttachVolume started", ""))
        }
        if err != nil && !exponentialbackoff.IsExponentialBackoff(err) {
            // Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected.
            // Log all other errors.
            glog.Errorf(volumeToAttach.GenerateErrorDetailed("attacherDetacher.AttachVolume failed to start", err).Error())
        }
    }
}

// reportMultiAttachError sends events and logs situation that a volume that
// should be attached to a node is already attached to different node(s).
func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach, nodes []types.NodeName) {
    // Filter out the current node from list of nodes where the volume is
    // attached.
    // Some methods need []string, some others need []NodeName, collect both.
    // In theory, these arrays should have always only one element - the
    // controller does not allow more than one attachment. But use array just
    // in case...
    otherNodes := []types.NodeName{}
    otherNodesStr := []string{}
    for _, node := range nodes {
        if node != volumeToAttach.NodeName {
            otherNodes = append(otherNodes, node)
            otherNodesStr = append(otherNodesStr, string(node))
        }
    }

    // Get list of pods that use the volume on the other nodes.
    pods := rc.desiredStateOfWorld.GetVolumePodsOnNodes(otherNodes, volumeToAttach.VolumeName)

    if len(pods) == 0 {
        // We did not find any pods that request the volume. The pod must have been deleted already.
        simpleMsg, _ := volumeToAttach.GenerateMsg("Multi-Attach error", "Volume is already exclusively attached to one node and can't be attached to another")
        for _, pod := range volumeToAttach.ScheduledPods {
            rc.recorder.Eventf(pod, v1.EventTypeWarning, kevents.FailedAttachVolume, simpleMsg)
        }
        // Log detailed message to system admin
        nodeList := strings.Join(otherNodesStr, ", ")
        detailedMsg := volumeToAttach.GenerateMsgDetailed("Multi-Attach error", fmt.Sprintf("Volume is already exclusively attached to node %s and can't be attached to another", nodeList))
        glog.Warningf(detailedMsg)
        return
    }

    // There are pods that require the volume and run on another node. Typically
    // it's user error, e.g. a ReplicaSet uses a PVC and has >1 replicas. Let
    // the user know what pods are blocking the volume.
    for _, scheduledPod := range volumeToAttach.ScheduledPods {
        // Each scheduledPod must get a custom message. They can run in
        // different namespaces and user of a namespace should not see names of
        // pods in other namespaces.
        localPodNames := []string{} // Names of pods in scheduledPods's namespace
        otherPods := 0              // Count of pods in other namespaces
        for _, pod := range pods {
            if pod.Namespace == scheduledPod.Namespace {
                localPodNames = append(localPodNames, pod.Name)
            } else {
                otherPods++
            }
        }

        var msg string
        if len(localPodNames) > 0 {
            msg = fmt.Sprintf("Volume is already used by pod(s) %s", strings.Join(localPodNames, ", "))
            if otherPods > 0 {
                msg = fmt.Sprintf("%s and %d pod(s) in different namespaces", msg, otherPods)
            }
        } else {
            // No local pods, there are pods only in different namespaces.
            msg = fmt.Sprintf("Volume is already used by %d pod(s) in different namespaces", otherPods)
        }
        simpleMsg, _ := volumeToAttach.GenerateMsg("Multi-Attach error", msg)
        rc.recorder.Eventf(scheduledPod, v1.EventTypeWarning, kevents.FailedAttachVolume, simpleMsg)
    }

    // Log all pods for system admin
    podNames := []string{}
    for _, pod := range pods {
        podNames = append(podNames, pod.Namespace+"/"+pod.Name)
    }
    detailedMsg := volumeToAttach.GenerateMsgDetailed("Multi-Attach error", fmt.Sprintf("Volume is already used by pods %s on node %s", strings.Join(podNames, ", "), strings.Join(otherNodesStr, ", ")))
    glog.Warningf(detailedMsg)
}
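For orientation, the following is a minimal, hypothetical sketch (not part of the vendored files) of how a caller would construct and start this reconciler, based only on the NewReconciler signature and Run method above; the dependency values (caches, operation executor, node status updater, recorder) and the durations are assumed to be built and chosen elsewhere by the attach/detach controller, as the test file below also does.

    // Hypothetical wiring sketch; dsw, asw, attacherDetacher, nodeStatusUpdater
    // and recorder are assumed to have been constructed by the controller.
    rc := reconciler.NewReconciler(
        100*time.Millisecond, // loopPeriod (example value)
        6*time.Minute,        // maxWaitForUnmountDuration (example value)
        1*time.Minute,        // syncDuration (example value)
        false,                // disableReconciliationSync
        dsw, asw, attacherDetacher, nodeStatusUpdater, recorder,
    )

    stopCh := make(chan struct{})
    go rc.Run(stopCh) // runs reconcile() every loopPeriod until stopCh is closed
    defer close(stopCh)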
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/reconciler_test.go (generated, vendored, new file, 928 lines)
@@ -0,0 +1,928 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package reconciler

import (
    "testing"
    "time"

    "k8s.io/api/core/v1"
    k8stypes "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/tools/record"
    "k8s.io/kubernetes/pkg/controller"
    "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
    "k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
    controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
    stringutil "k8s.io/kubernetes/pkg/util/strings"
    volumetesting "k8s.io/kubernetes/pkg/volume/testing"
    "k8s.io/kubernetes/pkg/volume/util/operationexecutor"
    "k8s.io/kubernetes/pkg/volume/util/types"
)

const (
    reconcilerLoopPeriod      time.Duration = 0 * time.Millisecond
    syncLoopPeriod            time.Duration = 100 * time.Minute
    maxWaitForUnmountDuration time.Duration = 50 * time.Millisecond
    resyncPeriod              time.Duration = 5 * time.Minute
)

// Calls Run()
// Verifies there are no calls to attach or detach.
func Test_Run_Positive_DoNothing(t *testing.T) {
    // Arrange
    volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
    dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
    asw := cache.NewActualStateOfWorld(volumePluginMgr)
    fakeKubeClient := controllervolumetesting.CreateTestClient()
    fakeRecorder := &record.FakeRecorder{}
    fakeHandler := volumetesting.NewBlockVolumePathHandler()
    ad := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
        fakeKubeClient,
        volumePluginMgr,
        fakeRecorder,
        false, /* checkNodeCapabilitiesBeforeMount */
        fakeHandler))
    informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc())
    nsu := statusupdater.NewNodeStatusUpdater(
        fakeKubeClient, informerFactory.Core().V1().Nodes().Lister(), asw)
    reconciler := NewReconciler(
        reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, false, dsw, asw, ad, nsu, fakeRecorder)

    // Act
    ch := make(chan struct{})
    go reconciler.Run(ch)
    defer close(ch)

    // Assert
    waitForNewAttacherCallCount(t, 0 /* expectedCallCount */, fakePlugin)
    verifyNewAttacherCallCount(t, true /* expectZeroNewAttacherCallCount */, fakePlugin)
    verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
    waitForAttachCallCount(t, 0 /* expectedAttachCallCount */, fakePlugin)
    waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin)
}

// Populates desiredStateOfWorld cache with one node/volume/pod tuple.
// Calls Run()
// Verifies there is one attach call and no detach calls.
func Test_Run_Positive_OneDesiredVolumeAttach(t *testing.T) {
    // Arrange
    volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
    dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
    asw := cache.NewActualStateOfWorld(volumePluginMgr)
    fakeKubeClient := controllervolumetesting.CreateTestClient()
    fakeRecorder := &record.FakeRecorder{}
    fakeHandler := volumetesting.NewBlockVolumePathHandler()
    ad := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
        fakeKubeClient,
        volumePluginMgr,
        fakeRecorder,
        false, /* checkNodeCapabilitiesBeforeMount */
        fakeHandler))
    nsu := statusupdater.NewFakeNodeStatusUpdater(false /* returnError */)
    reconciler := NewReconciler(
        reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, false, dsw, asw, ad, nsu, fakeRecorder)
    podName := "pod-uid"
    volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    nodeName := k8stypes.NodeName("node-name")
    dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
    volumeExists := dsw.VolumeExists(volumeName, nodeName)
    if volumeExists {
        t.Fatalf(
            "Volume %q/node %q should not exist, but it does.",
            volumeName,
            nodeName)
    }

    _, podErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName)
    if podErr != nil {
        t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podErr)
    }

    // Act
    ch := make(chan struct{})
    go reconciler.Run(ch)
    defer close(ch)

    // Assert
    waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
    waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
    verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
}

// Populates desiredStateOfWorld cache with one node/volume/pod tuple.
// Calls Run()
// Verifies there is one attach call and no detach calls.
// Marks the node/volume as unmounted.
// Deletes the node/volume/pod tuple from desiredStateOfWorld cache.
// Verifies there is one detach call and no (new) attach calls.
func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithUnmountedVolume(t *testing.T) {
    // Arrange
    volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
    dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
    asw := cache.NewActualStateOfWorld(volumePluginMgr)
    fakeKubeClient := controllervolumetesting.CreateTestClient()
    fakeRecorder := &record.FakeRecorder{}
    fakeHandler := volumetesting.NewBlockVolumePathHandler()
    ad := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
        fakeKubeClient,
        volumePluginMgr,
        fakeRecorder,
        false, /* checkNodeCapabilitiesBeforeMount */
        fakeHandler))
    nsu := statusupdater.NewFakeNodeStatusUpdater(false /* returnError */)
    reconciler := NewReconciler(
        reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, false, dsw, asw, ad, nsu, fakeRecorder)
    podName := "pod-uid"
    volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    nodeName := k8stypes.NodeName("node-name")
    dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
    volumeExists := dsw.VolumeExists(volumeName, nodeName)
    if volumeExists {
        t.Fatalf(
            "Volume %q/node %q should not exist, but it does.",
            volumeName,
            nodeName)
    }

    generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName)
    if podAddErr != nil {
        t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podAddErr)
    }

    // Act
    ch := make(chan struct{})
    go reconciler.Run(ch)
    defer close(ch)

    // Assert
    waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
    verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
    waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
    verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
    waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin)

    // Act
    dsw.DeletePod(types.UniquePodName(podName), generatedVolumeName, nodeName)
    volumeExists = dsw.VolumeExists(generatedVolumeName, nodeName)
    if volumeExists {
        t.Fatalf(
            "Deleted pod %q from volume %q/node %q. Volume should also be deleted but it still exists.",
            podName,
            generatedVolumeName,
            nodeName)
    }
    asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */)
    asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */)

    // Assert
    waitForNewDetacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
    verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
    waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
    verifyNewDetacherCallCount(t, false /* expectZeroNewDetacherCallCount */, fakePlugin)
    waitForDetachCallCount(t, 1 /* expectedDetachCallCount */, fakePlugin)
}

// Populates desiredStateOfWorld cache with one node/volume/pod tuple.
// Calls Run()
// Verifies there is one attach call and no detach calls.
// Deletes the node/volume/pod tuple from desiredStateOfWorld cache without first marking the node/volume as unmounted.
// Verifies there is one detach call and no (new) attach calls.
func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithMountedVolume(t *testing.T) {
    // Arrange
    volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
    dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
    asw := cache.NewActualStateOfWorld(volumePluginMgr)
    fakeKubeClient := controllervolumetesting.CreateTestClient()
    fakeRecorder := &record.FakeRecorder{}
    fakeHandler := volumetesting.NewBlockVolumePathHandler()
    ad := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
        fakeKubeClient,
        volumePluginMgr,
        fakeRecorder,
        false, /* checkNodeCapabilitiesBeforeMount */
        fakeHandler))
    nsu := statusupdater.NewFakeNodeStatusUpdater(false /* returnError */)
    reconciler := NewReconciler(
        reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, false, dsw, asw, ad, nsu, fakeRecorder)
    podName := "pod-uid"
    volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    nodeName := k8stypes.NodeName("node-name")
    dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
    volumeExists := dsw.VolumeExists(volumeName, nodeName)
    if volumeExists {
        t.Fatalf(
            "Volume %q/node %q should not exist, but it does.",
            volumeName,
            nodeName)
    }

    generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName)
    if podAddErr != nil {
        t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podAddErr)
    }

    // Act
    ch := make(chan struct{})
    go reconciler.Run(ch)
    defer close(ch)

    // Assert
    waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
    verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
    waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
    verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
    waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin)

    // Act
    dsw.DeletePod(types.UniquePodName(podName), generatedVolumeName, nodeName)
    volumeExists = dsw.VolumeExists(generatedVolumeName, nodeName)
    if volumeExists {
        t.Fatalf(
            "Deleted pod %q from volume %q/node %q. Volume should also be deleted but it still exists.",
            podName,
            generatedVolumeName,
            nodeName)
    }

    // Assert -- Timer will trigger detach
    waitForNewDetacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
    verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
    waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
    verifyNewDetacherCallCount(t, false /* expectZeroNewDetacherCallCount */, fakePlugin)
    waitForDetachCallCount(t, 1 /* expectedDetachCallCount */, fakePlugin)
}

// Populates desiredStateOfWorld cache with one node/volume/pod tuple.
// Has node update fail
// Calls Run()
// Verifies there is one attach call and no detach calls.
// Marks the node/volume as unmounted.
// Deletes the node/volume/pod tuple from desiredStateOfWorld cache.
// Verifies there are NO detach call and no (new) attach calls.
func Test_Run_Negative_OneDesiredVolumeAttachThenDetachWithUnmountedVolumeUpdateStatusFail(t *testing.T) {
    // Arrange
    volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
    dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
    asw := cache.NewActualStateOfWorld(volumePluginMgr)
    fakeKubeClient := controllervolumetesting.CreateTestClient()
    fakeRecorder := &record.FakeRecorder{}
    fakeHandler := volumetesting.NewBlockVolumePathHandler()
    ad := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
        fakeKubeClient,
        volumePluginMgr,
        fakeRecorder,
        false, /* checkNodeCapabilitiesBeforeMount */
        fakeHandler))
    nsu := statusupdater.NewFakeNodeStatusUpdater(true /* returnError */)
    reconciler := NewReconciler(
        reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, false, dsw, asw, ad, nsu, fakeRecorder)
    podName := "pod-uid"
    volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    nodeName := k8stypes.NodeName("node-name")
    dsw.AddNode(nodeName, false /*keepTerminatedPodVolumes*/)
    volumeExists := dsw.VolumeExists(volumeName, nodeName)
    if volumeExists {
        t.Fatalf(
            "Volume %q/node %q should not exist, but it does.",
            volumeName,
            nodeName)
    }

    generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName)
    if podAddErr != nil {
        t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podAddErr)
    }

    // Act
    ch := make(chan struct{})
    go reconciler.Run(ch)
    defer close(ch)

    // Assert
    waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
    verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
    waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
    verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
    waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin)

    // Act
    dsw.DeletePod(types.UniquePodName(podName), generatedVolumeName, nodeName)
    volumeExists = dsw.VolumeExists(generatedVolumeName, nodeName)
    if volumeExists {
        t.Fatalf(
            "Deleted pod %q from volume %q/node %q. Volume should also be deleted but it still exists.",
            podName,
            generatedVolumeName,
            nodeName)
    }
    asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */)
    asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */)

    // Assert
    verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
    verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
    waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
    verifyNewDetacherCallCount(t, false /* expectZeroNewDetacherCallCount */, fakePlugin)
    waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin)
}

// Creates a volume with accessMode ReadWriteMany
// Populates desiredStateOfWorld cache with two node/volume/pod tuples pointing to the created volume
// Calls Run()
// Verifies there are two attach calls and no detach calls.
// Deletes the first node/volume/pod tuple from desiredStateOfWorld cache without first marking the node/volume as unmounted.
// Verifies there is one detach call and no (new) attach calls.
// Deletes the second node/volume/pod tuple from desiredStateOfWorld cache without first marking the node/volume as unmounted.
// Verifies there are two detach calls and no (new) attach calls.
func Test_Run_OneVolumeAttachAndDetachMultipleNodesWithReadWriteMany(t *testing.T) {
    // Arrange
    volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
    dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
    asw := cache.NewActualStateOfWorld(volumePluginMgr)
    fakeKubeClient := controllervolumetesting.CreateTestClient()
    fakeRecorder := &record.FakeRecorder{}
    fakeHandler := volumetesting.NewBlockVolumePathHandler()
    ad := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
        fakeKubeClient,
        volumePluginMgr,
        fakeRecorder,
        false, /* checkNodeCapabilitiesBeforeMount */
        fakeHandler))
    nsu := statusupdater.NewFakeNodeStatusUpdater(false /* returnError */)
    reconciler := NewReconciler(
        reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, false, dsw, asw, ad, nsu, fakeRecorder)
    podName1 := "pod-uid1"
    podName2 := "pod-uid2"
    volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    volumeSpec.PersistentVolume.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}
    nodeName1 := k8stypes.NodeName("node-name1")
    nodeName2 := k8stypes.NodeName("node-name2")
    dsw.AddNode(nodeName1, false /*keepTerminatedPodVolumes*/)
    dsw.AddNode(nodeName2, false /*keepTerminatedPodVolumes*/)

    generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName1), controllervolumetesting.NewPod(podName1, podName1), volumeSpec, nodeName1)
    if podAddErr != nil {
        t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podAddErr)
    }
    _, podAddErr = dsw.AddPod(types.UniquePodName(podName2), controllervolumetesting.NewPod(podName2, podName2), volumeSpec, nodeName2)
    if podAddErr != nil {
        t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podAddErr)
    }

    // Act
    ch := make(chan struct{})
    go reconciler.Run(ch)
    defer close(ch)

    // Assert
    waitForNewAttacherCallCount(t, 2 /* expectedCallCount */, fakePlugin)
    verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
    waitForTotalAttachCallCount(t, 2 /* expectedAttachCallCount */, fakePlugin)
    verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
    waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin)
    waitForAttachedToNodesCount(t, 2 /* expectedNodeCount */, generatedVolumeName, asw)

    // Act
    dsw.DeletePod(types.UniquePodName(podName1), generatedVolumeName, nodeName1)
    volumeExists := dsw.VolumeExists(generatedVolumeName, nodeName1)
    if volumeExists {
        t.Fatalf(
            "Deleted pod %q from volume %q/node %q. Volume should also be deleted but it still exists.",
            podName1,
            generatedVolumeName,
            nodeName1)
    }

    // Assert -- Timer will trigger detach
    waitForNewDetacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
    verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
    waitForTotalAttachCallCount(t, 2 /* expectedAttachCallCount */, fakePlugin)
    verifyNewDetacherCallCount(t, false /* expectZeroNewDetacherCallCount */, fakePlugin)
    waitForTotalDetachCallCount(t, 1 /* expectedDetachCallCount */, fakePlugin)

    // Act
    dsw.DeletePod(types.UniquePodName(podName2), generatedVolumeName, nodeName2)
    volumeExists = dsw.VolumeExists(generatedVolumeName, nodeName2)
    if volumeExists {
        t.Fatalf(
            "Deleted pod %q from volume %q/node %q. Volume should also be deleted but it still exists.",
            podName2,
            generatedVolumeName,
            nodeName2)
    }

    // Assert -- Timer will trigger detach
    waitForNewDetacherCallCount(t, 2 /* expectedCallCount */, fakePlugin)
    verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
    waitForTotalAttachCallCount(t, 2 /* expectedAttachCallCount */, fakePlugin)
    verifyNewDetacherCallCount(t, false /* expectZeroNewDetacherCallCount */, fakePlugin)
    waitForTotalDetachCallCount(t, 2 /* expectedDetachCallCount */, fakePlugin)
}

// Creates a volume with accessMode ReadWriteOnce
// Populates desiredStateOfWorld cache with two node/volume/pod tuples pointing to the created volume
// Calls Run()
// Verifies there is one attach call and no detach calls.
// Deletes the node/volume/pod tuple from desiredStateOfWorld which succeeded in attaching
// Verifies there are two attach call and one detach call.
func Test_Run_OneVolumeAttachAndDetachMultipleNodesWithReadWriteOnce(t *testing.T) {
    // Arrange
    volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
    dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
    asw := cache.NewActualStateOfWorld(volumePluginMgr)
    fakeKubeClient := controllervolumetesting.CreateTestClient()
    fakeRecorder := &record.FakeRecorder{}
    fakeHandler := volumetesting.NewBlockVolumePathHandler()
    ad := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
        fakeKubeClient,
        volumePluginMgr,
        fakeRecorder,
        false, /* checkNodeCapabilitiesBeforeMount */
        fakeHandler))
    nsu := statusupdater.NewFakeNodeStatusUpdater(false /* returnError */)
    reconciler := NewReconciler(
        reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, false, dsw, asw, ad, nsu, fakeRecorder)
    podName1 := "pod-uid1"
    podName2 := "pod-uid2"
    volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    volumeSpec.PersistentVolume.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
    nodeName1 := k8stypes.NodeName("node-name1")
    nodeName2 := k8stypes.NodeName("node-name2")
    dsw.AddNode(nodeName1, false /*keepTerminatedPodVolumes*/)
    dsw.AddNode(nodeName2, false /*keepTerminatedPodVolumes*/)

    // Add both pods at the same time to provoke a potential race condition in the reconciler
    generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName1), controllervolumetesting.NewPod(podName1, podName1), volumeSpec, nodeName1)
    if podAddErr != nil {
        t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podAddErr)
    }
    _, podAddErr = dsw.AddPod(types.UniquePodName(podName2), controllervolumetesting.NewPod(podName2, podName2), volumeSpec, nodeName2)
    if podAddErr != nil {
        t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podAddErr)
    }

    // Act
    ch := make(chan struct{})
    go reconciler.Run(ch)
    defer close(ch)

    // Assert
    waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
    verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
    waitForTotalAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
    verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
    waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin)
    waitForAttachedToNodesCount(t, 1 /* expectedNodeCount */, generatedVolumeName, asw)

    nodesForVolume := asw.GetNodesForVolume(generatedVolumeName)

    // check if multiattach is marked
    // at least one volume+node should be marked with multiattach error
    nodeAttachedTo := nodesForVolume[0]
    waitForMultiAttachErrorOnNode(t, nodeAttachedTo, dsw)

    // Act
    podToDelete := ""
    if nodesForVolume[0] == nodeName1 {
        podToDelete = podName1
    } else if nodesForVolume[0] == nodeName2 {
        podToDelete = podName2
    } else {
        t.Fatal("Volume attached to unexpected node")
    }

    dsw.DeletePod(types.UniquePodName(podToDelete), generatedVolumeName, nodesForVolume[0])
    volumeExists := dsw.VolumeExists(generatedVolumeName, nodesForVolume[0])
    if volumeExists {
        t.Fatalf(
            "Deleted pod %q from volume %q/node %q. Volume should also be deleted but it still exists.",
            podToDelete,
            generatedVolumeName,
            nodesForVolume[0])
    }

    // Assert
    waitForNewDetacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
    verifyNewDetacherCallCount(t, false /* expectZeroNewDetacherCallCount */, fakePlugin)
    waitForTotalDetachCallCount(t, 1 /* expectedDetachCallCount */, fakePlugin)
    waitForNewAttacherCallCount(t, 2 /* expectedCallCount */, fakePlugin)
    verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
    waitForTotalAttachCallCount(t, 2 /* expectedAttachCallCount */, fakePlugin)
}

func Test_ReportMultiAttachError(t *testing.T) {
    type nodeWithPods struct {
        name     k8stypes.NodeName
        podNames []string
    }
    tests := []struct {
        name           string
        nodes          []nodeWithPods
        expectedEvents []string
    }{
        {
            "no pods use the volume",
            []nodeWithPods{
                {"node1", []string{"ns1/pod1"}},
            },
            []string{"Warning FailedAttachVolume Multi-Attach error for volume \"volume-name\" Volume is already exclusively attached to one node and can't be attached to another"},
        },
        {
            "pods in the same namespace use the volume",
            []nodeWithPods{
                {"node1", []string{"ns1/pod1"}},
                {"node2", []string{"ns1/pod2"}},
            },
            []string{"Warning FailedAttachVolume Multi-Attach error for volume \"volume-name\" Volume is already used by pod(s) pod2"},
        },
        {
            "pods in another namespace use the volume",
            []nodeWithPods{
                {"node1", []string{"ns1/pod1"}},
                {"node2", []string{"ns2/pod2"}},
            },
            []string{"Warning FailedAttachVolume Multi-Attach error for volume \"volume-name\" Volume is already used by 1 pod(s) in different namespaces"},
        },
        {
            "pods both in the same and another namespace use the volume",
            []nodeWithPods{
                {"node1", []string{"ns1/pod1"}},
                {"node2", []string{"ns2/pod2"}},
                {"node3", []string{"ns1/pod3"}},
            },
            []string{"Warning FailedAttachVolume Multi-Attach error for volume \"volume-name\" Volume is already used by pod(s) pod3 and 1 pod(s) in different namespaces"},
        },
    }

    for _, test := range tests {
        // Arrange
        t.Logf("Test %q starting", test.name)
        volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
        dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
        asw := cache.NewActualStateOfWorld(volumePluginMgr)
        fakeKubeClient := controllervolumetesting.CreateTestClient()
        fakeRecorder := record.NewFakeRecorder(100)
        fakeHandler := volumetesting.NewBlockVolumePathHandler()
        ad := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
            fakeKubeClient,
            volumePluginMgr,
            fakeRecorder,
            false, /* checkNodeCapabilitiesBeforeMount */
            fakeHandler))
        nsu := statusupdater.NewFakeNodeStatusUpdater(false /* returnError */)
        rc := NewReconciler(
            reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, false, dsw, asw, ad, nsu, fakeRecorder)

        nodes := []k8stypes.NodeName{}
        for _, n := range test.nodes {
            dsw.AddNode(n.name, false /*keepTerminatedPodVolumes*/)
            nodes = append(nodes, n.name)
            for _, podName := range n.podNames {
                volumeName := v1.UniqueVolumeName("volume-name")
                volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
                volumeSpec.PersistentVolume.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
                uid := string(n.name) + "-" + podName // unique UID
                namespace, name := stringutil.SplitQualifiedName(podName)
                pod := controllervolumetesting.NewPod(uid, name)
                pod.Namespace = namespace
                _, err := dsw.AddPod(types.UniquePodName(uid), pod, volumeSpec, n.name)
                if err != nil {
                    t.Fatalf("Error adding pod %s to DSW: %s", podName, err)
                }
            }
        }
        // Act
        volumes := dsw.GetVolumesToAttach()
        for _, vol := range volumes {
            if vol.NodeName == "node1" {
                rc.(*reconciler).reportMultiAttachError(vol, nodes)
            }
        }

        // Assert
        close(fakeRecorder.Events)
        index := 0
        for event := range fakeRecorder.Events {
            if len(test.expectedEvents) < index {
                t.Errorf("Test %q: unexpected event received: %s", test.name, event)
            } else {
                expectedEvent := test.expectedEvents[index]
                if expectedEvent != event {
                    t.Errorf("Test %q: event %d: expected %q, got %q", test.name, index, expectedEvent, event)
                }
            }
            index++
        }
        for i := index; i < len(test.expectedEvents); i++ {
            t.Errorf("Test %q: event %d: expected %q, got none", test.name, i, test.expectedEvents[i])
        }
    }
}

func waitForMultiAttachErrorOnNode(
    t *testing.T,
    attachedNode k8stypes.NodeName,
    dsow cache.DesiredStateOfWorld) {
    multAttachCheckFunc := func() (bool, error) {
        for _, volumeToAttach := range dsow.GetVolumesToAttach() {
            if volumeToAttach.NodeName != attachedNode {
                if volumeToAttach.MultiAttachErrorReported {
                    return true, nil
                }
            }
        }
        t.Logf("Warning: MultiAttach error not yet set on Node. Will retry.")
        return false, nil
    }

    err := retryWithExponentialBackOff(100*time.Millisecond, multAttachCheckFunc)
    if err != nil {
        t.Fatalf("Timed out waiting for MultiAttach Error to be set on non-attached node")
    }
}

func waitForNewAttacherCallCount(
    t *testing.T,
    expectedCallCount int,
    fakePlugin *volumetesting.FakeVolumePlugin) {
    err := retryWithExponentialBackOff(
        time.Duration(5*time.Millisecond),
        func() (bool, error) {
            actualCallCount := fakePlugin.GetNewAttacherCallCount()
            if actualCallCount >= expectedCallCount {
                return true, nil
            }
            t.Logf(
                "Warning: Wrong NewAttacherCallCount. Expected: <%v> Actual: <%v>. Will retry.",
                expectedCallCount,
                actualCallCount)
            return false, nil
        },
    )

    if err != nil {
        t.Fatalf(
            "Timed out waiting for NewAttacherCallCount. Expected: <%v> Actual: <%v>",
            expectedCallCount,
            fakePlugin.GetNewAttacherCallCount())
    }
}

func waitForNewDetacherCallCount(
    t *testing.T,
    expectedCallCount int,
    fakePlugin *volumetesting.FakeVolumePlugin) {
    err := retryWithExponentialBackOff(
        time.Duration(5*time.Millisecond),
        func() (bool, error) {
            actualCallCount := fakePlugin.GetNewDetacherCallCount()
            if actualCallCount >= expectedCallCount {
                return true, nil
            }
            t.Logf(
                "Warning: Wrong NewDetacherCallCount. Expected: <%v> Actual: <%v>. Will retry.",
                expectedCallCount,
                actualCallCount)
            return false, nil
        },
    )

    if err != nil {
        t.Fatalf(
            "Timed out waiting for NewDetacherCallCount. Expected: <%v> Actual: <%v>",
            expectedCallCount,
            fakePlugin.GetNewDetacherCallCount())
    }
}

func waitForAttachCallCount(
    t *testing.T,
    expectedAttachCallCount int,
    fakePlugin *volumetesting.FakeVolumePlugin) {
    if len(fakePlugin.GetAttachers()) == 0 && expectedAttachCallCount == 0 {
        return
    }

    err := retryWithExponentialBackOff(
        time.Duration(5*time.Millisecond),
        func() (bool, error) {
            for i, attacher := range fakePlugin.GetAttachers() {
                actualCallCount := attacher.GetAttachCallCount()
                if actualCallCount == expectedAttachCallCount {
                    return true, nil
                }
                t.Logf(
                    "Warning: Wrong attacher[%v].GetAttachCallCount(). Expected: <%v> Actual: <%v>. Will try next attacher.",
                    i,
                    expectedAttachCallCount,
                    actualCallCount)
            }

            t.Logf(
                "Warning: No attachers have expected AttachCallCount. Expected: <%v>. Will retry.",
                expectedAttachCallCount)
            return false, nil
        },
    )

    if err != nil {
        t.Fatalf(
            "No attachers have expected AttachCallCount. Expected: <%v>",
            expectedAttachCallCount)
    }
}

func waitForTotalAttachCallCount(
    t *testing.T,
    expectedAttachCallCount int,
    fakePlugin *volumetesting.FakeVolumePlugin) {
    if len(fakePlugin.GetAttachers()) == 0 && expectedAttachCallCount == 0 {
        return
    }

    err := retryWithExponentialBackOff(
        time.Duration(5*time.Millisecond),
        func() (bool, error) {
            totalCount := 0
            for _, attacher := range fakePlugin.GetAttachers() {
                totalCount += attacher.GetAttachCallCount()
            }
            if totalCount == expectedAttachCallCount {
                return true, nil
            }
            t.Logf(
                "Warning: Wrong total GetAttachCallCount(). Expected: <%v> Actual: <%v>. Will retry.",
                expectedAttachCallCount,
                totalCount)

            return false, nil
        },
    )

    if err != nil {
        t.Fatalf(
            "Total AttachCallCount does not match expected value. Expected: <%v>",
            expectedAttachCallCount)
    }
}

func waitForDetachCallCount(
    t *testing.T,
    expectedDetachCallCount int,
    fakePlugin *volumetesting.FakeVolumePlugin) {
    if len(fakePlugin.GetDetachers()) == 0 && expectedDetachCallCount == 0 {
        return
    }

    err := retryWithExponentialBackOff(
        time.Duration(5*time.Millisecond),
        func() (bool, error) {
            for i, detacher := range fakePlugin.GetDetachers() {
                actualCallCount := detacher.GetDetachCallCount()
                if actualCallCount == expectedDetachCallCount {
                    return true, nil
                }
                t.Logf(
                    "Wrong detacher[%v].GetDetachCallCount(). Expected: <%v> Actual: <%v>. Will try next detacher.",
                    i,
                    expectedDetachCallCount,
                    actualCallCount)
            }

            t.Logf(
                "Warning: No detachers have expected DetachCallCount. Expected: <%v>. Will retry.",
                expectedDetachCallCount)
            return false, nil
        },
    )

    if err != nil {
        t.Fatalf(
            "No detachers have expected DetachCallCount. Expected: <%v>",
            expectedDetachCallCount)
    }
}

func waitForTotalDetachCallCount(
    t *testing.T,
    expectedDetachCallCount int,
    fakePlugin *volumetesting.FakeVolumePlugin) {
    if len(fakePlugin.GetDetachers()) == 0 && expectedDetachCallCount == 0 {
        return
    }

    err := retryWithExponentialBackOff(
        time.Duration(5*time.Millisecond),
        func() (bool, error) {
            totalCount := 0
            for _, detacher := range fakePlugin.GetDetachers() {
                totalCount += detacher.GetDetachCallCount()
            }
            if totalCount == expectedDetachCallCount {
                return true, nil
            }
            t.Logf(
                "Warning: Wrong total GetDetachCallCount(). Expected: <%v> Actual: <%v>. Will retry.",
                expectedDetachCallCount,
                totalCount)

            return false, nil
        },
    )

    if err != nil {
        t.Fatalf(
            "Total DetachCallCount does not match expected value. Expected: <%v>",
            expectedDetachCallCount)
    }
}

func waitForAttachedToNodesCount(
    t *testing.T,
    expectedNodeCount int,
    volumeName v1.UniqueVolumeName,
    asw cache.ActualStateOfWorld) {

    err := retryWithExponentialBackOff(
        time.Duration(5*time.Millisecond),
        func() (bool, error) {
            count := len(asw.GetNodesForVolume(volumeName))
            if count == expectedNodeCount {
                return true, nil
            }
            t.Logf(
                "Warning: Wrong number of nodes having <%v> attached. Expected: <%v> Actual: <%v>. Will retry.",
                volumeName,
                expectedNodeCount,
                count)

            return false, nil
        },
    )

    if err != nil {
        count := len(asw.GetNodesForVolume(volumeName))
        t.Fatalf(
            "Wrong number of nodes having <%v> attached. Expected: <%v> Actual: <%v>",
            volumeName,
            expectedNodeCount,
            count)
    }
}

func verifyNewAttacherCallCount(
    t *testing.T,
    expectZeroNewAttacherCallCount bool,
    fakePlugin *volumetesting.FakeVolumePlugin) {

    if expectZeroNewAttacherCallCount &&
        fakePlugin.GetNewAttacherCallCount() != 0 {
        t.Fatalf(
            "Wrong NewAttacherCallCount. Expected: <0> Actual: <%v>",
            fakePlugin.GetNewAttacherCallCount())
    }
}

func verifyNewDetacherCallCount(
    t *testing.T,
    expectZeroNewDetacherCallCount bool,
    fakePlugin *volumetesting.FakeVolumePlugin) {

    if expectZeroNewDetacherCallCount &&
        fakePlugin.GetNewDetacherCallCount() != 0 {
        t.Fatalf("Wrong NewDetacherCallCount. Expected: <0> Actual: <%v>",
            fakePlugin.GetNewDetacherCallCount())
    }
}

func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
    backoff := wait.Backoff{
        Duration: initialDuration,
        Factor:   3,
        Jitter:   0,
        Steps:    6,
    }
    return wait.ExponentialBackoff(backoff, fn)
}