Fix requeue logic in the common controller
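Note: the diff below replaces the controller's fixed retry loops (the createSnapshotContentRetryCount / createSnapshotContentInterval fields driven by time.Sleep) with workqueue-based requeueing: each worker pops a key, calls a sync function, re-adds the key with AddRateLimited on failure so it is retried with backoff, and calls Forget on success. The following is a minimal, self-contained sketch of that requeue pattern, not code from this repository; syncByKey, worker, and the example keys are stand-ins.

package main

import (
	"errors"
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

// syncByKey is a stand-in for syncSnapshotByKey/syncContentByKey: it returns
// an error when the object cannot be synced yet.
func syncByKey(key string) error {
	if key == "default/snap-not-ready" {
		return errors.New("content not created yet")
	}
	fmt.Printf("synced %s\n", key)
	return nil
}

// worker mirrors the requeue pattern in the diff: pop a key, try to sync it,
// requeue with rate limiting on failure, and Forget on success so the key's
// backoff history is cleared.
func worker(queue workqueue.RateLimitingInterface) {
	for {
		keyObj, quit := queue.Get()
		if quit {
			return
		}
		key := keyObj.(string)
		if err := syncByKey(key); err != nil {
			// Rather than wait for a full resync, re-add the key with backoff.
			queue.AddRateLimited(key)
		} else {
			// Clear rate-limit history; the key is only re-queued by new events.
			queue.Forget(key)
		}
		queue.Done(keyObj)
	}
}

func main() {
	queue := workqueue.NewNamedRateLimitingQueue(
		workqueue.DefaultControllerRateLimiter(), "example")
	queue.Add("default/snap-ready")
	queue.ShutDown() // already-queued items are still drained before Get reports shutdown
	worker(queue)
}

In the diff, the same pattern appears in snapshotWorker and contentWorker, which now delegate to syncSnapshotByKey and syncContentByKey and requeue the key on error.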

@@ -582,7 +582,6 @@ func (r *snapshotReactor) getChangeCount() int {
// waitForIdle waits until all tests, controllers and other goroutines do their
// job and no new actions are registered for 10 milliseconds.
func (r *snapshotReactor) waitForIdle() {
r.ctrl.runningOperations.WaitForCompletion()
// Check every 10ms if the controller does something and stop if it's
// idle.
oldChanges := -1

@@ -609,9 +608,6 @@ func (r *snapshotReactor) waitTest(test controllerTest) error {
Steps: 10,
}
err := wait.ExponentialBackoff(backoff, func() (done bool, err error) {
// Finish all operations that are in progress
r.ctrl.runningOperations.WaitForCompletion()

// Return 'true' if the reactor reached the expected state
err1 := r.checkSnapshots(test.expectedSnapshots)
err2 := r.checkContents(test.expectedContents)

@@ -757,8 +753,6 @@ func newTestController(kubeClient kubernetes.Interface, clientset clientset.Inte
ctrl.snapshotListerSynced = alwaysReady
ctrl.classListerSynced = alwaysReady
ctrl.pvcListerSynced = alwaysReady
ctrl.createSnapshotContentInterval = time.Millisecond * 5
ctrl.createSnapshotContentRetryCount = 3

return ctrl, nil
}

@@ -423,18 +423,10 @@ func (ctrl *csiSnapshotCommonController) syncUnreadySnapshot(snapshot *crdv1.Vol
}

// update snapshot status
for i := 0; i < ctrl.createSnapshotContentRetryCount; i++ {
klog.V(5).Infof("syncUnreadySnapshot [%s]: trying to update snapshot status", utils.SnapshotKey(snapshot))
_, err = ctrl.updateSnapshotStatus(snapshot, newContent)
if err == nil {
break
}
klog.V(4).Infof("failed to update snapshot %s status: %v", utils.SnapshotKey(snapshot), err)
time.Sleep(ctrl.createSnapshotContentInterval)
}

if err != nil {
klog.V(5).Infof("syncUnreadySnapshot [%s]: trying to update snapshot status", utils.SnapshotKey(snapshot))
if _, err = ctrl.updateSnapshotStatus(snapshot, newContent); err != nil {
// update snapshot status failed
klog.V(4).Infof("failed to update snapshot %s status: %v", utils.SnapshotKey(snapshot), err)
ctrl.updateSnapshotErrorStatusWithEvent(snapshot, v1.EventTypeWarning, "SnapshotStatusUpdateFailed", fmt.Sprintf("Snapshot status update failed, %v", err))
return err
}

@@ -474,17 +466,8 @@ func (ctrl *csiSnapshotCommonController) syncUnreadySnapshot(snapshot *crdv1.Vol
}

// Update snapshot status with BoundVolumeSnapshotContentName
for i := 0; i < ctrl.createSnapshotContentRetryCount; i++ {
klog.V(5).Infof("syncUnreadySnapshot [%s]: trying to update snapshot status", utils.SnapshotKey(snapshot))
_, err = ctrl.updateSnapshotStatus(snapshot, content)
if err == nil {
break
}
klog.V(4).Infof("failed to update snapshot %s status: %v", utils.SnapshotKey(snapshot), err)
time.Sleep(ctrl.createSnapshotContentInterval)
}

if err != nil {
klog.V(5).Infof("syncUnreadySnapshot [%s]: trying to update snapshot status", utils.SnapshotKey(snapshot))
if _, err = ctrl.updateSnapshotStatus(snapshot, content); err != nil {
// update snapshot status failed
ctrl.updateSnapshotErrorStatusWithEvent(snapshot, v1.EventTypeWarning, "SnapshotStatusUpdateFailed", fmt.Sprintf("Snapshot status update failed, %v", err))
return err

@@ -656,24 +639,18 @@ func (ctrl *csiSnapshotCommonController) createSnapshotContent(snapshot *crdv1.V
}

var updateContent *crdv1.VolumeSnapshotContent
klog.V(3).Infof("volume snapshot content %#v", snapshotContent)
// Try to create the VolumeSnapshotContent object several times
for i := 0; i < ctrl.createSnapshotContentRetryCount; i++ {
klog.V(5).Infof("createSnapshotContent [%s]: trying to save volume snapshot content %s", utils.SnapshotKey(snapshot), snapshotContent.Name)
if updateContent, err = ctrl.clientset.SnapshotV1beta1().VolumeSnapshotContents().Create(context.TODO(), snapshotContent, metav1.CreateOptions{}); err == nil || apierrs.IsAlreadyExists(err) {
// Save succeeded.
if err != nil {
klog.V(3).Infof("volume snapshot content %q for snapshot %q already exists, reusing", snapshotContent.Name, utils.SnapshotKey(snapshot))
err = nil
updateContent = snapshotContent
} else {
klog.V(3).Infof("volume snapshot content %q for snapshot %q saved, %v", snapshotContent.Name, utils.SnapshotKey(snapshot), snapshotContent)
}
break
klog.V(5).Infof("volume snapshot content %#v", snapshotContent)
// Try to create the VolumeSnapshotContent object
klog.V(5).Infof("createSnapshotContent [%s]: trying to save volume snapshot content %s", utils.SnapshotKey(snapshot), snapshotContent.Name)
if updateContent, err = ctrl.clientset.SnapshotV1beta1().VolumeSnapshotContents().Create(context.TODO(), snapshotContent, metav1.CreateOptions{}); err == nil || apierrs.IsAlreadyExists(err) {
// Save succeeded.
if err != nil {
klog.V(3).Infof("volume snapshot content %q for snapshot %q already exists, reusing", snapshotContent.Name, utils.SnapshotKey(snapshot))
err = nil
updateContent = snapshotContent
} else {
klog.V(3).Infof("volume snapshot content %q for snapshot %q saved, %v", snapshotContent.Name, utils.SnapshotKey(snapshot), snapshotContent)
}
// Save failed, try again after a while.
klog.V(3).Infof("failed to save volume snapshot content %q for snapshot %q: %v", snapshotContent.Name, utils.SnapshotKey(snapshot), err)
time.Sleep(ctrl.createSnapshotContentInterval)
}

if err != nil {

@@ -982,19 +959,14 @@ func (ctrl *csiSnapshotCommonController) bindandUpdateVolumeSnapshot(snapshotCon
snapshotCopy := snapshotObj.DeepCopy()
// update snapshot status
var updateSnapshot *crdv1.VolumeSnapshot
for i := 0; i < ctrl.createSnapshotContentRetryCount; i++ {
klog.V(5).Infof("bindandUpdateVolumeSnapshot [%s]: trying to update snapshot status", utils.SnapshotKey(snapshotCopy))
updateSnapshot, err = ctrl.updateSnapshotStatus(snapshotCopy, snapshotContent)
if err == nil {
snapshotCopy = updateSnapshot
break
}
klog.V(4).Infof("failed to update snapshot %s status: %v", utils.SnapshotKey(snapshot), err)
time.Sleep(ctrl.createSnapshotContentInterval)
klog.V(5).Infof("bindandUpdateVolumeSnapshot [%s]: trying to update snapshot status", utils.SnapshotKey(snapshotCopy))
updateSnapshot, err = ctrl.updateSnapshotStatus(snapshotCopy, snapshotContent)
if err == nil {
snapshotCopy = updateSnapshot
}

if err != nil {
// update snapshot status failed
klog.V(4).Infof("failed to update snapshot %s status: %v", utils.SnapshotKey(snapshot), err)
ctrl.updateSnapshotErrorStatusWithEvent(snapshotCopy, v1.EventTypeWarning, "SnapshotStatusUpdateFailed", fmt.Sprintf("Snapshot status update failed, %v", err))
return nil, err
}

@@ -39,15 +39,8 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/goroutinemap"
)

// Number of retries when we create a VolumeSnapshotContent object
const createSnapshotContentRetryCount = 5

// Interval between retries when we create a VolumeSnapshotContent object
const createSnapshotContentInterval = 10 * time.Second

type csiSnapshotCommonController struct {
clientset clientset.Interface
client kubernetes.Interface

@@ -67,12 +60,7 @@ type csiSnapshotCommonController struct {
snapshotStore cache.Store
contentStore cache.Store

// Map of scheduled/running operations.
runningOperations goroutinemap.GoRoutineMap

createSnapshotContentRetryCount int
createSnapshotContentInterval time.Duration
resyncPeriod time.Duration
resyncPeriod time.Duration
}

// NewCSISnapshotController returns a new *csiSnapshotCommonController

@@ -92,17 +80,14 @@ func NewCSISnapshotCommonController(
eventRecorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: fmt.Sprintf("snapshot-controller")})

ctrl := &csiSnapshotCommonController{
clientset: clientset,
client: client,
eventRecorder: eventRecorder,
runningOperations: goroutinemap.NewGoRoutineMap(true),
createSnapshotContentRetryCount: createSnapshotContentRetryCount,
createSnapshotContentInterval: createSnapshotContentInterval,
resyncPeriod: resyncPeriod,
snapshotStore: cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc),
contentStore: cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc),
snapshotQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "snapshot-controller-snapshot"),
contentQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "snapshot-controller-content"),
clientset: clientset,
client: client,
eventRecorder: eventRecorder,
resyncPeriod: resyncPeriod,
snapshotStore: cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc),
contentStore: cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc),
snapshotQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "snapshot-controller-snapshot"),
contentQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "snapshot-controller-content"),
}

ctrl.pvcLister = pvcInformer.Lister()

@@ -192,134 +177,142 @@ func (ctrl *csiSnapshotCommonController) enqueueContentWork(obj interface{}) {
}
}

// snapshotWorker processes items from snapshotQueue. It must run only once,
// syncSnapshot is not assured to be reentrant.
// snapshotWorker is the main worker for VolumeSnapshots.
func (ctrl *csiSnapshotCommonController) snapshotWorker() {
workFunc := func() bool {
keyObj, quit := ctrl.snapshotQueue.Get()
if quit {
return true
}
defer ctrl.snapshotQueue.Done(keyObj)
key := keyObj.(string)
klog.V(5).Infof("snapshotWorker[%s]", key)

namespace, name, err := cache.SplitMetaNamespaceKey(key)
klog.V(5).Infof("snapshotWorker: snapshot namespace [%s] name [%s]", namespace, name)
if err != nil {
klog.Errorf("error getting namespace & name of snapshot %q to get snapshot from informer: %v", key, err)
return false
}
snapshot, err := ctrl.snapshotLister.VolumeSnapshots(namespace).Get(name)
if err == nil {
// The volume snapshot still exists in informer cache, the event must have
// been add/update/sync
newSnapshot, err := ctrl.checkAndUpdateSnapshotClass(snapshot)
if err == nil || (newSnapshot.ObjectMeta.DeletionTimestamp != nil && errors.IsNotFound(err)) {
// If the VolumeSnapshotClass is not found, we still need to process an update
// so that syncSnapshot can delete the snapshot, should it still exist in the
// cluster after it's been removed from the informer cache
klog.V(5).Infof("updating snapshot %q; snapshotClass may have already been removed", key)
ctrl.updateSnapshot(newSnapshot)
}
return false
}
if err != nil && !errors.IsNotFound(err) {
klog.V(2).Infof("error getting snapshot %q from informer: %v", key, err)
return false
}
// The snapshot is not in informer cache, the event must have been "delete"
vsObj, found, err := ctrl.snapshotStore.GetByKey(key)
if err != nil {
klog.V(2).Infof("error getting snapshot %q from cache: %v", key, err)
return false
}
if !found {
// The controller has already processed the delete event and
// deleted the snapshot from its cache
klog.V(2).Infof("deletion of snapshot %q was already processed", key)
return false
}
snapshot, ok := vsObj.(*crdv1.VolumeSnapshot)
if !ok {
klog.Errorf("expected vs, got %+v", vsObj)
return false
}
newSnapshot, err := ctrl.checkAndUpdateSnapshotClass(snapshot)
if err == nil || errors.IsNotFound(err) {
// We should still handle deletion events even if the VolumeSnapshotClass
// is not found in the cluster
klog.V(5).Infof("deleting snapshot %q; snapshotClass may have already been removed", key)
ctrl.deleteSnapshot(newSnapshot)
}
return false
keyObj, quit := ctrl.snapshotQueue.Get()
if quit {
return
}
defer ctrl.snapshotQueue.Done(keyObj)

for {
if quit := workFunc(); quit {
klog.Infof("snapshot worker queue shutting down")
return
}
if err := ctrl.syncSnapshotByKey(keyObj.(string)); err != nil {
// Rather than wait for a full resync, re-add the key to the
// queue to be processed.
ctrl.snapshotQueue.AddRateLimited(keyObj)
klog.V(4).Infof("Failed to sync snapshot %q, will retry again: %v", keyObj.(string), err)
} else {
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
ctrl.snapshotQueue.Forget(keyObj)
}
}

// contentWorker processes items from contentQueue. It must run only once,
// syncContent is not assured to be reentrant.
func (ctrl *csiSnapshotCommonController) contentWorker() {
workFunc := func() bool {
keyObj, quit := ctrl.contentQueue.Get()
if quit {
return true
}
defer ctrl.contentQueue.Done(keyObj)
key := keyObj.(string)
klog.V(5).Infof("contentWorker[%s]", key)
// syncSnapshotByKey processes a VolumeSnapshot request.
func (ctrl *csiSnapshotCommonController) syncSnapshotByKey(key string) error {
klog.V(5).Infof("syncSnapshotByKey[%s]", key)

_, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
klog.V(4).Infof("error getting name of snapshotContent %q to get snapshotContent from informer: %v", key, err)
return false
}
content, err := ctrl.contentLister.Get(name)
// The content still exists in informer cache, the event must have
namespace, name, err := cache.SplitMetaNamespaceKey(key)
klog.V(5).Infof("snapshotWorker: snapshot namespace [%s] name [%s]", namespace, name)
if err != nil {
klog.Errorf("error getting namespace & name of snapshot %q to get snapshot from informer: %v", key, err)
return nil
}
snapshot, err := ctrl.snapshotLister.VolumeSnapshots(namespace).Get(name)
if err == nil {
// The volume snapshot still exists in informer cache, the event must have
// been add/update/sync
if err == nil {
ctrl.updateContent(content)
return false
newSnapshot, err := ctrl.checkAndUpdateSnapshotClass(snapshot)
if err == nil || (newSnapshot.ObjectMeta.DeletionTimestamp != nil && errors.IsNotFound(err)) {
// If the VolumeSnapshotClass is not found, we still need to process an update
// so that syncSnapshot can delete the snapshot, should it still exist in the
// cluster after it's been removed from the informer cache
if newSnapshot.ObjectMeta.DeletionTimestamp != nil && errors.IsNotFound(err) {
klog.V(5).Infof("Snapshot %q is being deleted. SnapshotClass has already been removed", key)
}
klog.V(5).Infof("Updating snapshot %q", key)
return ctrl.updateSnapshot(newSnapshot)
}
if !errors.IsNotFound(err) {
klog.V(2).Infof("error getting content %q from informer: %v", key, err)
return false
}

// The content is not in informer cache, the event must have been
// "delete"
contentObj, found, err := ctrl.contentStore.GetByKey(key)
if err != nil {
klog.V(2).Infof("error getting content %q from cache: %v", key, err)
return false
}
if !found {
// The controller has already processed the delete event and
// deleted the content from its cache
klog.V(2).Infof("deletion of content %q was already processed", key)
return false
}
content, ok := contentObj.(*crdv1.VolumeSnapshotContent)
if !ok {
klog.Errorf("expected content, got %+v", content)
return false
}
ctrl.deleteContent(content)
return false
return err
}
if err != nil && !errors.IsNotFound(err) {
klog.V(2).Infof("error getting snapshot %q from informer: %v", key, err)
return err
}
// The snapshot is not in informer cache, the event must have been "delete"
vsObj, found, err := ctrl.snapshotStore.GetByKey(key)
if err != nil {
klog.V(2).Infof("error getting snapshot %q from cache: %v", key, err)
return nil
}
if !found {
// The controller has already processed the delete event and
// deleted the snapshot from its cache
klog.V(2).Infof("deletion of snapshot %q was already processed", key)
return nil
}
snapshot, ok := vsObj.(*crdv1.VolumeSnapshot)
if !ok {
klog.Errorf("expected vs, got %+v", vsObj)
return nil
}

for {
if quit := workFunc(); quit {
klog.Infof("content worker queue shutting down")
return
}
klog.V(5).Infof("deleting snapshot %q", key)
ctrl.deleteSnapshot(snapshot)

return nil
}

// contentWorker is the main worker for VolumeSnapshotContent.
func (ctrl *csiSnapshotCommonController) contentWorker() {
keyObj, quit := ctrl.contentQueue.Get()
if quit {
return
}
defer ctrl.contentQueue.Done(keyObj)

if err := ctrl.syncContentByKey(keyObj.(string)); err != nil {
// Rather than wait for a full resync, re-add the key to the
// queue to be processed.
ctrl.contentQueue.AddRateLimited(keyObj)
klog.V(4).Infof("Failed to sync content %q, will retry again: %v", keyObj.(string), err)
} else {
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
ctrl.contentQueue.Forget(keyObj)
}
}

// syncContentByKey processes a VolumeSnapshotContent request.
func (ctrl *csiSnapshotCommonController) syncContentByKey(key string) error {
klog.V(5).Infof("syncContentByKey[%s]", key)

_, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
klog.V(4).Infof("error getting name of snapshotContent %q to get snapshotContent from informer: %v", key, err)
return nil
}
content, err := ctrl.contentLister.Get(name)
// The content still exists in informer cache, the event must have
// been add/update/sync
if err == nil {
// If error occurs we add this item back to the queue
return ctrl.updateContent(content)
}
if !errors.IsNotFound(err) {
klog.V(2).Infof("error getting content %q from informer: %v", key, err)
return nil
}

// The content is not in informer cache, the event must have been
// "delete"
contentObj, found, err := ctrl.contentStore.GetByKey(key)
if err != nil {
klog.V(2).Infof("error getting content %q from cache: %v", key, err)
return nil
}
if !found {
// The controller has already processed the delete event and
// deleted the content from its cache
klog.V(2).Infof("deletion of content %q was already processed", key)
return nil
}
content, ok := contentObj.(*crdv1.VolumeSnapshotContent)
if !ok {
klog.Errorf("expected content, got %+v", content)
return nil
}
ctrl.deleteContent(content)
return nil
}

// checkAndUpdateSnapshotClass gets the VolumeSnapshotClass from VolumeSnapshot. If it is not set,

@@ -357,7 +350,7 @@ func (ctrl *csiSnapshotCommonController) checkAndUpdateSnapshotClass(snapshot *c

// updateSnapshot runs in worker thread and handles "snapshot added",
// "snapshot updated" and "periodic sync" events.
func (ctrl *csiSnapshotCommonController) updateSnapshot(snapshot *crdv1.VolumeSnapshot) {
func (ctrl *csiSnapshotCommonController) updateSnapshot(snapshot *crdv1.VolumeSnapshot) error {
// Store the new snapshot version in the cache and do not process it if this is
// an old version.
klog.V(5).Infof("updateSnapshot %q", utils.SnapshotKey(snapshot))

@@ -366,23 +359,25 @@ func (ctrl *csiSnapshotCommonController) updateSnapshot(snapshot *crdv1.VolumeSn
klog.Errorf("%v", err)
}
if !newSnapshot {
return
return nil
}
err = ctrl.syncSnapshot(snapshot)
if err != nil {
if errors.IsConflict(err) {
// Version conflict error happens quite often and the controller
// recovers from it easily.
klog.V(3).Infof("could not sync claim %q: %+v", utils.SnapshotKey(snapshot), err)
klog.V(3).Infof("could not sync snapshot %q: %+v", utils.SnapshotKey(snapshot), err)
} else {
klog.Errorf("could not sync volume %q: %+v", utils.SnapshotKey(snapshot), err)
klog.Errorf("could not sync snapshot %q: %+v", utils.SnapshotKey(snapshot), err)
}
return err
}
return nil
}

// updateContent runs in worker thread and handles "content added",
// "content updated" and "periodic sync" events.
func (ctrl *csiSnapshotCommonController) updateContent(content *crdv1.VolumeSnapshotContent) {
func (ctrl *csiSnapshotCommonController) updateContent(content *crdv1.VolumeSnapshotContent) error {
// Store the new content version in the cache and do not process it if this is
// an old version.
new, err := ctrl.storeContentUpdate(content)

@@ -390,7 +385,7 @@ func (ctrl *csiSnapshotCommonController) updateContent(content *crdv1.VolumeSnap
klog.Errorf("%v", err)
}
if !new {
return
return nil
}
err = ctrl.syncContent(content)
if err != nil {

@@ -401,7 +396,9 @@ func (ctrl *csiSnapshotCommonController) updateContent(content *crdv1.VolumeSnap
} else {
klog.Errorf("could not sync content %q: %+v", content.Name, err)
}
return err
}
return nil
}

// deleteSnapshot runs in worker thread and handles "snapshot deleted" event.

@@ -107,24 +107,6 @@ func TestCreateSnapshotSync(t *testing.T) {
expectSuccess: false,
test: testSyncSnapshot,
},

{
name: "7-2 - fail to update snapshot reports warning event",
initialContents: newContentArrayWithReadyToUse("snapcontent-snapuid7-2", "snapuid7-2", "snap7-2", "sid7-2", classGold, "", "pv-handle7-2", deletionPolicy, nil, nil, &True, false),
expectedContents: newContentArrayWithReadyToUse("snapcontent-snapuid7-2", "snapuid7-2", "snap7-2", "sid7-2", classGold, "", "pv-handle7-2", deletionPolicy, nil, nil, &True, false),
initialSnapshots: newSnapshotArray("snap7-2", "snapuid7-2", "claim7-2", "", classGold, "snapcontent-snapuid7-2", &False, nil, nil, nil, false, true, nil),
expectedSnapshots: newSnapshotArray("snap7-2", "snapuid7-2", "claim7-2", "", classGold, "snapcontent-snapuid7-2", &False, nil, nil, newVolumeError("Snapshot status update failed, snapshot controller failed to update default/snap7-2 on API server: mock update error"), false, true, nil),
initialClaims: newClaimArray("claim7-2", "pvc-uid7-2", "1Gi", "volume7-2", v1.ClaimBound, &classGold),
initialVolumes: newVolumeArray("volume7-2", "pv-uid7-2", "pv-handle7-2", "1Gi", "pvc-uid7-2", "claim7-2", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classGold),
expectedEvents: []string{"Warning SnapshotStatusUpdateFailed"},
errors: []reactorError{
// Inject error to the forth client.VolumesnapshotV1beta1().VolumeSnapshots().Update call.
// All other calls will succeed.
{"update", "volumesnapshots", errors.New("mock update error")},
{"update", "volumesnapshots", errors.New("mock update error")},
{"update", "volumesnapshots", errors.New("mock update error")},
}, test: testSyncSnapshot,
},
{
name: "7-3 - fail to create snapshot without snapshot class ",
initialContents: nocontents,

@@ -193,23 +175,6 @@ func TestCreateSnapshotSync(t *testing.T) {
expectSuccess: false,
test: testSyncSnapshot,
},
{
name: "7-8 - fail create snapshot due to cannot update snapshot status",
initialContents: nocontents,
expectedContents: newContentArrayNoStatus("snapcontent-snapuid7-8", "snapuid7-8", "snap7-8", "sid7-8", classGold, "", "pv-handle7-8", deletionPolicy, nil, nil, false, false),
initialSnapshots: newSnapshotArray("snap7-8", "snapuid7-8", "claim7-8", "", classGold, "", &False, nil, nil, nil, false, true, nil),
expectedSnapshots: newSnapshotArray("snap7-8", "snapuid7-8", "claim7-8", "", classGold, "", &False, nil, nil, newVolumeError("Snapshot status update failed, snapshot controller failed to update default/snap7-8 on API server: mock update error"), false, true, nil),
initialClaims: newClaimArray("claim7-8", "pvc-uid7-8", "1Gi", "volume7-8", v1.ClaimBound, &classEmpty),
initialVolumes: newVolumeArray("volume7-8", "pv-uid7-8", "pv-handle7-8", "1Gi", "pvc-uid7-8", "claim7-8", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty),
errors: []reactorError{
{"update", "volumesnapshots", errors.New("mock update error")},
{"update", "volumesnapshots", errors.New("mock update error")},
{"update", "volumesnapshots", errors.New("mock update error")},
},
expectedEvents: []string{"Normal CreatingSnapshot"},
expectSuccess: false,
test: testSyncSnapshot,
},
{
name: "7-9 - fail create snapshot due to cannot update snapshot status, and failure cannot be recorded either due to additional status update failure.",
initialContents: nocontents,

@@ -239,7 +204,8 @@ func TestCreateSnapshotSync(t *testing.T) {
test: testSyncSnapshot,
},
{
// TODO(xiangqian): this test case needs to be revisited the scenario
// TODO(xiangqian): this test case needs to be
// revisited the scenario
// of VolumeSnapshotContent saving failure. Since there will be no content object
// in API server, it could potentially cause leaking issue
name: "7-11 - fail create snapshot due to cannot save snapshot content",