Bumping k8s dependencies to 1.13
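The diff below moves the vendored kubectl rolling updater from the internal api/coreclient packages to the external corev1 types and corev1client clientset. Because corev1's ReplicationControllerSpec.Replicas is a *int32 rather than a plain int32, the file gains the newInt32Ptr and valOrZero helpers and wraps every replica read and write with them. A minimal standalone sketch of that pointer-handling pattern (not part of the vendored file; it assumes the k8s.io/api module is on the module path):

	package main

	import (
		"fmt"

		corev1 "k8s.io/api/core/v1"
	)

	// Same helpers as in the diff: convert between plain ints and the *int32
	// used by corev1 replica counts, treating a nil pointer as zero.
	func newInt32Ptr(val int) *int32 {
		ret := int32(val)
		return &ret
	}

	func valOrZero(val *int32) int32 {
		if val == nil {
			return int32(0)
		}
		return *val
	}

	func main() {
		rc := &corev1.ReplicationController{}
		rc.Spec.Replicas = newInt32Ptr(3)
		fmt.Println(valOrZero(rc.Spec.Replicas)) // 3
		rc.Spec.Replicas = nil
		fmt.Println(valOrZero(rc.Spec.Replicas)) // 0: nil reads as zero
	}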
166 vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go (generated, vendored)
@@ -23,7 +23,7 @@ import (
 	"strings"
 	"time"
 
-	"k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -31,17 +31,27 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
 	scaleclient "k8s.io/client-go/scale"
 	"k8s.io/client-go/util/integer"
 	"k8s.io/client-go/util/retry"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
-	api "k8s.io/kubernetes/pkg/apis/core"
-	apiv1 "k8s.io/kubernetes/pkg/apis/core/v1"
-	coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
 	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 	"k8s.io/kubernetes/pkg/kubectl/util"
 )
 
+func newInt32Ptr(val int) *int32 {
+	ret := int32(val)
+	return &ret
+}
+
+func valOrZero(val *int32) int32 {
+	if val == nil {
+		return int32(0)
+	}
+	return *val
+}
+
 const (
 	kubectlAnnotationPrefix = "kubectl.kubernetes.io/"
 	sourceIdAnnotation      = kubectlAnnotationPrefix + "update-source-id"
@@ -55,10 +65,10 @@ type RollingUpdaterConfig struct {
 	// Out is a writer for progress output.
 	Out io.Writer
 	// OldRC is an existing controller to be replaced.
-	OldRc *api.ReplicationController
+	OldRc *corev1.ReplicationController
 	// NewRc is a controller that will take ownership of updated pods (will be
 	// created if needed).
-	NewRc *api.ReplicationController
+	NewRc *corev1.ReplicationController
 	// UpdatePeriod is the time to wait between individual pod updates.
 	UpdatePeriod time.Duration
 	// Interval is the time to wait between polling controller status after
@@ -96,7 +106,7 @@ type RollingUpdaterConfig struct {
 	// OnProgress is invoked if set during each scale cycle, to allow the caller to perform additional logic or
 	// abort the scale. If an error is returned the cleanup method will not be invoked. The percentage value
 	// is a synthetic "progress" calculation that represents the approximate percentage completion.
-	OnProgress func(oldRc, newRc *api.ReplicationController, percentage int) error
+	OnProgress func(oldRc, newRc *corev1.ReplicationController, percentage int) error
 }
 
 // RollingUpdaterCleanupPolicy is a cleanup action to take after the
@@ -116,26 +126,26 @@ const (
 // RollingUpdater provides methods for updating replicated pods in a predictable,
 // fault-tolerant way.
 type RollingUpdater struct {
-	rcClient    coreclient.ReplicationControllersGetter
-	podClient   coreclient.PodsGetter
+	rcClient    corev1client.ReplicationControllersGetter
+	podClient   corev1client.PodsGetter
 	scaleClient scaleclient.ScalesGetter
 	// Namespace for resources
 	ns string
 	// scaleAndWait scales a controller and returns its updated state.
-	scaleAndWait func(rc *api.ReplicationController, retry *RetryParams, wait *RetryParams) (*api.ReplicationController, error)
+	scaleAndWait func(rc *corev1.ReplicationController, retry *RetryParams, wait *RetryParams) (*corev1.ReplicationController, error)
 	//getOrCreateTargetController gets and validates an existing controller or
 	//makes a new one.
-	getOrCreateTargetController func(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error)
+	getOrCreateTargetController func(controller *corev1.ReplicationController, sourceId string) (*corev1.ReplicationController, bool, error)
 	// cleanup performs post deployment cleanup tasks for newRc and oldRc.
-	cleanup func(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error
+	cleanup func(oldRc, newRc *corev1.ReplicationController, config *RollingUpdaterConfig) error
 	// getReadyPods returns the amount of old and new ready pods.
-	getReadyPods func(oldRc, newRc *api.ReplicationController, minReadySeconds int32) (int32, int32, error)
+	getReadyPods func(oldRc, newRc *corev1.ReplicationController, minReadySeconds int32) (int32, int32, error)
 	// nowFn returns the current time used to calculate the minReadySeconds
 	nowFn func() metav1.Time
 }
 
 // NewRollingUpdater creates a RollingUpdater from a client.
-func NewRollingUpdater(namespace string, rcClient coreclient.ReplicationControllersGetter, podClient coreclient.PodsGetter, sc scaleclient.ScalesGetter) *RollingUpdater {
+func NewRollingUpdater(namespace string, rcClient corev1client.ReplicationControllersGetter, podClient corev1client.PodsGetter, sc scaleclient.ScalesGetter) *RollingUpdater {
 	updater := &RollingUpdater{
 		rcClient:  rcClient,
 		podClient: podClient,
@@ -203,8 +213,8 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
 	if err != nil {
 		return err
 	}
-	originReplicas := strconv.Itoa(int(existing.Spec.Replicas))
-	applyUpdate := func(rc *api.ReplicationController) {
+	originReplicas := strconv.Itoa(int(valOrZero(existing.Spec.Replicas)))
+	applyUpdate := func(rc *corev1.ReplicationController) {
 		if rc.Annotations == nil {
 			rc.Annotations = map[string]string{}
 		}
@@ -231,15 +241,15 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
 	// the effective scale of the old RC regardless of the configuration
 	// (equivalent to 100% maxUnavailable).
 	if desired == 0 {
-		maxUnavailable = oldRc.Spec.Replicas
+		maxUnavailable = valOrZero(oldRc.Spec.Replicas)
 		minAvailable = 0
 	}
 
 	fmt.Fprintf(out, "Scaling up %s from %d to %d, scaling down %s from %d to 0 (keep %d pods available, don't exceed %d pods)\n",
-		newRc.Name, newRc.Spec.Replicas, desired, oldRc.Name, oldRc.Spec.Replicas, minAvailable, desired+maxSurge)
+		newRc.Name, valOrZero(newRc.Spec.Replicas), desired, oldRc.Name, valOrZero(oldRc.Spec.Replicas), minAvailable, desired+maxSurge)
 
 	// give a caller incremental notification and allow them to exit early
-	goal := desired - newRc.Spec.Replicas
+	goal := desired - valOrZero(newRc.Spec.Replicas)
 	if goal < 0 {
 		goal = -goal
 	}
@@ -247,7 +257,7 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
 		if config.OnProgress == nil {
 			return nil
 		}
-		progress := desired - newRc.Spec.Replicas
+		progress := desired - valOrZero(newRc.Spec.Replicas)
 		if progress < 0 {
 			progress = -progress
 		}
@@ -261,10 +271,10 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
 	// Scale newRc and oldRc until newRc has the desired number of replicas and
 	// oldRc has 0 replicas.
 	progressDeadline := time.Now().UnixNano() + config.Timeout.Nanoseconds()
-	for newRc.Spec.Replicas != desired || oldRc.Spec.Replicas != 0 {
+	for valOrZero(newRc.Spec.Replicas) != desired || valOrZero(oldRc.Spec.Replicas) != 0 {
 		// Store the existing replica counts for progress timeout tracking.
-		newReplicas := newRc.Spec.Replicas
-		oldReplicas := oldRc.Spec.Replicas
+		newReplicas := valOrZero(newRc.Spec.Replicas)
+		oldReplicas := valOrZero(oldRc.Spec.Replicas)
 
 		// Scale up as much as possible.
 		scaledRc, err := r.scaleUp(newRc, oldRc, desired, maxSurge, maxUnavailable, scaleRetryParams, config)
@@ -295,7 +305,7 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
 
 		// If we are making progress, continue to advance the progress deadline.
 		// Otherwise, time out with an error.
-		progressMade := (newRc.Spec.Replicas != newReplicas) || (oldRc.Spec.Replicas != oldReplicas)
+		progressMade := (valOrZero(newRc.Spec.Replicas) != newReplicas) || (valOrZero(oldRc.Spec.Replicas) != oldReplicas)
 		if progressMade {
 			progressDeadline = time.Now().UnixNano() + config.Timeout.Nanoseconds()
 		} else if time.Now().UnixNano() > progressDeadline {
@@ -315,29 +325,30 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
 // scaleUp scales up newRc to desired by whatever increment is possible given
 // the configured surge threshold. scaleUp will safely no-op as necessary when
 // it detects redundancy or other relevant conditions.
-func (r *RollingUpdater) scaleUp(newRc, oldRc *api.ReplicationController, desired, maxSurge, maxUnavailable int32, scaleRetryParams *RetryParams, config *RollingUpdaterConfig) (*api.ReplicationController, error) {
+func (r *RollingUpdater) scaleUp(newRc, oldRc *corev1.ReplicationController, desired, maxSurge, maxUnavailable int32, scaleRetryParams *RetryParams, config *RollingUpdaterConfig) (*corev1.ReplicationController, error) {
 	// If we're already at the desired, do nothing.
-	if newRc.Spec.Replicas == desired {
+	if valOrZero(newRc.Spec.Replicas) == desired {
 		return newRc, nil
 	}
 
 	// Scale up as far as we can based on the surge limit.
-	increment := (desired + maxSurge) - (oldRc.Spec.Replicas + newRc.Spec.Replicas)
+	increment := (desired + maxSurge) - (valOrZero(oldRc.Spec.Replicas) + valOrZero(newRc.Spec.Replicas))
 	// If the old is already scaled down, go ahead and scale all the way up.
-	if oldRc.Spec.Replicas == 0 {
-		increment = desired - newRc.Spec.Replicas
+	if valOrZero(oldRc.Spec.Replicas) == 0 {
+		increment = desired - valOrZero(newRc.Spec.Replicas)
 	}
 	// We can't scale up without violating the surge limit, so do nothing.
 	if increment <= 0 {
 		return newRc, nil
 	}
 	// Increase the replica count, and deal with fenceposts.
-	newRc.Spec.Replicas += increment
-	if newRc.Spec.Replicas > desired {
-		newRc.Spec.Replicas = desired
+	nextVal := valOrZero(newRc.Spec.Replicas) + increment
+	newRc.Spec.Replicas = &nextVal
+	if valOrZero(newRc.Spec.Replicas) > desired {
+		newRc.Spec.Replicas = &desired
 	}
 	// Perform the scale-up.
-	fmt.Fprintf(config.Out, "Scaling %s up to %d\n", newRc.Name, newRc.Spec.Replicas)
+	fmt.Fprintf(config.Out, "Scaling %s up to %d\n", newRc.Name, valOrZero(newRc.Spec.Replicas))
 	scaledRc, err := r.scaleAndWait(newRc, scaleRetryParams, scaleRetryParams)
 	if err != nil {
 		return nil, err
@@ -348,9 +359,9 @@ func (r *RollingUpdater) scaleUp(newRc, oldRc *api.ReplicationController, desire
 // scaleDown scales down oldRc to 0 at whatever decrement possible given the
 // thresholds defined on the config. scaleDown will safely no-op as necessary
 // when it detects redundancy or other relevant conditions.
-func (r *RollingUpdater) scaleDown(newRc, oldRc *api.ReplicationController, desired, minAvailable, maxUnavailable, maxSurge int32, config *RollingUpdaterConfig) (*api.ReplicationController, error) {
+func (r *RollingUpdater) scaleDown(newRc, oldRc *corev1.ReplicationController, desired, minAvailable, maxUnavailable, maxSurge int32, config *RollingUpdaterConfig) (*corev1.ReplicationController, error) {
 	// Already scaled down; do nothing.
-	if oldRc.Spec.Replicas == 0 {
+	if valOrZero(oldRc.Spec.Replicas) == 0 {
 		return oldRc, nil
 	}
 	// Get ready pods. We shouldn't block, otherwise in case both old and new
@@ -363,8 +374,8 @@ func (r *RollingUpdater) scaleDown(newRc, oldRc *api.ReplicationController, desi
 	// The old controller is considered as part of the total because we want to
 	// maintain minimum availability even with a volatile old controller.
 	// Scale down as much as possible while maintaining minimum availability
-	allPods := oldRc.Spec.Replicas + newRc.Spec.Replicas
-	newUnavailable := newRc.Spec.Replicas - newAvailable
+	allPods := valOrZero(oldRc.Spec.Replicas) + valOrZero(newRc.Spec.Replicas)
+	newUnavailable := valOrZero(newRc.Spec.Replicas) - newAvailable
 	decrement := allPods - minAvailable - newUnavailable
 	// The decrement normally shouldn't drop below 0 because the available count
 	// always starts below the old replica count, but the old replica count can
@@ -379,17 +390,18 @@ func (r *RollingUpdater) scaleDown(newRc, oldRc *api.ReplicationController, desi
 		return oldRc, nil
 	}
 	// Reduce the replica count, and deal with fenceposts.
-	oldRc.Spec.Replicas -= decrement
-	if oldRc.Spec.Replicas < 0 {
-		oldRc.Spec.Replicas = 0
+	nextOldVal := valOrZero(oldRc.Spec.Replicas) - decrement
+	oldRc.Spec.Replicas = &nextOldVal
+	if valOrZero(oldRc.Spec.Replicas) < 0 {
+		oldRc.Spec.Replicas = newInt32Ptr(0)
 	}
 	// If the new is already fully scaled and available up to the desired size, go
 	// ahead and scale old all the way down.
-	if newRc.Spec.Replicas == desired && newAvailable == desired {
-		oldRc.Spec.Replicas = 0
+	if valOrZero(newRc.Spec.Replicas) == desired && newAvailable == desired {
+		oldRc.Spec.Replicas = newInt32Ptr(0)
 	}
 	// Perform the scale-down.
-	fmt.Fprintf(config.Out, "Scaling %s down to %d\n", oldRc.Name, oldRc.Spec.Replicas)
+	fmt.Fprintf(config.Out, "Scaling %s down to %d\n", oldRc.Name, valOrZero(oldRc.Spec.Replicas))
 	retryWait := &RetryParams{config.Interval, config.Timeout}
 	scaledRc, err := r.scaleAndWait(oldRc, retryWait, retryWait)
 	if err != nil {
@@ -399,9 +411,9 @@ func (r *RollingUpdater) scaleDown(newRc, oldRc *api.ReplicationController, desi
 }
 
 // scalerScaleAndWait scales a controller using a Scaler and a real client.
-func (r *RollingUpdater) scaleAndWaitWithScaler(rc *api.ReplicationController, retry *RetryParams, wait *RetryParams) (*api.ReplicationController, error) {
+func (r *RollingUpdater) scaleAndWaitWithScaler(rc *corev1.ReplicationController, retry *RetryParams, wait *RetryParams) (*corev1.ReplicationController, error) {
 	scaler := NewScaler(r.scaleClient)
-	if err := scaler.Scale(rc.Namespace, rc.Name, uint(rc.Spec.Replicas), &ScalePrecondition{-1, ""}, retry, wait, schema.GroupResource{Resource: "replicationcontrollers"}); err != nil {
+	if err := scaler.Scale(rc.Namespace, rc.Name, uint(valOrZero(rc.Spec.Replicas)), &ScalePrecondition{-1, ""}, retry, wait, schema.GroupResource{Resource: "replicationcontrollers"}); err != nil {
 		return nil, err
 	}
 	return r.rcClient.ReplicationControllers(rc.Namespace).Get(rc.Name, metav1.GetOptions{})
@@ -410,8 +422,8 @@ func (r *RollingUpdater) scaleAndWaitWithScaler(rc *api.ReplicationController, r
 // readyPods returns the old and new ready counts for their pods.
 // If a pod is observed as being ready, it's considered ready even
 // if it later becomes notReady.
-func (r *RollingUpdater) readyPods(oldRc, newRc *api.ReplicationController, minReadySeconds int32) (int32, int32, error) {
-	controllers := []*api.ReplicationController{oldRc, newRc}
+func (r *RollingUpdater) readyPods(oldRc, newRc *corev1.ReplicationController, minReadySeconds int32) (int32, int32, error) {
+	controllers := []*corev1.ReplicationController{oldRc, newRc}
 	oldReady := int32(0)
 	newReady := int32(0)
 	if r.nowFn == nil {
@@ -426,16 +438,12 @@ func (r *RollingUpdater) readyPods(oldRc, newRc *api.ReplicationController, minR
 		if err != nil {
 			return 0, 0, err
 		}
-		for _, pod := range pods.Items {
-			v1Pod := &v1.Pod{}
-			if err := apiv1.Convert_core_Pod_To_v1_Pod(&pod, v1Pod, nil); err != nil {
-				return 0, 0, err
-			}
+		for _, v1Pod := range pods.Items {
 			// Do not count deleted pods as ready
 			if v1Pod.DeletionTimestamp != nil {
 				continue
 			}
-			if !podutil.IsPodAvailable(v1Pod, minReadySeconds, r.nowFn()) {
+			if !podutil.IsPodAvailable(&v1Pod, minReadySeconds, r.nowFn()) {
 				continue
 			}
 			switch controller.Name {
@@ -457,7 +465,7 @@ func (r *RollingUpdater) readyPods(oldRc, newRc *api.ReplicationController, minR
 //
 // Existing controllers are validated to ensure their sourceIdAnnotation
 // matches sourceId; if there's a mismatch, an error is returned.
-func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error) {
+func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *corev1.ReplicationController, sourceId string) (*corev1.ReplicationController, bool, error) {
 	existingRc, err := r.existingController(controller)
 	if err != nil {
 		if !errors.IsNotFound(err) {
@@ -465,16 +473,16 @@ func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *api.R
 			// should create it.
 			return nil, false, err
 		}
-		if controller.Spec.Replicas <= 0 {
-			return nil, false, fmt.Errorf("Invalid controller spec for %s; required: > 0 replicas, actual: %d\n", controller.Name, controller.Spec.Replicas)
+		if valOrZero(controller.Spec.Replicas) <= 0 {
+			return nil, false, fmt.Errorf("Invalid controller spec for %s; required: > 0 replicas, actual: %d\n", controller.Name, valOrZero(controller.Spec.Replicas))
 		}
 		// The controller wasn't found, so create it.
 		if controller.Annotations == nil {
 			controller.Annotations = map[string]string{}
 		}
-		controller.Annotations[desiredReplicasAnnotation] = fmt.Sprintf("%d", controller.Spec.Replicas)
+		controller.Annotations[desiredReplicasAnnotation] = fmt.Sprintf("%d", valOrZero(controller.Spec.Replicas))
 		controller.Annotations[sourceIdAnnotation] = sourceId
-		controller.Spec.Replicas = 0
+		controller.Spec.Replicas = newInt32Ptr(0)
 		newRc, err := r.rcClient.ReplicationControllers(r.ns).Create(controller)
 		return newRc, false, err
 	}
@@ -489,10 +497,10 @@ func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *api.R
 }
 
 // existingController verifies if the controller already exists
-func (r *RollingUpdater) existingController(controller *api.ReplicationController) (*api.ReplicationController, error) {
+func (r *RollingUpdater) existingController(controller *corev1.ReplicationController) (*corev1.ReplicationController, error) {
 	// without rc name but generate name, there's no existing rc
 	if len(controller.Name) == 0 && len(controller.GenerateName) > 0 {
-		return nil, errors.NewNotFound(api.Resource("replicationcontrollers"), controller.Name)
+		return nil, errors.NewNotFound(corev1.Resource("replicationcontrollers"), controller.Name)
 	}
 	// controller name is required to get rc back
 	return r.rcClient.ReplicationControllers(controller.Namespace).Get(controller.Name, metav1.GetOptions{})
@@ -501,14 +509,14 @@ func (r *RollingUpdater) existingController(controller *api.ReplicationControlle
 // cleanupWithClients performs cleanup tasks after the rolling update. Update
 // process related annotations are removed from oldRc and newRc. The
 // CleanupPolicy on config is executed.
-func (r *RollingUpdater) cleanupWithClients(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error {
+func (r *RollingUpdater) cleanupWithClients(oldRc, newRc *corev1.ReplicationController, config *RollingUpdaterConfig) error {
 	// Clean up annotations
 	var err error
 	newRc, err = r.rcClient.ReplicationControllers(r.ns).Get(newRc.Name, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
-	applyUpdate := func(rc *api.ReplicationController) {
+	applyUpdate := func(rc *corev1.ReplicationController) {
 		delete(rc.Annotations, sourceIdAnnotation)
 		delete(rc.Annotations, desiredReplicasAnnotation)
 	}
@@ -544,7 +552,7 @@ func (r *RollingUpdater) cleanupWithClients(oldRc, newRc *api.ReplicationControl
 	}
 }
 
-func Rename(c coreclient.ReplicationControllersGetter, rc *api.ReplicationController, newName string) error {
+func Rename(c corev1client.ReplicationControllersGetter, rc *corev1.ReplicationController, newName string) error {
 	oldName := rc.Name
 	rc.Name = newName
 	rc.ResourceVersion = ""
@@ -572,7 +580,7 @@ func Rename(c coreclient.ReplicationControllersGetter, rc *api.ReplicationContro
 	return err
 }
 
-func LoadExistingNextReplicationController(c coreclient.ReplicationControllersGetter, namespace, newName string) (*api.ReplicationController, error) {
+func LoadExistingNextReplicationController(c corev1client.ReplicationControllersGetter, namespace, newName string) (*corev1.ReplicationController, error) {
 	if len(newName) == 0 {
 		return nil, nil
 	}
@@ -589,10 +597,10 @@ type NewControllerConfig struct {
 	Image         string
 	Container     string
 	DeploymentKey string
-	PullPolicy    api.PullPolicy
+	PullPolicy    corev1.PullPolicy
 }
 
-func CreateNewControllerFromCurrentController(rcClient coreclient.ReplicationControllersGetter, codec runtime.Codec, cfg *NewControllerConfig) (*api.ReplicationController, error) {
+func CreateNewControllerFromCurrentController(rcClient corev1client.ReplicationControllersGetter, codec runtime.Codec, cfg *NewControllerConfig) (*corev1.ReplicationController, error) {
 	containerIndex := 0
 	// load the old RC into the "new" RC
 	newRc, err := rcClient.ReplicationControllers(cfg.Namespace).Get(cfg.OldName, metav1.GetOptions{})
@@ -669,19 +677,19 @@ func AbortRollingUpdate(c *RollingUpdaterConfig) error {
 	return nil
 }
 
-func GetNextControllerAnnotation(rc *api.ReplicationController) (string, bool) {
+func GetNextControllerAnnotation(rc *corev1.ReplicationController) (string, bool) {
 	res, found := rc.Annotations[nextControllerAnnotation]
 	return res, found
 }
 
-func SetNextControllerAnnotation(rc *api.ReplicationController, name string) {
+func SetNextControllerAnnotation(rc *corev1.ReplicationController, name string) {
 	if rc.Annotations == nil {
 		rc.Annotations = map[string]string{}
 	}
 	rc.Annotations[nextControllerAnnotation] = name
 }
 
-func UpdateExistingReplicationController(rcClient coreclient.ReplicationControllersGetter, podClient coreclient.PodsGetter, oldRc *api.ReplicationController, namespace, newName, deploymentKey, deploymentValue string, out io.Writer) (*api.ReplicationController, error) {
+func UpdateExistingReplicationController(rcClient corev1client.ReplicationControllersGetter, podClient corev1client.PodsGetter, oldRc *corev1.ReplicationController, namespace, newName, deploymentKey, deploymentValue string, out io.Writer) (*corev1.ReplicationController, error) {
 	if _, found := oldRc.Spec.Selector[deploymentKey]; !found {
 		SetNextControllerAnnotation(oldRc, newName)
 		return AddDeploymentKeyToReplicationController(oldRc, rcClient, podClient, deploymentKey, deploymentValue, namespace, out)
@@ -689,16 +697,16 @@ func UpdateExistingReplicationController(rcClient coreclient.ReplicationControll
 
 	// If we didn't need to update the controller for the deployment key, we still need to write
 	// the "next" controller.
-	applyUpdate := func(rc *api.ReplicationController) {
+	applyUpdate := func(rc *corev1.ReplicationController) {
 		SetNextControllerAnnotation(rc, newName)
 	}
 	return updateRcWithRetries(rcClient, namespace, oldRc, applyUpdate)
 }
 
-func AddDeploymentKeyToReplicationController(oldRc *api.ReplicationController, rcClient coreclient.ReplicationControllersGetter, podClient coreclient.PodsGetter, deploymentKey, deploymentValue, namespace string, out io.Writer) (*api.ReplicationController, error) {
+func AddDeploymentKeyToReplicationController(oldRc *corev1.ReplicationController, rcClient corev1client.ReplicationControllersGetter, podClient corev1client.PodsGetter, deploymentKey, deploymentValue, namespace string, out io.Writer) (*corev1.ReplicationController, error) {
 	var err error
 	// First, update the template label. This ensures that any newly created pods will have the new label
-	applyUpdate := func(rc *api.ReplicationController) {
+	applyUpdate := func(rc *corev1.ReplicationController) {
 		if rc.Spec.Template.Labels == nil {
 			rc.Spec.Template.Labels = map[string]string{}
 		}
@@ -718,7 +726,7 @@ func AddDeploymentKeyToReplicationController(oldRc *api.ReplicationController, r
 	}
 	for ix := range podList.Items {
 		pod := &podList.Items[ix]
-		applyUpdate := func(p *api.Pod) {
+		applyUpdate := func(p *corev1.Pod) {
 			if p.Labels == nil {
 				p.Labels = map[string]string{
 					deploymentKey: deploymentValue,
@@ -740,7 +748,7 @@ func AddDeploymentKeyToReplicationController(oldRc *api.ReplicationController, r
 	for k, v := range oldRc.Spec.Selector {
 		selectorCopy[k] = v
 	}
-	applyUpdate = func(rc *api.ReplicationController) {
+	applyUpdate = func(rc *corev1.ReplicationController) {
 		rc.Spec.Selector[deploymentKey] = deploymentValue
 	}
 	// Update the selector of the rc so it manages all the pods we updated above
@@ -768,13 +776,13 @@ func AddDeploymentKeyToReplicationController(oldRc *api.ReplicationController, r
 	return oldRc, nil
 }
 
-type updateRcFunc func(controller *api.ReplicationController)
+type updateRcFunc func(controller *corev1.ReplicationController)
 
 // updateRcWithRetries retries updating the given rc on conflict with the following steps:
 // 1. Get latest resource
 // 2. applyUpdate
 // 3. Update the resource
-func updateRcWithRetries(rcClient coreclient.ReplicationControllersGetter, namespace string, rc *api.ReplicationController, applyUpdate updateRcFunc) (*api.ReplicationController, error) {
+func updateRcWithRetries(rcClient corev1client.ReplicationControllersGetter, namespace string, rc *corev1.ReplicationController, applyUpdate updateRcFunc) (*corev1.ReplicationController, error) {
 	// Deep copy the rc in case we failed on Get during retry loop
 	oldRc := rc.DeepCopy()
 	err := retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) {
@@ -799,13 +807,13 @@ func updateRcWithRetries(rcClient coreclient.ReplicationControllersGetter, names
 	return rc, err
 }
 
-type updatePodFunc func(controller *api.Pod)
+type updatePodFunc func(controller *corev1.Pod)
 
 // updatePodWithRetries retries updating the given pod on conflict with the following steps:
 // 1. Get latest resource
 // 2. applyUpdate
 // 3. Update the resource
-func updatePodWithRetries(podClient coreclient.PodsGetter, namespace string, pod *api.Pod, applyUpdate updatePodFunc) (*api.Pod, error) {
+func updatePodWithRetries(podClient corev1client.PodsGetter, namespace string, pod *corev1.Pod, applyUpdate updatePodFunc) (*corev1.Pod, error) {
 	// Deep copy the pod in case we failed on Get during retry loop
 	oldPod := pod.DeepCopy()
 	err := retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) {
@@ -826,7 +834,7 @@ func updatePodWithRetries(podClient coreclient.PodsGetter, namespace string, pod
 	return pod, err
 }
 
-func FindSourceController(r coreclient.ReplicationControllersGetter, namespace, name string) (*api.ReplicationController, error) {
+func FindSourceController(r corev1client.ReplicationControllersGetter, namespace, name string) (*corev1.ReplicationController, error) {
 	list, err := r.ReplicationControllers(namespace).List(metav1.ListOptions{})
 	if err != nil {
 		return nil, err