Bumping k8s dependencies to 1.13
70  vendor/k8s.io/kubernetes/pkg/controller/deployment/BUILD  (generated, vendored)
@@ -22,28 +22,27 @@ go_library(
         "//pkg/controller/deployment/util:go_default_library",
         "//pkg/util/labels:go_default_library",
         "//pkg/util/metrics:go_default_library",
+        "//staging/src/k8s.io/api/apps/v1:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/client-go/informers/apps/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
+        "//staging/src/k8s.io/client-go/util/integer:go_default_library",
+        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/k8s.io/api/apps/v1:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/client-go/informers/apps/v1:go_default_library",
-        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/listers/apps/v1:go_default_library",
-        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
-        "//vendor/k8s.io/client-go/tools/record:go_default_library",
-        "//vendor/k8s.io/client-go/util/integer:go_default_library",
-        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
     ],
 )
 
@@ -71,20 +70,21 @@ go_test(
         "//pkg/apis/storage/install:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/controller/deployment/util:go_default_library",
-        "//vendor/k8s.io/api/apps/v1:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
-        "//vendor/k8s.io/client-go/informers:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
-        "//vendor/k8s.io/client-go/testing:go_default_library",
-        "//vendor/k8s.io/client-go/tools/record:go_default_library",
-        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
+        "//pkg/controller/testutil:go_default_library",
+        "//staging/src/k8s.io/api/apps/v1:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
+        "//staging/src/k8s.io/client-go/informers:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
+        "//staging/src/k8s.io/client-go/testing:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
+        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
     ],
 )

3  vendor/k8s.io/kubernetes/pkg/controller/deployment/OWNERS  (generated, vendored)
@@ -5,7 +5,8 @@ approvers:
 - mfojtik
 reviewers:
 - janetkuo
 - nikhiljindal
 - kargakis
 - mfojtik
+- tnozicka
+labels:
+- sig/apps

12  vendor/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller.go  (generated, vendored)
@@ -608,7 +608,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {
     }
 
     if d.DeletionTimestamp != nil {
-        return dc.syncStatusOnly(d, rsList, podMap)
+        return dc.syncStatusOnly(d, rsList)
     }
 
     // Update deployment conditions with an Unknown condition when pausing/resuming
@@ -619,29 +619,29 @@ func (dc *DeploymentController) syncDeployment(key string) error {
     }
 
     if d.Spec.Paused {
-        return dc.sync(d, rsList, podMap)
+        return dc.sync(d, rsList)
     }
 
     // rollback is not re-entrant in case the underlying replica sets are updated with a new
     // revision so we should ensure that we won't proceed to update replica sets until we
     // make sure that the deployment has cleaned up its rollback spec in subsequent enqueues.
     if getRollbackTo(d) != nil {
-        return dc.rollback(d, rsList, podMap)
+        return dc.rollback(d, rsList)
     }
 
-    scalingEvent, err := dc.isScalingEvent(d, rsList, podMap)
+    scalingEvent, err := dc.isScalingEvent(d, rsList)
     if err != nil {
        return err
     }
     if scalingEvent {
-        return dc.sync(d, rsList, podMap)
+        return dc.sync(d, rsList)
     }
 
     switch d.Spec.Strategy.Type {
     case apps.RecreateDeploymentStrategyType:
         return dc.rolloutRecreate(d, rsList, podMap)
     case apps.RollingUpdateDeploymentStrategyType:
-        return dc.rolloutRolling(d, rsList, podMap)
+        return dc.rolloutRolling(d, rsList)
     }
     return fmt.Errorf("unexpected deployment strategy type: %s", d.Spec.Strategy.Type)
 }
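
Note that every sync path except Recreate drops the podMap argument in this bump; only rolloutRecreate keeps it, plausibly because the Recreate strategy must confirm that all old pods are gone before scaling up the new replica set. A minimal sketch of that kind of check, using the map shape from the hunk above (an illustration, not the vendored implementation):

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
)

// oldPodsRunning reports whether any pod in podMap may still be running;
// terminal pods (Failed/Succeeded) do not block a Recreate rollout.
func oldPodsRunning(podMap map[types.UID]*v1.PodList) bool {
	for _, podList := range podMap {
		for _, pod := range podList.Items {
			switch pod.Status.Phase {
			case v1.PodFailed, v1.PodSucceeded:
				continue // already terminated
			default:
				return true
			}
		}
	}
	return false
}

func main() {
	podMap := map[types.UID]*v1.PodList{
		"rs-uid": {Items: []v1.Pod{{Status: v1.PodStatus{Phase: v1.PodRunning}}}},
	}
	fmt.Println(oldPodsRunning(podMap)) // true
}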

23  vendor/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller_test.go  (generated, vendored)
@@ -45,6 +45,7 @@ import (
     _ "k8s.io/kubernetes/pkg/apis/storage/install"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/deployment/util"
+    "k8s.io/kubernetes/pkg/controller/testutil"
 )
 
 var (
@@ -77,7 +78,7 @@ func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map
 
 func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSurge, maxUnavailable *intstr.IntOrString, selector map[string]string) *apps.Deployment {
     d := apps.Deployment{
-        TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1"},
+        TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1", Kind: "Deployment"},
         ObjectMeta: metav1.ObjectMeta{
             UID:  uuid.NewUUID(),
             Name: name,
@@ -120,6 +121,7 @@ func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSu
 
 func newReplicaSet(d *apps.Deployment, name string, replicas int) *apps.ReplicaSet {
     return &apps.ReplicaSet{
+        TypeMeta: metav1.TypeMeta{Kind: "ReplicaSet"},
         ObjectMeta: metav1.ObjectMeta{
             Name: name,
             UID:  uuid.NewUUID(),
@@ -135,15 +137,6 @@ func newReplicaSet(d *apps.Deployment, name string, replicas int) *apps.ReplicaS
     }
 }
 
-func getKey(d *apps.Deployment, t *testing.T) string {
-    if key, err := controller.KeyFunc(d); err != nil {
-        t.Errorf("Unexpected error getting key for deployment %v: %v", d.Name, err)
-        return ""
-    } else {
-        return key
-    }
-}
-
 type fixture struct {
     t *testing.T
 
@@ -285,7 +278,7 @@ func TestSyncDeploymentCreatesReplicaSet(t *testing.T) {
     f.expectUpdateDeploymentStatusAction(d)
     f.expectUpdateDeploymentStatusAction(d)
 
-    f.run(getKey(d, t))
+    f.run(testutil.GetKey(d, t))
 }
 
 func TestSyncDeploymentDontDoAnythingDuringDeletion(t *testing.T) {
@@ -298,7 +291,7 @@ func TestSyncDeploymentDontDoAnythingDuringDeletion(t *testing.T) {
     f.objects = append(f.objects, d)
 
     f.expectUpdateDeploymentStatusAction(d)
-    f.run(getKey(d, t))
+    f.run(testutil.GetKey(d, t))
 }
 
 func TestSyncDeploymentDeletionRace(t *testing.T) {
@@ -323,7 +316,7 @@ func TestSyncDeploymentDeletionRace(t *testing.T) {
     f.expectGetDeploymentAction(d)
     // Sync should fail and requeue to let cache catch up.
     // Don't start informers, since we don't want cache to catch up for this test.
-    f.runExpectError(getKey(d, t), false)
+    f.runExpectError(testutil.GetKey(d, t), false)
 }
 
 // issue: https://github.com/kubernetes/kubernetes/issues/23218
@@ -337,7 +330,7 @@ func TestDontSyncDeploymentsWithEmptyPodSelector(t *testing.T) {
 
     // Normally there should be a status update to sync observedGeneration but the fake
     // deployment has no generation set so there is no action happpening here.
-    f.run(getKey(d, t))
+    f.run(testutil.GetKey(d, t))
 }
 
 func TestReentrantRollback(t *testing.T) {
@@ -364,7 +357,7 @@ func TestReentrantRollback(t *testing.T) {
     // Rollback is done here
     f.expectUpdateDeploymentAction(d)
     // Expect no update on replica sets though
-    f.run(getKey(d, t))
+    f.run(testutil.GetKey(d, t))
 }
 
 // TestPodDeletionEnqueuesRecreateDeployment ensures that the deletion of a pod
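
The tests drop their local getKey helper in favor of the shared pkg/controller/testutil package. A minimal sketch of what such a shared helper can look like, assuming the standard key function from client-go's cache package (illustrative, not the vendored testutil source):

package testutil

import (
	"testing"

	"k8s.io/client-go/tools/cache"
)

// GetKey derives the workqueue key ("namespace/name") for any object,
// failing the test on error, so each test file no longer needs its own copy.
func GetKey(obj interface{}, t *testing.T) string {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
	if err != nil {
		t.Errorf("unexpected error getting key for object: %v", err)
		return ""
	}
	return key
}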

6  vendor/k8s.io/kubernetes/pkg/controller/deployment/progress.go  (generated, vendored)
@@ -36,7 +36,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*apps.ReplicaSet, new
     newStatus := calculateStatus(allRSs, newRS, d)
 
     // If there is no progressDeadlineSeconds set, remove any Progressing condition.
-    if d.Spec.ProgressDeadlineSeconds == nil {
+    if !util.HasProgressDeadline(d) {
         util.RemoveDeploymentCondition(&newStatus, apps.DeploymentProgressing)
     }
 
@@ -47,7 +47,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*apps.ReplicaSet, new
     isCompleteDeployment := newStatus.Replicas == newStatus.UpdatedReplicas && currentCond != nil && currentCond.Reason == util.NewRSAvailableReason
     // Check for progress only if there is a progress deadline set and the latest rollout
     // hasn't completed yet.
-    if d.Spec.ProgressDeadlineSeconds != nil && !isCompleteDeployment {
+    if util.HasProgressDeadline(d) && !isCompleteDeployment {
         switch {
         case util.DeploymentComplete(d, &newStatus):
             // Update the deployment conditions with a message for the new replica set that
@@ -159,7 +159,7 @@ var nowFn = func() time.Time { return time.Now() }
 func (dc *DeploymentController) requeueStuckDeployment(d *apps.Deployment, newStatus apps.DeploymentStatus) time.Duration {
     currentCond := util.GetDeploymentCondition(d.Status, apps.DeploymentProgressing)
     // Can't estimate progress if there is no deadline in the spec or progressing condition in the current status.
-    if d.Spec.ProgressDeadlineSeconds == nil || currentCond == nil {
+    if !util.HasProgressDeadline(d) || currentCond == nil {
         return time.Duration(-1)
     }
     // No need to estimate progress if the rollout is complete or already timed out.

12  vendor/k8s.io/kubernetes/pkg/controller/deployment/progress_test.go  (generated, vendored)
@@ -17,6 +17,7 @@ limitations under the License.
 package deployment
 
 import (
+    "math"
     "testing"
     "time"
 
@@ -67,6 +68,7 @@ func newRSWithAvailable(name string, specReplicas, statusReplicas, availableRepl
 
 func TestRequeueStuckDeployment(t *testing.T) {
     pds := int32(60)
+    infinite := int32(math.MaxInt32)
     failed := []apps.DeploymentCondition{
         {
             Type: apps.DeploymentProgressing,
@@ -90,11 +92,17 @@ func TestRequeueStuckDeployment(t *testing.T) {
         expected time.Duration
     }{
         {
-            name:     "no progressDeadlineSeconds specified",
+            name:     "nil progressDeadlineSeconds specified",
             d:        currentDeployment(nil, 4, 3, 3, 2, nil),
             status:   newDeploymentStatus(3, 3, 2),
             expected: time.Duration(-1),
         },
+        {
+            name:     "infinite progressDeadlineSeconds specified",
+            d:        currentDeployment(&infinite, 4, 3, 3, 2, nil),
+            status:   newDeploymentStatus(3, 3, 2),
+            expected: time.Duration(-1),
+        },
         {
             name: "no progressing condition found",
             d:    currentDeployment(&pds, 4, 3, 3, 2, nil),
@@ -330,7 +338,7 @@ func TestSyncRolloutStatus(t *testing.T) {
             newCond := util.GetDeploymentCondition(test.d.Status, test.conditionType)
             switch {
             case newCond == nil:
-                if test.d.Spec.ProgressDeadlineSeconds != nil {
+                if test.d.Spec.ProgressDeadlineSeconds != nil && *test.d.Spec.ProgressDeadlineSeconds != math.MaxInt32 {
                     t.Errorf("%s: expected deployment condition: %s", test.name, test.conditionType)
                 }
             case newCond.Status != test.conditionStatus || newCond.Reason != test.conditionReason:

4  vendor/k8s.io/kubernetes/pkg/controller/deployment/recreate.go  (generated, vendored)
@@ -27,7 +27,7 @@ import (
 // rolloutRecreate implements the logic for recreating a replica set.
 func (dc *DeploymentController) rolloutRecreate(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
     // Don't create a new RS if not already existed, so that we avoid scaling up before scaling down.
-    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
+    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, false)
     if err != nil {
         return err
     }
@@ -51,7 +51,7 @@ func (dc *DeploymentController) rolloutRecreate(d *apps.Deployment, rsList []*ap
 
     // If we need to create a new RS, create it now.
     if newRS == nil {
-        newRS, oldRSs, err = dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, true)
+        newRS, oldRSs, err = dc.getAllReplicaSetsAndSyncRevision(d, rsList, true)
         if err != nil {
             return err
         }

5  vendor/k8s.io/kubernetes/pkg/controller/deployment/rollback.go  (generated, vendored)
@@ -25,13 +25,12 @@ import (
     apps "k8s.io/api/apps/v1"
     "k8s.io/api/core/v1"
     extensions "k8s.io/api/extensions/v1beta1"
-    "k8s.io/apimachinery/pkg/types"
     deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 )
 
 // rollback the deployment to the specified revision. In any case cleanup the rollback spec.
-func (dc *DeploymentController) rollback(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
-    newRS, allOldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, true)
+func (dc *DeploymentController) rollback(d *apps.Deployment, rsList []*apps.ReplicaSet) error {
+    newRS, allOldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, true)
     if err != nil {
         return err
     }

6  vendor/k8s.io/kubernetes/pkg/controller/deployment/rolling.go  (generated, vendored)
@@ -22,16 +22,14 @@ import (
 
     "github.com/golang/glog"
     apps "k8s.io/api/apps/v1"
-    "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/types"
     "k8s.io/client-go/util/integer"
     "k8s.io/kubernetes/pkg/controller"
     deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 )
 
 // rolloutRolling implements the logic for rolling a new replica set.
-func (dc *DeploymentController) rolloutRolling(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
-    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, true)
+func (dc *DeploymentController) rolloutRolling(d *apps.Deployment, rsList []*apps.ReplicaSet) error {
+    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, true)
     if err != nil {
         return err
     }

29  vendor/k8s.io/kubernetes/pkg/controller/deployment/sync.go  (generated, vendored)
@@ -27,16 +27,14 @@ import (
     "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/types"
-    "k8s.io/apimachinery/pkg/util/rand"
     "k8s.io/kubernetes/pkg/controller"
     deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
     labelsutil "k8s.io/kubernetes/pkg/util/labels"
 )
 
 // syncStatusOnly only updates Deployments Status and doesn't take any mutating actions.
-func (dc *DeploymentController) syncStatusOnly(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
-    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
+func (dc *DeploymentController) syncStatusOnly(d *apps.Deployment, rsList []*apps.ReplicaSet) error {
+    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, false)
     if err != nil {
         return err
     }
@@ -47,8 +45,8 @@ func (dc *DeploymentController) syncStatusOnly(d *apps.Deployment, rsList []*app
 
 // sync is responsible for reconciling deployments on scaling events or when they
 // are paused.
-func (dc *DeploymentController) sync(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
-    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
+func (dc *DeploymentController) sync(d *apps.Deployment, rsList []*apps.ReplicaSet) error {
+    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, false)
     if err != nil {
         return err
     }
@@ -73,7 +71,7 @@ func (dc *DeploymentController) sync(d *apps.Deployment, rsList []*apps.ReplicaS
 // These conditions are needed so that we won't accidentally report lack of progress for resumed deployments
 // that were paused for longer than progressDeadlineSeconds.
 func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error {
-    if d.Spec.ProgressDeadlineSeconds == nil {
+    if !deploymentutil.HasProgressDeadline(d) {
         return nil
     }
     cond := deploymentutil.GetDeploymentCondition(d.Status, apps.DeploymentProgressing)
@@ -106,7 +104,6 @@ func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error
 // getAllReplicaSetsAndSyncRevision returns all the replica sets for the provided deployment (new and all old), with new RS's and deployment's revision updated.
 //
 // rsList should come from getReplicaSetsForDeployment(d).
-// podMap should come from getPodMapForDeployment(d, rsList).
 //
 // 1. Get all old RSes this deployment targets, and calculate the max revision number among them (maxOldV).
 // 2. Get new RS this deployment targets (whose pod template matches deployment's), and update new RS's revision number to (maxOldV + 1),
@@ -115,7 +112,7 @@ func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error
 //
 // Note that currently the deployment controller is using caches to avoid querying the server for reads.
 // This may lead to stale reads of replica sets, thus incorrect deployment status.
-func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList, createIfNotExisted bool) (*apps.ReplicaSet, []*apps.ReplicaSet, error) {
+func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *apps.Deployment, rsList []*apps.ReplicaSet, createIfNotExisted bool) (*apps.ReplicaSet, []*apps.ReplicaSet, error) {
     _, allOldRSs := deploymentutil.FindOldReplicaSets(d, rsList)
 
     // Get new replica set with the updated revision number
@@ -161,7 +158,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
     // of this deployment then it is likely that old users started caring about progress. In that
     // case we need to take into account the first time we noticed their new replica set.
     cond := deploymentutil.GetDeploymentCondition(d.Status, apps.DeploymentProgressing)
-    if d.Spec.ProgressDeadlineSeconds != nil && cond == nil {
+    if deploymentutil.HasProgressDeadline(d) && cond == nil {
         msg := fmt.Sprintf("Found new replica set %q", rsCopy.Name)
         condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, deploymentutil.FoundNewRSReason, msg)
         deploymentutil.SetDeploymentCondition(&d.Status, *condition)
@@ -183,7 +180,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
 
     // new ReplicaSet does not exist, create one.
     newRSTemplate := *d.Spec.Template.DeepCopy()
-    podTemplateSpecHash := fmt.Sprintf("%d", controller.ComputeHash(&newRSTemplate, d.Status.CollisionCount))
+    podTemplateSpecHash := controller.ComputeHash(&newRSTemplate, d.Status.CollisionCount)
     newRSTemplate.Labels = labelsutil.CloneAndAddLabel(d.Spec.Template.Labels, apps.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash)
     // Add podTemplateHash label to selector.
     newRSSelector := labelsutil.CloneSelectorAndAddLabel(d.Spec.Selector, apps.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash)
@@ -192,7 +189,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
     newRS := apps.ReplicaSet{
         ObjectMeta: metav1.ObjectMeta{
             // Make the name deterministic, to ensure idempotence
-            Name:            d.Name + "-" + rand.SafeEncodeString(podTemplateSpecHash),
+            Name:            d.Name + "-" + podTemplateSpecHash,
             Namespace:       d.Namespace,
             OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(d, controllerKind)},
             Labels:          newRSTemplate.Labels,
@@ -256,7 +253,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
         return nil, err
     case err != nil:
         msg := fmt.Sprintf("Failed to create new replica set %q: %v", newRS.Name, err)
-        if d.Spec.ProgressDeadlineSeconds != nil {
+        if deploymentutil.HasProgressDeadline(d) {
             cond := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionFalse, deploymentutil.FailedRSCreateReason, msg)
             deploymentutil.SetDeploymentCondition(&d.Status, *cond)
             // We don't really care about this error at this point, since we have a bigger issue to report.
@@ -272,7 +269,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
     }
 
     needsUpdate := deploymentutil.SetDeploymentRevision(d, newRevision)
-    if !alreadyExists && d.Spec.ProgressDeadlineSeconds != nil {
+    if !alreadyExists && deploymentutil.HasProgressDeadline(d) {
         msg := fmt.Sprintf("Created new replica set %q", createdRS.Name)
         condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, deploymentutil.NewReplicaSetReason, msg)
         deploymentutil.SetDeploymentCondition(&d.Status, *condition)
@@ -520,8 +517,8 @@ func calculateStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployme
 //
 // rsList should come from getReplicaSetsForDeployment(d).
-// podMap should come from getPodMapForDeployment(d, rsList).
-func (dc *DeploymentController) isScalingEvent(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) (bool, error) {
-    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
+func (dc *DeploymentController) isScalingEvent(d *apps.Deployment, rsList []*apps.ReplicaSet) (bool, error) {
+    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, false)
     if err != nil {
         return false, err
     }
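
Two related changes are visible in getNewReplicaSet: controller.ComputeHash now apparently returns the encoded string itself, so callers no longer run fmt.Sprintf plus rand.SafeEncodeString over a uint32, which also explains the dropped rand import here and the map[string]int switch in hash_test.go below. A rough sketch of the resulting naming scheme, with computeHashLike standing in for the real ComputeHash (an assumption about its internals, not the vendored code):

package main

import (
	"fmt"
	"hash/fnv"

	"k8s.io/apimachinery/pkg/util/rand"
)

// computeHashLike mimics the shape of ComputeHash after this bump: FNV-1a
// over the serialized pod template, then SafeEncodeString so the result is
// usable directly as a name suffix. Illustrative only.
func computeHashLike(podTemplate []byte) string {
	hasher := fnv.New32a()
	hasher.Write(podTemplate)
	return rand.SafeEncodeString(fmt.Sprint(hasher.Sum32()))
}

func main() {
	// The new ReplicaSet name is now simply deploymentName + "-" + hash.
	fmt.Println("nginx" + "-" + computeHashLike([]byte(`{"containers":[...]}`)))
}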

54  vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD  (generated, vendored)
@@ -8,33 +8,23 @@ load(
 
 go_library(
     name = "go_default_library",
-    srcs = [
-        "deployment_util.go",
-        "pod_util.go",
-        "replicaset_util.go",
-    ],
+    srcs = ["deployment_util.go"],
     importpath = "k8s.io/kubernetes/pkg/controller/deployment/util",
     deps = [
-        "//pkg/apis/extensions:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/util/labels:go_default_library",
+        "//staging/src/k8s.io/api/apps/v1:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/util/integer:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/k8s.io/api/apps/v1:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/listers/apps/v1:go_default_library",
-        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/util/integer:go_default_library",
-        "//vendor/k8s.io/client-go/util/retry:go_default_library",
     ],
 )
 
@@ -48,16 +38,16 @@ go_test(
     deps = [
         "//pkg/controller:go_default_library",
         "//pkg/util/hash:go_default_library",
-        "//vendor/k8s.io/api/apps/v1:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
-        "//vendor/k8s.io/client-go/testing:go_default_library",
+        "//staging/src/k8s.io/api/apps/v1:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
+        "//staging/src/k8s.io/client-go/testing:go_default_library",
     ],
 )

48  vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go  (generated, vendored)
@@ -18,6 +18,7 @@ package util
 
 import (
     "fmt"
+    "math"
     "sort"
     "strconv"
     "strings"
@@ -36,7 +37,6 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
     "k8s.io/client-go/util/integer"
-    internalextensions "k8s.io/kubernetes/pkg/apis/extensions"
     "k8s.io/kubernetes/pkg/controller"
     labelsutil "k8s.io/kubernetes/pkg/util/labels"
 )
@@ -575,29 +575,6 @@ func ListReplicaSets(deployment *apps.Deployment, getRSList RsListFunc) ([]*apps
     return owned, nil
 }
 
-// ListReplicaSetsInternal is ListReplicaSets for internalextensions.
-// TODO: Remove the duplicate when call sites are updated to ListReplicaSets.
-func ListReplicaSetsInternal(deployment *internalextensions.Deployment, getRSList func(string, metav1.ListOptions) ([]*internalextensions.ReplicaSet, error)) ([]*internalextensions.ReplicaSet, error) {
-    namespace := deployment.Namespace
-    selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
-    if err != nil {
-        return nil, err
-    }
-    options := metav1.ListOptions{LabelSelector: selector.String()}
-    all, err := getRSList(namespace, options)
-    if err != nil {
-        return nil, err
-    }
-    // Only include those whose ControllerRef matches the Deployment.
-    filtered := make([]*internalextensions.ReplicaSet, 0, len(all))
-    for _, rs := range all {
-        if metav1.IsControlledBy(rs, deployment) {
-            filtered = append(filtered, rs)
-        }
-    }
-    return filtered, nil
-}
-
 // ListPods returns a list of pods the given deployment targets.
 // This needs a list of ReplicaSets for the Deployment,
 // which can be found with ListReplicaSets().
@@ -773,7 +750,7 @@ var nowFn = func() time.Time { return time.Now() }
 // is older than progressDeadlineSeconds or a Progressing condition with a TimedOutReason reason already
 // exists.
 func DeploymentTimedOut(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool {
-    if deployment.Spec.ProgressDeadlineSeconds == nil {
+    if !HasProgressDeadline(deployment) {
        return false
    }
@@ -876,19 +853,6 @@ func WaitForObservedDeployment(getDeploymentFunc func() (*apps.Deployment, error
     })
 }
 
-// TODO: remove the duplicate
-// WaitForObservedInternalDeployment polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration.
-// Returns error if polling timesout.
-func WaitForObservedDeploymentInternal(getDeploymentFunc func() (*internalextensions.Deployment, error), desiredGeneration int64, interval, timeout time.Duration) error {
-    return wait.Poll(interval, timeout, func() (bool, error) {
-        deployment, err := getDeploymentFunc()
-        if err != nil {
-            return false, err
-        }
-        return deployment.Status.ObservedGeneration >= desiredGeneration, nil
-    })
-}
-
 // ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one
 // step. For example:
 //
@@ -899,11 +863,11 @@ func WaitForObservedDeploymentInternal(getDeploymentFunc func() (*internalextens
 // 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
 // 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1)
 func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) {
-    surge, err := intstrutil.GetValueFromIntOrPercent(maxSurge, int(desired), true)
+    surge, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), int(desired), true)
     if err != nil {
         return 0, 0, err
     }
-    unavailable, err := intstrutil.GetValueFromIntOrPercent(maxUnavailable, int(desired), false)
+    unavailable, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), int(desired), false)
     if err != nil {
         return 0, 0, err
     }
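
ResolveFenceposts now tolerates nil fenceposts by defaulting them to 0 via intstrutil.ValueOrDefault, instead of passing possibly-nil pointers straight into GetValueFromIntOrPercent. A small standalone sketch of the new behavior, using the apimachinery intstr package named in the hunk above:

package main

import (
	"fmt"

	intstrutil "k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	desired := 10

	// A nil maxSurge now resolves to 0 rather than being dereferenced.
	var maxSurge *intstrutil.IntOrString
	surge, err := intstrutil.GetValueFromIntOrPercent(
		intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), desired, true)
	fmt.Println(surge, err) // 0 <nil>

	// Percentages behave as before: 39% of 10 rounded down is 3.
	maxUnavailable := intstrutil.FromString("39%")
	unavailable, err := intstrutil.GetValueFromIntOrPercent(
		intstrutil.ValueOrDefault(&maxUnavailable, intstrutil.FromInt(0)), desired, false)
	fmt.Println(unavailable, err) // 3 <nil>
}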
@@ -918,3 +882,7 @@ func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired
 
     return int32(surge), int32(unavailable), nil
 }
+
+func HasProgressDeadline(d *apps.Deployment) bool {
+    return d.Spec.ProgressDeadlineSeconds != nil && *d.Spec.ProgressDeadlineSeconds != math.MaxInt32
+}
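
The new HasProgressDeadline helper centralizes what used to be a bare nil check: a deadline counts as set only when the field is non-nil and not math.MaxInt32, the sentinel the tests above label "infinite". A tiny demonstration of the three cases (self-contained, using only the apps/v1 types already referenced in this diff):

package main

import (
	"fmt"
	"math"

	apps "k8s.io/api/apps/v1"
)

// hasProgressDeadline mirrors the helper added above.
func hasProgressDeadline(d *apps.Deployment) bool {
	return d.Spec.ProgressDeadlineSeconds != nil && *d.Spec.ProgressDeadlineSeconds != math.MaxInt32
}

func main() {
	ten := int32(10)
	infinite := int32(math.MaxInt32)

	for _, pds := range []*int32{nil, &ten, &infinite} {
		d := &apps.Deployment{}
		d.Spec.ProgressDeadlineSeconds = pds
		fmt.Println(hasProgressDeadline(d)) // false, true, false
	}
}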

78  vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util_test.go  (generated, vendored)
@@ -18,6 +18,7 @@ package util
 
 import (
     "fmt"
+    "math"
     "math/rand"
     "reflect"
     "sort"
@@ -617,52 +618,83 @@ func TestGetReplicaCountForReplicaSets(t *testing.T) {
 
 func TestResolveFenceposts(t *testing.T) {
     tests := []struct {
-        maxSurge          string
-        maxUnavailable    string
+        maxSurge          *string
+        maxUnavailable    *string
         desired           int32
         expectSurge       int32
         expectUnavailable int32
         expectError       bool
     }{
         {
-            maxSurge:          "0%",
-            maxUnavailable:    "0%",
+            maxSurge:          newString("0%"),
+            maxUnavailable:    newString("0%"),
             desired:           0,
             expectSurge:       0,
             expectUnavailable: 1,
             expectError:       false,
         },
         {
-            maxSurge:          "39%",
-            maxUnavailable:    "39%",
+            maxSurge:          newString("39%"),
+            maxUnavailable:    newString("39%"),
             desired:           10,
             expectSurge:       4,
             expectUnavailable: 3,
             expectError:       false,
         },
         {
-            maxSurge:          "oops",
-            maxUnavailable:    "39%",
+            maxSurge:          newString("oops"),
+            maxUnavailable:    newString("39%"),
             desired:           10,
             expectSurge:       0,
             expectUnavailable: 0,
             expectError:       true,
         },
         {
-            maxSurge:          "55%",
-            maxUnavailable:    "urg",
+            maxSurge:          newString("55%"),
+            maxUnavailable:    newString("urg"),
             desired:           10,
             expectSurge:       0,
             expectUnavailable: 0,
             expectError:       true,
         },
+        {
+            maxSurge:          nil,
+            maxUnavailable:    newString("39%"),
+            desired:           10,
+            expectSurge:       0,
+            expectUnavailable: 3,
+            expectError:       false,
+        },
+        {
+            maxSurge:          newString("39%"),
+            maxUnavailable:    nil,
+            desired:           10,
+            expectSurge:       4,
+            expectUnavailable: 0,
+            expectError:       false,
+        },
+        {
+            maxSurge:          nil,
+            maxUnavailable:    nil,
+            desired:           10,
+            expectSurge:       0,
+            expectUnavailable: 1,
+            expectError:       false,
+        },
     }
 
     for num, test := range tests {
-        t.Run("maxSurge="+test.maxSurge, func(t *testing.T) {
-            maxSurge := intstr.FromString(test.maxSurge)
-            maxUnavail := intstr.FromString(test.maxUnavailable)
-            surge, unavail, err := ResolveFenceposts(&maxSurge, &maxUnavail, test.desired)
+        t.Run(fmt.Sprintf("%d", num), func(t *testing.T) {
+            var maxSurge, maxUnavail *intstr.IntOrString
+            if test.maxSurge != nil {
+                surge := intstr.FromString(*test.maxSurge)
+                maxSurge = &surge
+            }
+            if test.maxUnavailable != nil {
+                unavail := intstr.FromString(*test.maxUnavailable)
+                maxUnavail = &unavail
+            }
+            surge, unavail, err := ResolveFenceposts(maxSurge, maxUnavail, test.desired)
             if err != nil && !test.expectError {
                 t.Errorf("unexpected error %v", err)
             }
@@ -676,6 +708,10 @@ func TestResolveFenceposts(t *testing.T) {
     }
 }
 
+func newString(s string) *string {
+    return &s
+}
+
 func TestNewRSNewReplicas(t *testing.T) {
     tests := []struct {
         Name string
@@ -1068,8 +1104,9 @@ func TestDeploymentProgressing(t *testing.T) {
 
 func TestDeploymentTimedOut(t *testing.T) {
     var (
-        null *int32
-        ten  = int32(10)
+        null     *int32
+        ten      = int32(10)
+        infinite = int32(math.MaxInt32)
     )
 
     timeFn := func(min, sec int) time.Time {
@@ -1102,12 +1139,19 @@ func TestDeploymentTimedOut(t *testing.T) {
         expected bool
     }{
         {
-            name: "no progressDeadlineSeconds specified - no timeout",
+            name: "nil progressDeadlineSeconds specified - no timeout",
 
             d:        deployment(apps.DeploymentProgressing, v1.ConditionTrue, "", null, timeFn(1, 9)),
             nowFn:    func() time.Time { return timeFn(1, 20) },
             expected: false,
         },
+        {
+            name: "infinite progressDeadlineSeconds specified - no timeout",
+
+            d:        deployment(apps.DeploymentProgressing, v1.ConditionTrue, "", &infinite, timeFn(1, 9)),
+            nowFn:    func() time.Time { return timeFn(1, 20) },
+            expected: false,
+        },
         {
             name: "progressDeadlineSeconds: 10s, now - started => 00:01:20 - 00:01:09 => 11s",

2  vendor/k8s.io/kubernetes/pkg/controller/deployment/util/hash_test.go  (generated, vendored)
@@ -105,7 +105,7 @@ var podSpec string = `
 `
 
 func TestPodTemplateSpecHash(t *testing.T) {
-    seenHashes := make(map[uint32]int)
+    seenHashes := make(map[string]int)
 
     for i := 0; i < 1000; i++ {
         specJson := strings.Replace(podSpec, "@@VERSION@@", strconv.Itoa(i), 1)

60  vendor/k8s.io/kubernetes/pkg/controller/deployment/util/pod_util.go  (generated, vendored)
@@ -1,60 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package util
-
-import (
-    "github.com/golang/glog"
-
-    "k8s.io/api/core/v1"
-    errorsutil "k8s.io/apimachinery/pkg/util/errors"
-    v1core "k8s.io/client-go/kubernetes/typed/core/v1"
-    corelisters "k8s.io/client-go/listers/core/v1"
-    "k8s.io/client-go/util/retry"
-)
-
-// TODO: use client library instead when it starts to support update retries
-//       see https://github.com/kubernetes/kubernetes/issues/21479
-type updatePodFunc func(pod *v1.Pod) error
-
-// UpdatePodWithRetries updates a pod with given applyUpdate function.
-func UpdatePodWithRetries(podClient v1core.PodInterface, podLister corelisters.PodLister, namespace, name string, applyUpdate updatePodFunc) (*v1.Pod, error) {
-    var pod *v1.Pod
-
-    retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
-        var err error
-        pod, err = podLister.Pods(namespace).Get(name)
-        if err != nil {
-            return err
-        }
-        pod = pod.DeepCopy()
-        // Apply the update, then attempt to push it to the apiserver.
-        if applyErr := applyUpdate(pod); applyErr != nil {
-            return applyErr
-        }
-        pod, err = podClient.Update(pod)
-        return err
-    })
-
-    // Ignore the precondition violated error, this pod is already updated
-    // with the desired label.
-    if retryErr == errorsutil.ErrPreconditionViolated {
-        glog.V(4).Infof("Pod %s/%s precondition doesn't hold, skip updating it.", namespace, name)
-        retryErr = nil
-    }
-
-    return pod, retryErr
-}

60  vendor/k8s.io/kubernetes/pkg/controller/deployment/util/replicaset_util.go  (generated, vendored)
@@ -1,60 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package util
-
-import (
-    "github.com/golang/glog"
-
-    apps "k8s.io/api/apps/v1"
-    errorsutil "k8s.io/apimachinery/pkg/util/errors"
-    appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
-    appslisters "k8s.io/client-go/listers/apps/v1"
-    "k8s.io/client-go/util/retry"
-)
-
-// TODO: use client library instead when it starts to support update retries
-//       see https://github.com/kubernetes/kubernetes/issues/21479
-type updateRSFunc func(rs *apps.ReplicaSet) error
-
-// UpdateRSWithRetries updates a RS with given applyUpdate function. Note that RS not found error is ignored.
-// The returned bool value can be used to tell if the RS is actually updated.
-func UpdateRSWithRetries(rsClient appsclient.ReplicaSetInterface, rsLister appslisters.ReplicaSetLister, namespace, name string, applyUpdate updateRSFunc) (*apps.ReplicaSet, error) {
-    var rs *apps.ReplicaSet
-
-    retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
-        var err error
-        rs, err = rsLister.ReplicaSets(namespace).Get(name)
-        if err != nil {
-            return err
-        }
-        rs = rs.DeepCopy()
-        // Apply the update, then attempt to push it to the apiserver.
-        if applyErr := applyUpdate(rs); applyErr != nil {
-            return applyErr
-        }
-        rs, err = rsClient.Update(rs)
-        return err
-    })
-
-    // Ignore the precondition violated error, but the RS isn't updated.
-    if retryErr == errorsutil.ErrPreconditionViolated {
-        glog.V(4).Infof("Replica set %s/%s precondition doesn't hold, skip updating it.", namespace, name)
-        retryErr = nil
-    }
-
-    return rs, retryErr
-}
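
Both deleted helpers are thin wrappers around client-go's retry.RetryOnConflict: read from the lister, deep-copy, mutate, push, retry on conflict. The pattern remains available directly from client-go; a condensed sketch of an equivalent caller-side update, with illustrative names (client-go of this vintage takes no context argument on Update):

package deployutil

import (
	apps "k8s.io/api/apps/v1"
	appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
	appslisters "k8s.io/client-go/listers/apps/v1"
	"k8s.io/client-go/util/retry"
)

// updateRSReplicas retries a ReplicaSet update on conflict, mirroring the
// deleted UpdateRSWithRetries helper above.
func updateRSReplicas(rsClient appsclient.ReplicaSetInterface, rsLister appslisters.ReplicaSetLister, namespace, name string, replicas int32) (*apps.ReplicaSet, error) {
	var rs *apps.ReplicaSet
	retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		var err error
		rs, err = rsLister.ReplicaSets(namespace).Get(name)
		if err != nil {
			return err
		}
		rs = rs.DeepCopy() // never mutate the shared lister cache
		rs.Spec.Replicas = &replicas
		rs, err = rsClient.Update(rs)
		return err
	})
	return rs, retryErr
}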