Bumping k8s dependencies to 1.13

Cheng Xing
2018-11-16 14:08:25 -08:00
parent 305407125c
commit b4c0b68ec7
8002 changed files with 884099 additions and 276228 deletions


@@ -3,6 +3,23 @@ package(default_visibility = ["//visibility:public"])
 load(
     "@io_bazel_rules_go//go:def.bzl",
     "go_library",
+    "go_test",
 )
 
+go_test(
+    name = "go_default_test",
+    srcs = ["node_ipam_controller_test.go"],
+    embed = [":go_default_library"],
+    deps = [
+        "//pkg/cloudprovider/providers/fake:go_default_library",
+        "//pkg/controller:go_default_library",
+        "//pkg/controller/nodeipam/ipam:go_default_library",
+        "//pkg/controller/testutil:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/informers:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
+    ],
+)
+
 go_library(
@@ -19,15 +36,15 @@ go_library(
"//pkg/controller/nodeipam/ipam:go_default_library",
"//pkg/controller/nodeipam/ipam/sync:go_default_library",
"//pkg/util/metrics:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)


@@ -20,12 +20,12 @@ go_test(
"//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
"//pkg/controller/nodeipam/ipam/test:go_default_library",
"//pkg/controller/testutil:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
],
)
@@ -52,24 +52,24 @@ go_library(
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/util/node:go_default_library",
"//pkg/util/taints:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/clientset/versioned/scheme:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme:go_default_library",
],
)


@@ -31,7 +31,7 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme"
"k8s.io/metrics/pkg/client/clientset/versioned/scheme"
)
type adapter struct {


@@ -71,7 +71,10 @@ const (
 	cidrUpdateRetries = 3
 
 	// updateRetryTimeout is the time to wait before requeing a failed node for retry
-	updateRetryTimeout = 100 * time.Millisecond
+	updateRetryTimeout = 250 * time.Millisecond
+
+	// maxUpdateRetryTimeout is the maximum amount of time between timeouts.
+	maxUpdateRetryTimeout = 5 * time.Second
 
 	// updateMaxRetries is the max retries for a failed node
 	updateMaxRetries = 10
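
The retry behavior changes here from a fixed 100ms requeue delay to exponential backoff: the base delay becomes 250ms and a 5s cap is introduced. As a standalone sketch (not part of this commit), this is the pre-jitter schedule the new constants imply for the nodeUpdateRetryTimeout helper added below:

package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		updateRetryTimeout    = 250 * time.Millisecond // new base delay
		maxUpdateRetryTimeout = 5 * time.Second        // new cap
	)
	// Reproduce the doubling performed by nodeUpdateRetryTimeout (below),
	// before jitter is applied: 250ms, 500ms, 1s, 2s, 4s, 5s, 5s, ...
	for count := 0; count <= 7; count++ {
		timeout := updateRetryTimeout
		for i := 0; i < count && timeout < maxUpdateRetryTimeout; i++ {
			timeout *= 2
		}
		if timeout > maxUpdateRetryTimeout {
			timeout = maxUpdateRetryTimeout
		}
		fmt.Printf("retry %d: %v\n", count, timeout)
	}
}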


@@ -18,12 +18,14 @@ package ipam
 import (
 	"fmt"
+	"math/rand"
 	"net"
 	"sync"
 	"time"
 
 	"github.com/golang/glog"
 
+	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
@@ -33,7 +35,6 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -155,14 +156,19 @@ func (ca *cloudCIDRAllocator) worker(stopChan <-chan struct{}) {
glog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed")
return
}
if err := ca.updateCIDRAllocation(workItem); err != nil {
if ca.canRetry(workItem) {
time.AfterFunc(updateRetryTimeout, func() {
if err := ca.updateCIDRAllocation(workItem); err == nil {
glog.V(3).Infof("Updated CIDR for %q", workItem)
} else {
glog.Errorf("Error updating CIDR for %q: %v", workItem, err)
if canRetry, timeout := ca.retryParams(workItem); canRetry {
glog.V(2).Infof("Retrying update for %q after %v", workItem, timeout)
time.AfterFunc(timeout, func() {
// Requeue the failed node for update again.
ca.nodeUpdateChannel <- workItem
})
continue
}
glog.Errorf("Exceeded retry count for %q, dropping from queue", workItem)
}
ca.removeNodeFromProcessing(workItem)
case <-stopChan:
@@ -181,15 +187,34 @@ func (ca *cloudCIDRAllocator) insertNodeToProcessing(nodeName string) bool {
 	return true
 }
 
-func (ca *cloudCIDRAllocator) canRetry(nodeName string) bool {
+func (ca *cloudCIDRAllocator) retryParams(nodeName string) (bool, time.Duration) {
 	ca.lock.Lock()
 	defer ca.lock.Unlock()
-	count := ca.nodesInProcessing[nodeName].retries + 1
+
+	entry, ok := ca.nodesInProcessing[nodeName]
+	if !ok {
+		glog.Errorf("Cannot get retryParams for %q as entry does not exist", nodeName)
+		return false, 0
+	}
+
+	count := entry.retries + 1
 	if count > updateMaxRetries {
-		return false
+		return false, 0
 	}
 	ca.nodesInProcessing[nodeName].retries = count
-	return true
+
+	return true, nodeUpdateRetryTimeout(count)
+}
+
+func nodeUpdateRetryTimeout(count int) time.Duration {
+	timeout := updateRetryTimeout
+	for i := 0; i < count && timeout < maxUpdateRetryTimeout; i++ {
+		timeout *= 2
+	}
+
+	if timeout > maxUpdateRetryTimeout {
+		timeout = maxUpdateRetryTimeout
+	}
+
+	return time.Duration(timeout.Nanoseconds()/2 + rand.Int63n(timeout.Nanoseconds()))
+}
 
 func (ca *cloudCIDRAllocator) removeNodeFromProcessing(nodeName string) {
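
The final line of nodeUpdateRetryTimeout adds jitter: half the backed-off timeout plus a random value in [0, timeout) spreads the result uniformly over [timeout/2, 3*timeout/2), so nodes that failed together do not requeue in lockstep. A standalone sketch (not part of this commit) that samples the same expression and checks those bounds, which are exactly what the test's withinExpectedRange helper asserts below:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// Sample the jitter expression nodeUpdateRetryTimeout returns and
	// confirm every draw lands in [timeout/2, 3*timeout/2).
	timeout := 2 * time.Second
	lo, hi := timeout/2, 3*timeout/2
	for i := 0; i < 5; i++ {
		d := time.Duration(timeout.Nanoseconds()/2 + rand.Int63n(timeout.Nanoseconds()))
		fmt.Printf("%v in [%v, %v): %t\n", d, lo, hi, d >= lo && d < hi)
	}
}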


@@ -17,6 +17,7 @@ limitations under the License.
 package ipam
 
 import (
+	"fmt"
 	"testing"
 	"time"
@@ -57,3 +58,26 @@ func TestBoundedRetries(t *testing.T) {
 		// wait for node to finish processing (should terminate and not time out)
 	}
 }
+
+func withinExpectedRange(got time.Duration, expected time.Duration) bool {
+	return got >= expected/2 && got <= 3*expected/2
+}
+
+func TestNodeUpdateRetryTimeout(t *testing.T) {
+	for _, tc := range []struct {
+		count int
+		want  time.Duration
+	}{
+		{count: 0, want: 250 * time.Millisecond},
+		{count: 1, want: 500 * time.Millisecond},
+		{count: 2, want: 1000 * time.Millisecond},
+		{count: 3, want: 2000 * time.Millisecond},
+		{count: 50, want: 5000 * time.Millisecond},
+	} {
+		t.Run(fmt.Sprintf("count %d", tc.count), func(t *testing.T) {
+			if got := nodeUpdateRetryTimeout(tc.count); !withinExpectedRange(got, tc.want) {
+				t.Errorf("nodeUpdateRetryTimeout(%d) = %v; want %v", tc.count, got, tc.want)
+			}
+		})
+	}
+}


@@ -209,7 +209,7 @@ func (c *Controller) onDelete(node *v1.Node) error {
 		syncer.Delete(node)
 		delete(c.syncers, node.Name)
 	} else {
-		glog.Warning("Node %q was already deleted", node.Name)
+		glog.Warningf("Node %q was already deleted", node.Name)
 	}
 
 	return nil
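
This one-character fix matters because glog.Warning, like fmt.Sprint, does not interpret format verbs; the %q was being logged literally, with the node name concatenated onto the end of the message. A standalone sketch (not part of this commit) contrasting the two calls:

package main

import "github.com/golang/glog"

func main() {
	name := "node0"
	// glog.Warning uses fmt.Sprint semantics, so the verb is emitted
	// literally: Node %q was already deletednode0
	glog.Warning("Node %q was already deleted", name)
	// glog.Warningf formats as intended: Node "node0" was already deleted
	glog.Warningf("Node %q was already deleted", name)
	glog.Flush()
}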


@@ -7,8 +7,8 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
     ],
 )
@@ -19,9 +19,9 @@ go_test(
     deps = [
         "//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
         "//pkg/controller/nodeipam/ipam/test:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
     ],
 )


@@ -110,8 +110,11 @@ func NewNodeIpamController(
glog.Fatal("Controller: Must specify --cluster-cidr if --allocate-node-cidrs is set")
}
mask := clusterCIDR.Mask
if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize {
glog.Fatal("Controller: Invalid --cluster-cidr, mask size of cluster CIDR must be less than --node-cidr-mask-size")
if allocatorType != ipam.CloudAllocatorType {
// Cloud CIDR allocator does not rely on clusterCIDR or nodeCIDRMaskSize for allocation.
if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize {
glog.Fatal("Controller: Invalid --cluster-cidr, mask size of cluster CIDR must be less than --node-cidr-mask-size")
}
}
ic := &Controller{


@@ -0,0 +1,100 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package nodeipam
+
+import (
+	"net"
+	"os"
+	"os/exec"
+	"testing"
+
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/informers"
+	"k8s.io/client-go/kubernetes/fake"
+	fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
+	"k8s.io/kubernetes/pkg/controller"
+	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
+	"k8s.io/kubernetes/pkg/controller/testutil"
+)
+
+func newTestNodeIpamController(clusterCIDR, serviceCIDR *net.IPNet, nodeCIDRMaskSize int, allocatorType ipam.CIDRAllocatorType) (*Controller, error) {
+	clientSet := fake.NewSimpleClientset()
+	fakeNodeHandler := &testutil.FakeNodeHandler{
+		Existing: []*v1.Node{
+			{ObjectMeta: metav1.ObjectMeta{Name: "node0"}},
+		},
+		Clientset: fake.NewSimpleClientset(),
+	}
+	fakeClient := &fake.Clientset{}
+	fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc())
+	fakeNodeInformer := fakeInformerFactory.Core().V1().Nodes()
+
+	for _, node := range fakeNodeHandler.Existing {
+		fakeNodeInformer.Informer().GetStore().Add(node)
+	}
+
+	fakeCloud := &fakecloud.FakeCloud{}
+	return NewNodeIpamController(
+		fakeNodeInformer, fakeCloud, clientSet,
+		clusterCIDR, serviceCIDR, nodeCIDRMaskSize, allocatorType,
+	)
+}
+
+// TestNewNodeIpamControllerWithCIDRMasks tests if the controller can be
+// created with combinations of network CIDRs and masks.
+func TestNewNodeIpamControllerWithCIDRMasks(t *testing.T) {
+	for _, tc := range []struct {
+		desc          string
+		clusterCIDR   string
+		serviceCIDR   string
+		maskSize      int
+		allocatorType ipam.CIDRAllocatorType
+		wantFatal     bool
+	}{
+		{"valid_range_allocator", "10.0.0.0/21", "10.1.0.0/21", 24, ipam.RangeAllocatorType, false},
+		{"valid_cloud_allocator", "10.0.0.0/21", "10.1.0.0/21", 24, ipam.CloudAllocatorType, false},
+		{"invalid_cluster_CIDR", "invalid", "10.1.0.0/21", 24, ipam.CloudAllocatorType, true},
+		{"valid_CIDR_smaller_than_mask_cloud_allocator", "10.0.0.0/26", "10.1.0.0/21", 24, ipam.CloudAllocatorType, false},
+		{"invalid_CIDR_smaller_than_mask_other_allocators", "10.0.0.0/26", "10.1.0.0/21", 24, ipam.IPAMFromCloudAllocatorType, true},
+	} {
+		t.Run(tc.desc, func(t *testing.T) {
+			_, clusterCIDRIpNet, _ := net.ParseCIDR(tc.clusterCIDR)
+			_, serviceCIDRIpNet, _ := net.ParseCIDR(tc.serviceCIDR)
+			if os.Getenv("EXIT_ON_FATAL") == "1" {
+				// This is the subprocess which runs the actual code.
+				newTestNodeIpamController(clusterCIDRIpNet, serviceCIDRIpNet, tc.maskSize, tc.allocatorType)
+				return
+			}
+			// This is the host process that monitors the exit code of the subprocess.
+			cmd := exec.Command(os.Args[0], "-test.run=TestNewNodeIpamControllerWithCIDRMasks/"+tc.desc)
+			cmd.Env = append(os.Environ(), "EXIT_ON_FATAL=1")
+			err := cmd.Run()
+			var gotFatal bool
+			if err != nil {
+				exitErr, ok := err.(*exec.ExitError)
+				if !ok {
+					t.Fatalf("Failed to run subprocess: %v", err)
+				}
+				gotFatal = !exitErr.Success()
+			}
+			if gotFatal != tc.wantFatal {
+				t.Errorf("newTestNodeIpamController(%v, %v, %v, %v): gotFatal = %t; wantFatal = %t", clusterCIDRIpNet, serviceCIDRIpNet, tc.maskSize, tc.allocatorType, gotFatal, tc.wantFatal)
+			}
+		})
+	}
+}
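
The subprocess dance in this new test exists because glog.Fatal ends with os.Exit, which would otherwise kill the whole test binary. The host invocation therefore re-executes itself with -test.run scoped to a single case and EXIT_ON_FATAL=1 in the environment, then treats a non-zero exit status from the child as the fatal outcome. This mirrors the standard Go pattern for testing code paths that call os.Exit.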