Add generated file
This PR adds generated files under pkg/client and the vendor folder.
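For reviewers unfamiliar with the new ipam/sync package, here is a minimal, hypothetical sketch of how the vendored syncer below is typically driven, mirroring the pattern in sync_test.go. The `stubAPIs` type, the `10.1.0.0/16` cluster range, and the /24 node mask size are illustrative assumptions and are not part of this change; a real caller would back the stub methods with the cloud provider and the Kubernetes API.

```go
package main

import (
	"context"
	"net"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
	nodesync "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync"
)

// stubAPIs is a hypothetical stand-in that satisfies the controller,
// cloudAlias, and kubeAPI interfaces declared in sync.go.
type stubAPIs struct{}

func (s *stubAPIs) Alias(ctx context.Context, nodeName string) (*net.IPNet, error) { return nil, nil }
func (s *stubAPIs) AddAlias(ctx context.Context, nodeName string, cidrRange *net.IPNet) error {
	return nil
}
func (s *stubAPIs) Node(ctx context.Context, name string) (*v1.Node, error) { return &v1.Node{}, nil }
func (s *stubAPIs) UpdateNodePodCIDR(ctx context.Context, node *v1.Node, cidrRange *net.IPNet) error {
	return nil
}
func (s *stubAPIs) UpdateNodeNetworkUnavailable(nodeName string, unavailable bool) error { return nil }
func (s *stubAPIs) EmitNodeWarningEvent(nodeName, reason, fmtStr string, args ...interface{})  {}
func (s *stubAPIs) ReportResult(err error)                                                     {}
func (s *stubAPIs) ResyncTimeout() time.Duration                                               { return time.Minute }

func main() {
	// Illustrative cluster CIDR and per-node mask size; not taken from this PR.
	_, clusterCIDR, _ := net.ParseCIDR("10.1.0.0/16")
	set, _ := cidrset.NewCIDRSet(clusterCIDR, 24)

	stub := &stubAPIs{}
	ns := nodesync.New(stub, stub, stub, nodesync.SyncFromCluster, "node1", set)

	done := make(chan struct{})
	go ns.Loop(done) // runs until Delete closes the op channel

	ns.Update(nil)        // nil node: the syncer fetches the spec via kubeAPI.Node before allocating
	ns.Delete(&v1.Node{}) // releases any assigned CIDR and stops the loop
	<-done
}
```

The same single-value pattern (one fake implementing all three interfaces, passed to `New` three times) is what the new sync_test.go does with its `fakeAPIs` type.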
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync/BUILD (new file, generated, vendored, 40 lines)
@@ -0,0 +1,40 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["sync.go"],
    importpath = "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["sync_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
        "//pkg/controller/nodeipam/ipam/test:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync/sync.go (new file, generated, vendored, 381 lines)
@@ -0,0 +1,381 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sync

import (
    "context"
    "fmt"
    "net"
    "time"

    "github.com/golang/glog"

    "k8s.io/api/core/v1"
    "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
)

const (
    // InvalidPodCIDR is the event recorded when a node is found with an
    // invalid PodCIDR.
    InvalidPodCIDR = "CloudCIDRAllocatorInvalidPodCIDR"
    // InvalidModeEvent is the event recorded when the CIDR range cannot be
    // sync'd due to the cluster running in the wrong mode.
    InvalidModeEvent = "CloudCIDRAllocatorInvalidMode"
    // MismatchEvent is the event recorded when the CIDR range allocated in the
    // node spec does not match what has been allocated in the cloud.
    MismatchEvent = "CloudCIDRAllocatorMismatch"
)

// cloudAlias is the interface to the cloud platform APIs.
type cloudAlias interface {
    // Alias returns the IP alias for the node.
    Alias(ctx context.Context, nodeName string) (*net.IPNet, error)
    // AddAlias adds an alias to the node.
    AddAlias(ctx context.Context, nodeName string, cidrRange *net.IPNet) error
}

// kubeAPI is the interface to the Kubernetes APIs.
type kubeAPI interface {
    // Node returns the spec for the Node object.
    Node(ctx context.Context, name string) (*v1.Node, error)
    // UpdateNodePodCIDR updates the PodCIDR in the Node spec.
    UpdateNodePodCIDR(ctx context.Context, node *v1.Node, cidrRange *net.IPNet) error
    // UpdateNodeNetworkUnavailable updates the network unavailable status for the node.
    UpdateNodeNetworkUnavailable(nodeName string, unavailable bool) error
    // EmitNodeWarningEvent emits an event for the given node.
    EmitNodeWarningEvent(nodeName, reason, fmt string, args ...interface{})
}

// controller is the interface to the controller.
type controller interface {
    // ReportResult updates the controller with the result of the latest
    // sync operation.
    ReportResult(err error)
    // ResyncTimeout returns the amount of time to wait before retrying
    // a sync with a node.
    ResyncTimeout() time.Duration
}

// NodeSyncMode is the mode the cloud CIDR allocator runs in.
type NodeSyncMode string

var (
    // SyncFromCloud is the mode that synchronizes the IP allocation from the cloud
    // platform to the node.
    SyncFromCloud NodeSyncMode = "SyncFromCloud"
    // SyncFromCluster is the mode that synchronizes the IP allocation determined
    // by the k8s controller to the cloud provider.
    SyncFromCluster NodeSyncMode = "SyncFromCluster"
)

// IsValidMode returns true if the given mode is valid.
func IsValidMode(m NodeSyncMode) bool {
    switch m {
    case SyncFromCloud:
    case SyncFromCluster:
    default:
        return false
    }
    return true
}

// NodeSync synchronizes the state for a single node in the cluster.
type NodeSync struct {
    c          controller
    cloudAlias cloudAlias
    kubeAPI    kubeAPI
    mode       NodeSyncMode
    nodeName   string
    opChan     chan syncOp
    set        *cidrset.CidrSet
}

// New returns a new syncer for a given node.
func New(c controller, cloudAlias cloudAlias, kubeAPI kubeAPI, mode NodeSyncMode, nodeName string, set *cidrset.CidrSet) *NodeSync {
    return &NodeSync{
        c:          c,
        cloudAlias: cloudAlias,
        kubeAPI:    kubeAPI,
        mode:       mode,
        nodeName:   nodeName,
        opChan:     make(chan syncOp, 1),
        set:        set,
    }
}

// Loop runs the sync loop for a given node. done is an optional channel that
// is closed when the Loop() returns.
func (sync *NodeSync) Loop(done chan struct{}) {
    glog.V(2).Infof("Starting sync loop for node %q", sync.nodeName)

    defer func() {
        if done != nil {
            close(done)
        }
    }()

    timeout := sync.c.ResyncTimeout()
    delayTimer := time.NewTimer(timeout)
    glog.V(4).Infof("Resync node %q in %v", sync.nodeName, timeout)

    for {
        select {
        case op, more := <-sync.opChan:
            if !more {
                glog.V(2).Infof("Stopping sync loop")
                return
            }
            sync.c.ReportResult(op.run(sync))
            if !delayTimer.Stop() {
                <-delayTimer.C
            }
        case <-delayTimer.C:
            glog.V(4).Infof("Running resync for node %q", sync.nodeName)
            sync.c.ReportResult((&updateOp{}).run(sync))
        }

        timeout := sync.c.ResyncTimeout()
        delayTimer.Reset(timeout)
        glog.V(4).Infof("Resync node %q in %v", sync.nodeName, timeout)
    }
}

// Update causes an update operation on the given node. If node is nil, then
// the syncer will fetch the node spec from the API server before syncing.
//
// This method is safe to call from multiple goroutines.
func (sync *NodeSync) Update(node *v1.Node) {
    sync.opChan <- &updateOp{node}
}

// Delete performs the sync operations necessary to remove the node from the
// IPAM state.
//
// This method is safe to call from multiple goroutines.
func (sync *NodeSync) Delete(node *v1.Node) {
    sync.opChan <- &deleteOp{node}
    close(sync.opChan)
}

// syncOp is the interface for generic sync operation.
type syncOp interface {
    // run the requested sync operation.
    run(sync *NodeSync) error
}

// updateOp handles creation and updates of a node.
type updateOp struct {
    node *v1.Node
}

func (op *updateOp) String() string {
    if op.node == nil {
        return fmt.Sprintf("updateOp(nil)")
    }
    return fmt.Sprintf("updateOp(%q,%v)", op.node.Name, op.node.Spec.PodCIDR)
}

func (op *updateOp) run(sync *NodeSync) error {
    glog.V(3).Infof("Running updateOp %+v", op)

    ctx := context.Background()

    if op.node == nil {
        glog.V(3).Infof("Getting node spec for %q", sync.nodeName)
        node, err := sync.kubeAPI.Node(ctx, sync.nodeName)
        if err != nil {
            glog.Errorf("Error getting node %q spec: %v", sync.nodeName, err)
            return err
        }
        op.node = node
    }

    aliasRange, err := sync.cloudAlias.Alias(ctx, sync.nodeName)
    if err != nil {
        glog.Errorf("Error getting cloud alias for node %q: %v", sync.nodeName, err)
        return err
    }

    switch {
    case op.node.Spec.PodCIDR == "" && aliasRange == nil:
        err = op.allocateRange(ctx, sync, op.node)
    case op.node.Spec.PodCIDR == "" && aliasRange != nil:
        err = op.updateNodeFromAlias(ctx, sync, op.node, aliasRange)
    case op.node.Spec.PodCIDR != "" && aliasRange == nil:
        err = op.updateAliasFromNode(ctx, sync, op.node)
    case op.node.Spec.PodCIDR != "" && aliasRange != nil:
        err = op.validateRange(ctx, sync, op.node, aliasRange)
    }

    return err
}

// validateRange checks that the allocated range and the alias range
// match.
func (op *updateOp) validateRange(ctx context.Context, sync *NodeSync, node *v1.Node, aliasRange *net.IPNet) error {
    if node.Spec.PodCIDR != aliasRange.String() {
        glog.Errorf("Inconsistency detected between node PodCIDR and node alias (%v != %v)",
            node.Spec.PodCIDR, aliasRange)
        sync.kubeAPI.EmitNodeWarningEvent(node.Name, MismatchEvent,
            "Node.Spec.PodCIDR != cloud alias (%v != %v)", node.Spec.PodCIDR, aliasRange)
        // User intervention is required in this case, as this is most likely due
        // to the user mucking around with their VM aliases on the side.
    } else {
        glog.V(4).Infof("Node %q CIDR range %v is matches cloud assignment", node.Name, node.Spec.PodCIDR)
    }
    return nil
}

// updateNodeFromAlias updates the the node from the cloud allocated
// alias.
func (op *updateOp) updateNodeFromAlias(ctx context.Context, sync *NodeSync, node *v1.Node, aliasRange *net.IPNet) error {
    if sync.mode != SyncFromCloud {
        sync.kubeAPI.EmitNodeWarningEvent(node.Name, InvalidModeEvent,
            "Cannot sync from cloud in mode %q", sync.mode)
        return fmt.Errorf("cannot sync from cloud in mode %q", sync.mode)
    }

    glog.V(2).Infof("Updating node spec with alias range, node.PodCIDR = %v", aliasRange)

    if err := sync.set.Occupy(aliasRange); err != nil {
        glog.Errorf("Error occupying range %v for node %v", aliasRange, sync.nodeName)
        return err
    }

    if err := sync.kubeAPI.UpdateNodePodCIDR(ctx, node, aliasRange); err != nil {
        glog.Errorf("Could not update node %q PodCIDR to %v: %v", node.Name, aliasRange, err)
        return err
    }

    glog.V(2).Infof("Node %q PodCIDR set to %v", node.Name, aliasRange)

    if err := sync.kubeAPI.UpdateNodeNetworkUnavailable(node.Name, false); err != nil {
        glog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
        return err
    }

    glog.V(2).Infof("Updated node %q PodCIDR from cloud alias %v", node.Name, aliasRange)

    return nil
}

// updateAliasFromNode updates the cloud alias given the node allocation.
func (op *updateOp) updateAliasFromNode(ctx context.Context, sync *NodeSync, node *v1.Node) error {
    if sync.mode != SyncFromCluster {
        sync.kubeAPI.EmitNodeWarningEvent(
            node.Name, InvalidModeEvent, "Cannot sync to cloud in mode %q", sync.mode)
        return fmt.Errorf("cannot sync to cloud in mode %q", sync.mode)
    }

    _, aliasRange, err := net.ParseCIDR(node.Spec.PodCIDR)
    if err != nil {
        glog.Errorf("Could not parse PodCIDR (%q) for node %q: %v",
            node.Spec.PodCIDR, node.Name, err)
        return err
    }

    if err := sync.set.Occupy(aliasRange); err != nil {
        glog.Errorf("Error occupying range %v for node %v", aliasRange, sync.nodeName)
        return err
    }

    if err := sync.cloudAlias.AddAlias(ctx, node.Name, aliasRange); err != nil {
        glog.Errorf("Could not add alias %v for node %q: %v", aliasRange, node.Name, err)
        return err
    }

    if err := sync.kubeAPI.UpdateNodeNetworkUnavailable(node.Name, false); err != nil {
        glog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
        return err
    }

    glog.V(2).Infof("Updated node %q cloud alias with node spec, node.PodCIDR = %v",
        node.Name, node.Spec.PodCIDR)

    return nil
}

// allocateRange allocates a new range and updates both the cloud
// platform and the node allocation.
func (op *updateOp) allocateRange(ctx context.Context, sync *NodeSync, node *v1.Node) error {
    if sync.mode != SyncFromCluster {
        sync.kubeAPI.EmitNodeWarningEvent(node.Name, InvalidModeEvent,
            "Cannot allocate CIDRs in mode %q", sync.mode)
        return fmt.Errorf("controller cannot allocate CIDRS in mode %q", sync.mode)
    }

    cidrRange, err := sync.set.AllocateNext()
    if err != nil {
        return err
    }
    // If addAlias returns a hard error, cidrRange will be leaked as there
    // is no durable record of the range. The missing space will be
    // recovered on the next restart of the controller.
    if err := sync.cloudAlias.AddAlias(ctx, node.Name, cidrRange); err != nil {
        glog.Errorf("Could not add alias %v for node %q: %v", cidrRange, node.Name, err)
        return err
    }

    if err := sync.kubeAPI.UpdateNodePodCIDR(ctx, node, cidrRange); err != nil {
        glog.Errorf("Could not update node %q PodCIDR to %v: %v", node.Name, cidrRange, err)
        return err
    }

    if err := sync.kubeAPI.UpdateNodeNetworkUnavailable(node.Name, false); err != nil {
        glog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
        return err
    }

    glog.V(2).Infof("Allocated PodCIDR %v for node %q", cidrRange, node.Name)

    return nil
}

// deleteOp handles deletion of a node.
type deleteOp struct {
    node *v1.Node
}

func (op *deleteOp) String() string {
    if op.node == nil {
        return fmt.Sprintf("deleteOp(nil)")
    }
    return fmt.Sprintf("deleteOp(%q,%v)", op.node.Name, op.node.Spec.PodCIDR)
}

func (op *deleteOp) run(sync *NodeSync) error {
    glog.V(3).Infof("Running deleteOp %+v", op)
    if op.node.Spec.PodCIDR == "" {
        glog.V(2).Infof("Node %q was deleted, node had no PodCIDR range assigned", op.node.Name)
        return nil
    }

    _, cidrRange, err := net.ParseCIDR(op.node.Spec.PodCIDR)
    if err != nil {
        glog.Errorf("Deleted node %q has an invalid podCIDR %q: %v",
            op.node.Name, op.node.Spec.PodCIDR, err)
        sync.kubeAPI.EmitNodeWarningEvent(op.node.Name, InvalidPodCIDR,
            "Node %q has an invalid PodCIDR: %q", op.node.Name, op.node.Spec.PodCIDR)
        return nil
    }

    sync.set.Release(cidrRange)
    glog.V(2).Infof("Node %q was deleted, releasing CIDR range %v",
        op.node.Name, op.node.Spec.PodCIDR)

    return nil
}
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync/sync_test.go (new file, generated, vendored, 297 lines)
@@ -0,0 +1,297 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sync

import (
    "context"
    "fmt"
    "net"
    "reflect"
    "testing"
    "time"

    "github.com/golang/glog"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
    "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test"

    "k8s.io/api/core/v1"
)

var (
    _, clusterCIDRRange, _ = net.ParseCIDR("10.1.0.0/16")
)

type fakeEvent struct {
    nodeName string
    reason   string
}

type fakeAPIs struct {
    aliasRange    *net.IPNet
    aliasErr      error
    addAliasErr   error
    nodeRet       *v1.Node
    nodeErr       error
    updateNodeErr error
    resyncTimeout time.Duration
    reportChan    chan struct{}

    updateNodeNetworkUnavailableErr error

    calls   []string
    events  []fakeEvent
    results []error
}

func (f *fakeAPIs) Alias(ctx context.Context, nodeName string) (*net.IPNet, error) {
    f.calls = append(f.calls, fmt.Sprintf("alias %v", nodeName))
    return f.aliasRange, f.aliasErr
}

func (f *fakeAPIs) AddAlias(ctx context.Context, nodeName string, cidrRange *net.IPNet) error {
    f.calls = append(f.calls, fmt.Sprintf("addAlias %v %v", nodeName, cidrRange))
    return f.addAliasErr
}

func (f *fakeAPIs) Node(ctx context.Context, name string) (*v1.Node, error) {
    f.calls = append(f.calls, fmt.Sprintf("node %v", name))
    return f.nodeRet, f.nodeErr
}

func (f *fakeAPIs) UpdateNodePodCIDR(ctx context.Context, node *v1.Node, cidrRange *net.IPNet) error {
    f.calls = append(f.calls, fmt.Sprintf("updateNode %v", node))
    return f.updateNodeErr
}

func (f *fakeAPIs) UpdateNodeNetworkUnavailable(nodeName string, unavailable bool) error {
    f.calls = append(f.calls, fmt.Sprintf("updateNodeNetworkUnavailable %v %v", nodeName, unavailable))
    return f.updateNodeNetworkUnavailableErr
}

func (f *fakeAPIs) EmitNodeWarningEvent(nodeName, reason, fmtStr string, args ...interface{}) {
    f.events = append(f.events, fakeEvent{nodeName, reason})
}

func (f *fakeAPIs) ReportResult(err error) {
    glog.V(2).Infof("ReportResult %v", err)
    f.results = append(f.results, err)
    if f.reportChan != nil {
        f.reportChan <- struct{}{}
    }
}

func (f *fakeAPIs) ResyncTimeout() time.Duration {
    if f.resyncTimeout == 0 {
        return time.Second * 10000
    }
    return f.resyncTimeout
}

func (f *fakeAPIs) dumpTrace() {
    for i, x := range f.calls {
        glog.Infof("trace %v: %v", i, x)
    }
}

var nodeWithoutCIDRRange = &v1.Node{
    ObjectMeta: metav1.ObjectMeta{Name: "node1"},
}

var nodeWithCIDRRange = &v1.Node{
    ObjectMeta: metav1.ObjectMeta{Name: "node1"},
    Spec:       v1.NodeSpec{PodCIDR: "10.1.1.0/24"},
}

func TestNodeSyncUpdate(t *testing.T) {
    t.Parallel()

    for _, tc := range []struct {
        desc string
        mode NodeSyncMode
        node *v1.Node
        fake fakeAPIs

        events    []fakeEvent
        wantError bool
    }{
        {
            desc: "validate range ==",
            mode: SyncFromCloud,
            node: nodeWithCIDRRange,
            fake: fakeAPIs{
                aliasRange: test.MustParseCIDR(nodeWithCIDRRange.Spec.PodCIDR),
            },
        },
        {
            desc:   "validate range !=",
            mode:   SyncFromCloud,
            node:   nodeWithCIDRRange,
            fake:   fakeAPIs{aliasRange: test.MustParseCIDR("192.168.0.0/24")},
            events: []fakeEvent{{"node1", "CloudCIDRAllocatorMismatch"}},
        },
        {
            desc:      "update alias from node",
            mode:      SyncFromCloud,
            node:      nodeWithCIDRRange,
            events:    []fakeEvent{{"node1", "CloudCIDRAllocatorInvalidMode"}},
            wantError: true,
        },
        {
            desc: "update alias from node",
            mode: SyncFromCluster,
            node: nodeWithCIDRRange,
            // XXX/bowei -- validation
        },
        {
            desc: "update node from alias",
            mode: SyncFromCloud,
            node: nodeWithoutCIDRRange,
            fake: fakeAPIs{aliasRange: test.MustParseCIDR("10.1.2.3/16")},
            // XXX/bowei -- validation
        },
        {
            desc:      "update node from alias",
            mode:      SyncFromCluster,
            node:      nodeWithoutCIDRRange,
            fake:      fakeAPIs{aliasRange: test.MustParseCIDR("10.1.2.3/16")},
            events:    []fakeEvent{{"node1", "CloudCIDRAllocatorInvalidMode"}},
            wantError: true,
        },
        {
            desc:      "allocate range",
            mode:      SyncFromCloud,
            node:      nodeWithoutCIDRRange,
            events:    []fakeEvent{{"node1", "CloudCIDRAllocatorInvalidMode"}},
            wantError: true,
        },
        {
            desc: "allocate range",
            mode: SyncFromCluster,
            node: nodeWithoutCIDRRange,
        },
        {
            desc: "update with node==nil",
            mode: SyncFromCluster,
            node: nil,
            fake: fakeAPIs{
                nodeRet: nodeWithCIDRRange,
            },
            wantError: false,
        },
    } {
        cidr, _ := cidrset.NewCIDRSet(clusterCIDRRange, 24)
        sync := New(&tc.fake, &tc.fake, &tc.fake, tc.mode, "node1", cidr)
        doneChan := make(chan struct{})

        // Do a single step of the loop.
        go sync.Loop(doneChan)
        sync.Update(tc.node)
        close(sync.opChan)
        <-doneChan
        tc.fake.dumpTrace()

        if !reflect.DeepEqual(tc.fake.events, tc.events) {
            t.Errorf("%v, %v; fake.events = %#v, want %#v", tc.desc, tc.mode, tc.fake.events, tc.events)
        }

        var hasError bool
        for _, r := range tc.fake.results {
            hasError = hasError || (r != nil)
        }
        if hasError != tc.wantError {
            t.Errorf("%v, %v; hasError = %t, errors = %v, want %t",
                tc.desc, tc.mode, hasError, tc.fake.events, tc.wantError)
        }
    }
}

func TestNodeSyncResync(t *testing.T) {
    fake := &fakeAPIs{
        nodeRet:       nodeWithCIDRRange,
        resyncTimeout: time.Millisecond,
        reportChan:    make(chan struct{}),
    }
    cidr, _ := cidrset.NewCIDRSet(clusterCIDRRange, 24)
    sync := New(fake, fake, fake, SyncFromCluster, "node1", cidr)
    doneChan := make(chan struct{})

    go sync.Loop(doneChan)
    <-fake.reportChan
    close(sync.opChan)
    // Unblock loop().
    go func() {
        <-fake.reportChan
    }()
    <-doneChan
    fake.dumpTrace()
}

func TestNodeSyncDelete(t *testing.T) {
    t.Parallel()

    for _, tc := range []struct {
        desc string
        mode NodeSyncMode
        node *v1.Node
        fake fakeAPIs
    }{
        {
            desc: "delete",
            mode: SyncFromCluster,
            node: nodeWithCIDRRange,
        },
        {
            desc: "delete without CIDR range",
            mode: SyncFromCluster,
            node: nodeWithoutCIDRRange,
        },
        {
            desc: "delete with invalid CIDR range",
            mode: SyncFromCluster,
            node: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{Name: "node1"},
                Spec:       v1.NodeSpec{PodCIDR: "invalid"},
            },
        },
    } {
        cidr, _ := cidrset.NewCIDRSet(clusterCIDRRange, 24)
        sync := New(&tc.fake, &tc.fake, &tc.fake, tc.mode, "node1", cidr)
        doneChan := make(chan struct{})

        // Do a single step of the loop.
        go sync.Loop(doneChan)
        sync.Delete(tc.node)
        <-doneChan
        tc.fake.dumpTrace()

        /*
            if !reflect.DeepEqual(tc.fake.events, tc.events) {
                t.Errorf("%v, %v; fake.events = %#v, want %#v", tc.desc, tc.mode, tc.fake.events, tc.events)
            }

            var hasError bool
            for _, r := range tc.fake.results {
                hasError = hasError || (r != nil)
            }
            if hasError != tc.wantError {
                t.Errorf("%v, %v; hasError = %t, errors = %v, want %t",
                    tc.desc, tc.mode, hasError, tc.fake.events, tc.wantError)
            }
        */
    }
}