add prune and remove unused packages

Michelle Au
2019-03-08 14:54:43 -08:00
parent f59b58d164
commit 8c0accad66
17240 changed files with 27 additions and 4750030 deletions


@@ -1,7 +0,0 @@
approvers:
- sig-auth-authenticators-approvers
reviewers:
- sig-auth-authenticators-reviewers
labels:
- sig/auth


@@ -1,69 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package auth_test
import (
"io/ioutil"
"os"
"reflect"
"testing"
clientauth "k8s.io/client-go/tools/auth"
)
func TestLoadFromFile(t *testing.T) {
loadAuthInfoTests := []struct {
authData string
authInfo *clientauth.Info
expectErr bool
}{
{
`{"user": "user", "password": "pass"}`,
&clientauth.Info{User: "user", Password: "pass"},
false,
},
{
"", nil, true,
},
}
for _, loadAuthInfoTest := range loadAuthInfoTests {
tt := loadAuthInfoTest
aifile, err := ioutil.TempFile("", "testAuthInfo")
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if tt.authData != "missing" {
defer os.Remove(aifile.Name())
defer aifile.Close()
_, err = aifile.WriteString(tt.authData)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
} else {
aifile.Close()
os.Remove(aifile.Name())
}
authInfo, err := clientauth.LoadFromFile(aifile.Name())
gotErr := err != nil
if gotErr != tt.expectErr {
t.Errorf("expected errorness: %v, actual errorness: %v", tt.expectErr, gotErr)
}
if !reflect.DeepEqual(authInfo, tt.authInfo) {
t.Errorf("Expected %v, got %v", tt.authInfo, authInfo)
}
}
}
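For readers without the client-go tree at hand, the deleted test above exercises clientauth.LoadFromFile, which reads a JSON file into an Info struct. Below is a minimal self-contained sketch of the same load-from-file pattern; the Info type here is a hypothetical stand-in reduced to the two fields the test touches, not the actual clientauth.Info definition.

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
)

// Info is a hypothetical stand-in for clientauth.Info, reduced to the
// two fields the deleted test exercises.
type Info struct {
	User     string `json:"user"`
	Password string `json:"password"`
}

// loadFromFile mirrors the shape of LoadFromFile: read the file,
// unmarshal the JSON, return the parsed info or the first error.
func loadFromFile(path string) (*Info, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	info := &Info{}
	if err := json.Unmarshal(data, info); err != nil {
		return nil, err
	}
	return info, nil
}

func main() {
	f, err := ioutil.TempFile("", "testAuthInfo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	f.WriteString(`{"user": "user", "password": "pass"}`)
	f.Close()

	info, err := loadFromFile(f.Name())
	fmt.Println(info, err) // &{user pass} <nil>
}
```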


@@ -1,50 +0,0 @@
approvers:
- thockin
- lavalamp
- smarterclayton
- wojtek-t
- deads2k
- caesarxuchao
- liggitt
- ncdc
reviewers:
- thockin
- lavalamp
- smarterclayton
- wojtek-t
- deads2k
- brendandburns
- derekwaynecarr
- caesarxuchao
- mikedanese
- liggitt
- nikhiljindal
- erictune
- davidopp
- pmorie
- kargakis
- janetkuo
- justinsb
- eparis
- soltysh
- jsafrane
- dims
- madhusudancs
- hongchaodeng
- krousey
- markturansky
- fgrzadkowski
- xiang90
- mml
- ingvagabund
- resouer
- jessfraz
- david-mcmahon
- mfojtik
- '249043822'
- lixiaobing10051267
- ddysher
- mqliang
- feihujiang
- sdminonne
- ncdc


@@ -1,405 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"fmt"
"math/rand"
"sync"
"testing"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
fcache "k8s.io/client-go/tools/cache/testing"
"github.com/google/gofuzz"
)
func Example() {
// source simulates an apiserver object endpoint.
source := fcache.NewFakeControllerSource()
// This will hold the downstream state, as we know it.
downstream := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
// This will hold incoming changes. Note how we pass downstream in as a
// KeyLister, that way resync operations will result in the correct set
// of update/delete deltas.
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, downstream)
// Let's do threadsafe output to get predictable test results.
deletionCounter := make(chan string, 1000)
cfg := &Config{
Queue: fifo,
ListerWatcher: source,
ObjectType: &v1.Pod{},
FullResyncPeriod: time.Millisecond * 100,
RetryOnError: false,
// Let's implement a simple controller that just deletes
// everything that comes in.
Process: func(obj interface{}) error {
// Obj is from the Pop method of the Queue we make above.
newest := obj.(Deltas).Newest()
if newest.Type != Deleted {
// Update our downstream store.
err := downstream.Add(newest.Object)
if err != nil {
return err
}
// Delete this object.
source.Delete(newest.Object.(runtime.Object))
} else {
// Update our downstream store.
err := downstream.Delete(newest.Object)
if err != nil {
return err
}
// fifo's KeyOf is easiest, because it handles
// DeletedFinalStateUnknown markers.
key, err := fifo.KeyOf(newest.Object)
if err != nil {
return err
}
// Report this deletion.
deletionCounter <- key
}
return nil
},
}
// Create the controller and run it until we close stop.
stop := make(chan struct{})
defer close(stop)
go New(cfg).Run(stop)
// Let's add a few objects to the source.
testIDs := []string{"a-hello", "b-controller", "c-framework"}
for _, name := range testIDs {
// Note that these pods are not valid-- the fake source doesn't
// call validation or perform any other checking.
source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: name}})
}
// Let's wait for the controller to process the things we just added.
outputSet := sets.String{}
for i := 0; i < len(testIDs); i++ {
outputSet.Insert(<-deletionCounter)
}
for _, key := range outputSet.List() {
fmt.Println(key)
}
// Output:
// a-hello
// b-controller
// c-framework
}
func ExampleNewInformer() {
// source simulates an apiserver object endpoint.
source := fcache.NewFakeControllerSource()
// Let's do threadsafe output to get predictable test results.
deletionCounter := make(chan string, 1000)
// Make a controller that immediately deletes anything added to it, and
// logs anything deleted.
_, controller := NewInformer(
source,
&v1.Pod{},
time.Millisecond*100,
ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
source.Delete(obj.(runtime.Object))
},
DeleteFunc: func(obj interface{}) {
key, err := DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
key = "oops something went wrong with the key"
}
// Report this deletion.
deletionCounter <- key
},
},
)
// Run the controller until we close stop.
stop := make(chan struct{})
defer close(stop)
go controller.Run(stop)
// Let's add a few objects to the source.
testIDs := []string{"a-hello", "b-controller", "c-framework"}
for _, name := range testIDs {
// Note that these pods are not valid-- the fake source doesn't
// call validation or perform any other checking.
source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: name}})
}
// Let's wait for the controller to process the things we just added.
outputSet := sets.String{}
for i := 0; i < len(testIDs); i++ {
outputSet.Insert(<-deletionCounter)
}
for _, key := range outputSet.List() {
fmt.Println(key)
}
// Output:
// a-hello
// b-controller
// c-framework
}
func TestHammerController(t *testing.T) {
// This test executes a bunch of requests through the fake source and
// controller framework to make sure there's no locking/threading
// errors. If an error happens, it should hang forever or trigger the
// race detector.
// source simulates an apiserver object endpoint.
source := fcache.NewFakeControllerSource()
// Let's do threadsafe output to get predictable test results.
outputSetLock := sync.Mutex{}
// map of key to operations done on the key
outputSet := map[string][]string{}
recordFunc := func(eventType string, obj interface{}) {
key, err := DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
t.Errorf("something wrong with key: %v", err)
key = "oops something went wrong with the key"
}
// Record some output when items are deleted.
outputSetLock.Lock()
defer outputSetLock.Unlock()
outputSet[key] = append(outputSet[key], eventType)
}
// Make a controller which just logs all the changes it gets.
_, controller := NewInformer(
source,
&v1.Pod{},
time.Millisecond*100,
ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { recordFunc("add", obj) },
UpdateFunc: func(oldObj, newObj interface{}) { recordFunc("update", newObj) },
DeleteFunc: func(obj interface{}) { recordFunc("delete", obj) },
},
)
if controller.HasSynced() {
t.Errorf("Expected HasSynced() to return false before we started the controller")
}
// Run the controller until we close stop.
stop := make(chan struct{})
go controller.Run(stop)
// Let's wait for the controller to do its initial sync
wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
return controller.HasSynced(), nil
})
if !controller.HasSynced() {
t.Errorf("Expected HasSynced() to return true after the initial sync")
}
wg := sync.WaitGroup{}
const threads = 3
wg.Add(threads)
for i := 0; i < threads; i++ {
go func() {
defer wg.Done()
// Let's add a few objects to the source.
currentNames := sets.String{}
rs := rand.NewSource(rand.Int63())
f := fuzz.New().NilChance(.5).NumElements(0, 2).RandSource(rs)
r := rand.New(rs) // Mustn't use r and f concurrently!
for i := 0; i < 100; i++ {
var name string
var isNew bool
if currentNames.Len() == 0 || r.Intn(3) == 1 {
f.Fuzz(&name)
isNew = true
} else {
l := currentNames.List()
name = l[r.Intn(len(l))]
}
pod := &v1.Pod{}
f.Fuzz(pod)
pod.ObjectMeta.Name = name
pod.ObjectMeta.Namespace = "default"
// Add, update, or delete randomly.
// Note that these pods are not valid-- the fake source doesn't
// call validation or perform any other checking.
if isNew {
currentNames.Insert(name)
source.Add(pod)
continue
}
switch r.Intn(2) {
case 0:
currentNames.Insert(name)
source.Modify(pod)
case 1:
currentNames.Delete(name)
source.Delete(pod)
}
}
}()
}
wg.Wait()
// Let's wait for the controller to finish processing the things we just added.
// TODO: look in the queue to see how many items need to be processed.
time.Sleep(100 * time.Millisecond)
close(stop)
// TODO: Verify that no goroutines were leaked here and that everything shut
// down cleanly.
outputSetLock.Lock()
t.Logf("got: %#v", outputSet)
}
func TestUpdate(t *testing.T) {
// This test is going to exercise the various paths that result in a
// call to update.
// source simulates an apiserver object endpoint.
source := fcache.NewFakeControllerSource()
const (
FROM = "from"
TO = "to"
)
// These are the transitions we expect to see; because this is
// asynchronous, there are a lot of valid possibilities.
type pair struct{ from, to string }
allowedTransitions := map[pair]bool{
{FROM, TO}: true,
// Because a resync can happen when we've already observed one
// of the above but before the item is deleted.
{TO, TO}: true,
// Because a resync could happen before we observe an update.
{FROM, FROM}: true,
}
pod := func(name, check string, final bool) *v1.Pod {
p := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{"check": check},
},
}
if final {
p.Labels["final"] = "true"
}
return p
}
deletePod := func(p *v1.Pod) bool {
return p.Labels["final"] == "true"
}
tests := []func(string){
func(name string) {
name = "a-" + name
source.Add(pod(name, FROM, false))
source.Modify(pod(name, TO, true))
},
}
const threads = 3
var testDoneWG sync.WaitGroup
testDoneWG.Add(threads * len(tests))
// Make a controller that deletes things once it observes an update.
// It calls Done() on the wait group on deletions so we can tell when
// everything we've added has been deleted.
watchCh := make(chan struct{})
_, controller := NewInformer(
&testLW{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
watch, err := source.Watch(options)
close(watchCh)
return watch, err
},
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return source.List(options)
},
},
&v1.Pod{},
0,
ResourceEventHandlerFuncs{
UpdateFunc: func(oldObj, newObj interface{}) {
o, n := oldObj.(*v1.Pod), newObj.(*v1.Pod)
from, to := o.Labels["check"], n.Labels["check"]
if !allowedTransitions[pair{from, to}] {
t.Errorf("observed transition %q -> %q for %v", from, to, n.Name)
}
if deletePod(n) {
source.Delete(n)
}
},
DeleteFunc: func(obj interface{}) {
testDoneWG.Done()
},
},
)
// Run the controller until we close stop.
// Once Run() is called, calls to testDoneWG.Done() might start, so
// all testDoneWG.Add() calls must happen before this point
stop := make(chan struct{})
go controller.Run(stop)
<-watchCh
// run every test a few times, in parallel
var wg sync.WaitGroup
wg.Add(threads * len(tests))
for i := 0; i < threads; i++ {
for j, f := range tests {
go func(name string, f func(string)) {
defer wg.Done()
f(name)
}(fmt.Sprintf("%v-%v", i, j), f)
}
}
wg.Wait()
// Let's wait for the controller to process the things we just added.
testDoneWG.Wait()
close(stop)
}
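The deleted controller tests above all follow one pattern: a Config couples a Queue with a Process callback, and the controller loop pops items and hands them to Process. Here is a toy stdlib-only sketch of that pop-and-process loop; workQueue and its pop method are illustrative stand-ins, not the cache.Queue API.

```go
package main

import "fmt"

// workQueue is a toy stand-in for the queue the Config embeds:
// pop blocks until an item is available, then hands it to process.
type workQueue chan string

func (q workQueue) pop(process func(item string) error) error {
	return process(<-q)
}

func main() {
	q := make(workQueue, 10)
	q <- "a-hello"
	q <- "b-controller"
	q <- "c-framework"

	// The controller loop: pop items and run the process callback on
	// each, the same shape as Config.Process in the examples above.
	for i := 0; i < 3; i++ {
		q.pop(func(item string) error {
			fmt.Println("processed", item)
			return nil
		})
	}
}
```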


@@ -1,492 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"fmt"
"reflect"
"testing"
"time"
)
// helper function to reduce stuttering
func testPop(f *DeltaFIFO) testFifoObject {
return Pop(f).(Deltas).Newest().Object.(testFifoObject)
}
// keyLookupFunc adapts a raw function to be a KeyLookup.
type keyLookupFunc func() []testFifoObject
// ListKeys just calls kl.
func (kl keyLookupFunc) ListKeys() []string {
result := []string{}
for _, fifoObj := range kl() {
result = append(result, fifoObj.name)
}
return result
}
// GetByKey returns the object with the given name, if it exists in the list returned by kl.
func (kl keyLookupFunc) GetByKey(key string) (interface{}, bool, error) {
for _, v := range kl() {
if v.name == key {
return v, true, nil
}
}
return nil, false, nil
}
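keyLookupFunc above is the standard Go device of letting a bare function satisfy an interface, the same trick as http.HandlerFunc. A minimal sketch of the pattern in isolation (Lister and listerFunc are illustrative names):

```go
package main

import "fmt"

// Lister is a one-method interface, analogous to the key lister
// that NewDeltaFIFO accepts.
type Lister interface {
	ListKeys() []string
}

// listerFunc adapts a plain function to Lister by implementing the
// method on the function type itself.
type listerFunc func() []string

func (f listerFunc) ListKeys() []string { return f() }

func main() {
	var l Lister = listerFunc(func() []string {
		return []string{"foo", "bar"}
	})
	fmt.Println(l.ListKeys()) // [foo bar]
}
```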
func TestDeltaFIFO_basic(t *testing.T) {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
const amount = 500
go func() {
for i := 0; i < amount; i++ {
f.Add(mkFifoObj(string([]rune{'a', rune(i)}), i+1))
}
}()
go func() {
for u := uint64(0); u < amount; u++ {
f.Add(mkFifoObj(string([]rune{'b', rune(u)}), u+1))
}
}()
lastInt := int(0)
lastUint := uint64(0)
for i := 0; i < amount*2; i++ {
switch obj := testPop(f).val.(type) {
case int:
if obj <= lastInt {
t.Errorf("got %v (int) out of order, last was %v", obj, lastInt)
}
lastInt = obj
case uint64:
if obj <= lastUint {
t.Errorf("got %v (uint) out of order, last was %v", obj, lastUint)
} else {
lastUint = obj
}
default:
t.Fatalf("unexpected type %#v", obj)
}
}
}
func TestDeltaFIFO_requeueOnPop(t *testing.T) {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
f.Add(mkFifoObj("foo", 10))
_, err := f.Pop(func(obj interface{}) error {
if obj.(Deltas)[0].Object.(testFifoObject).name != "foo" {
t.Fatalf("unexpected object: %#v", obj)
}
return ErrRequeue{Err: nil}
})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if _, ok, err := f.GetByKey("foo"); !ok || err != nil {
t.Fatalf("object should have been requeued: %t %v", ok, err)
}
_, err = f.Pop(func(obj interface{}) error {
if obj.(Deltas)[0].Object.(testFifoObject).name != "foo" {
t.Fatalf("unexpected object: %#v", obj)
}
return ErrRequeue{Err: fmt.Errorf("test error")}
})
if err == nil || err.Error() != "test error" {
t.Fatalf("unexpected error: %v", err)
}
if _, ok, err := f.GetByKey("foo"); !ok || err != nil {
t.Fatalf("object should have been requeued: %t %v", ok, err)
}
_, err = f.Pop(func(obj interface{}) error {
if obj.(Deltas)[0].Object.(testFifoObject).name != "foo" {
t.Fatalf("unexpected object: %#v", obj)
}
return nil
})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if _, ok, err := f.GetByKey("foo"); ok || err != nil {
t.Fatalf("object should have been removed: %t %v", ok, err)
}
}
func TestDeltaFIFO_addUpdate(t *testing.T) {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
f.Add(mkFifoObj("foo", 10))
f.Update(mkFifoObj("foo", 12))
f.Delete(mkFifoObj("foo", 15))
if e, a := []interface{}{mkFifoObj("foo", 15)}, f.List(); !reflect.DeepEqual(e, a) {
t.Errorf("Expected %+v, got %+v", e, a)
}
if e, a := []string{"foo"}, f.ListKeys(); !reflect.DeepEqual(e, a) {
t.Errorf("Expected %+v, got %+v", e, a)
}
got := make(chan testFifoObject, 2)
go func() {
for {
obj := testPop(f)
t.Logf("got a thing %#v", obj)
t.Logf("D len: %v", len(f.queue))
got <- obj
}
}()
first := <-got
if e, a := 15, first.val; e != a {
t.Errorf("Didn't get updated value (%v), got %v", e, a)
}
select {
case unexpected := <-got:
t.Errorf("Got second value %v", unexpected.val)
case <-time.After(50 * time.Millisecond):
}
_, exists, _ := f.Get(mkFifoObj("foo", ""))
if exists {
t.Errorf("item did not get removed")
}
}
func TestDeltaFIFO_enqueueingNoLister(t *testing.T) {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
f.Add(mkFifoObj("foo", 10))
f.Update(mkFifoObj("bar", 15))
f.Add(mkFifoObj("qux", 17))
f.Delete(mkFifoObj("qux", 18))
// This delete does not enqueue anything because baz doesn't exist.
f.Delete(mkFifoObj("baz", 20))
expectList := []int{10, 15, 18}
for _, expect := range expectList {
if e, a := expect, testPop(f).val; e != a {
t.Errorf("Didn't get updated value (%v), got %v", e, a)
}
}
if e, a := 0, len(f.items); e != a {
t.Errorf("queue unexpectedly not empty: %v != %v\n%#v", e, a, f.items)
}
}
func TestDeltaFIFO_enqueueingWithLister(t *testing.T) {
f := NewDeltaFIFO(
testFifoObjectKeyFunc,
keyLookupFunc(func() []testFifoObject {
return []testFifoObject{mkFifoObj("foo", 5), mkFifoObj("bar", 6), mkFifoObj("baz", 7)}
}),
)
f.Add(mkFifoObj("foo", 10))
f.Update(mkFifoObj("bar", 15))
// This delete does enqueue the deletion, because "baz" is in the key lister.
f.Delete(mkFifoObj("baz", 20))
expectList := []int{10, 15, 20}
for _, expect := range expectList {
if e, a := expect, testPop(f).val; e != a {
t.Errorf("Didn't get updated value (%v), got %v", e, a)
}
}
if e, a := 0, len(f.items); e != a {
t.Errorf("queue unexpectedly not empty: %v != %v", e, a)
}
}
func TestDeltaFIFO_addReplace(t *testing.T) {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
f.Add(mkFifoObj("foo", 10))
f.Replace([]interface{}{mkFifoObj("foo", 15)}, "0")
got := make(chan testFifoObject, 2)
go func() {
for {
got <- testPop(f)
}
}()
first := <-got
if e, a := 15, first.val; e != a {
t.Errorf("Didn't get updated value (%v), got %v", e, a)
}
select {
case unexpected := <-got:
t.Errorf("Got second value %v", unexpected.val)
case <-time.After(50 * time.Millisecond):
}
_, exists, _ := f.Get(mkFifoObj("foo", ""))
if exists {
t.Errorf("item did not get removed")
}
}
func TestDeltaFIFO_ResyncNonExisting(t *testing.T) {
f := NewDeltaFIFO(
testFifoObjectKeyFunc,
keyLookupFunc(func() []testFifoObject {
return []testFifoObject{mkFifoObj("foo", 5)}
}),
)
f.Delete(mkFifoObj("foo", 10))
f.Resync()
deltas := f.items["foo"]
if len(deltas) != 1 {
t.Fatalf("unexpected deltas length: %v", deltas)
}
if deltas[0].Type != Deleted {
t.Errorf("unexpected delta: %v", deltas[0])
}
}
func TestDeltaFIFO_DeleteExistingNonPropagated(t *testing.T) {
f := NewDeltaFIFO(
testFifoObjectKeyFunc,
keyLookupFunc(func() []testFifoObject {
return []testFifoObject{}
}),
)
f.Add(mkFifoObj("foo", 5))
f.Delete(mkFifoObj("foo", 6))
deltas := f.items["foo"]
if len(deltas) != 2 {
t.Fatalf("unexpected deltas length: %v", deltas)
}
if deltas[len(deltas)-1].Type != Deleted {
t.Errorf("unexpected delta: %v", deltas[len(deltas)-1])
}
}
func TestDeltaFIFO_ReplaceMakesDeletions(t *testing.T) {
f := NewDeltaFIFO(
testFifoObjectKeyFunc,
keyLookupFunc(func() []testFifoObject {
return []testFifoObject{mkFifoObj("foo", 5), mkFifoObj("bar", 6), mkFifoObj("baz", 7)}
}),
)
f.Delete(mkFifoObj("baz", 10))
f.Replace([]interface{}{mkFifoObj("foo", 5)}, "0")
expectedList := []Deltas{
{{Deleted, mkFifoObj("baz", 10)}},
{{Sync, mkFifoObj("foo", 5)}},
// Since "bar" didn't have a delete event and wasn't in the Replace list
// it should get a tombstone key with the right Obj.
{{Deleted, DeletedFinalStateUnknown{Key: "bar", Obj: mkFifoObj("bar", 6)}}},
}
for _, expected := range expectedList {
cur := Pop(f).(Deltas)
if e, a := expected, cur; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %#v, got %#v", e, a)
}
}
}
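The property TestDeltaFIFO_ReplaceMakesDeletions pins down: on Replace, every key the lister knows about that is missing from the replacement list must yield a synthetic delete carrying a tombstone, since the real final state of that object was never observed. A compact sketch of that diffing step (the names are illustrative, not the DeltaFIFO internals):

```go
package main

import "fmt"

// tombstone marks an object we know was deleted but whose final
// state we never observed, like DeletedFinalStateUnknown.
type tombstone struct{ Key string }

// replaceDeletions returns a tombstone for every known key that is
// absent from the replacement set.
func replaceDeletions(known []string, replacement map[string]bool) []tombstone {
	var out []tombstone
	for _, k := range known {
		if !replacement[k] {
			out = append(out, tombstone{Key: k})
		}
	}
	return out
}

func main() {
	known := []string{"foo", "bar", "baz"}
	replacement := map[string]bool{"foo": true}
	fmt.Println(replaceDeletions(known, replacement)) // [{bar} {baz}]
}
```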
func TestDeltaFIFO_UpdateResyncRace(t *testing.T) {
f := NewDeltaFIFO(
testFifoObjectKeyFunc,
keyLookupFunc(func() []testFifoObject {
return []testFifoObject{mkFifoObj("foo", 5)}
}),
)
f.Update(mkFifoObj("foo", 6))
f.Resync()
expectedList := []Deltas{
{{Updated, mkFifoObj("foo", 6)}},
}
for _, expected := range expectedList {
cur := Pop(f).(Deltas)
if e, a := expected, cur; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %#v, got %#v", e, a)
}
}
}
func TestDeltaFIFO_HasSyncedCorrectOnDeletion(t *testing.T) {
f := NewDeltaFIFO(
testFifoObjectKeyFunc,
keyLookupFunc(func() []testFifoObject {
return []testFifoObject{mkFifoObj("foo", 5), mkFifoObj("bar", 6), mkFifoObj("baz", 7)}
}),
)
f.Replace([]interface{}{mkFifoObj("foo", 5)}, "0")
expectedList := []Deltas{
{{Sync, mkFifoObj("foo", 5)}},
// Since "bar" didn't have a delete event and wasn't in the Replace list
// it should get a tombstone key with the right Obj.
{{Deleted, DeletedFinalStateUnknown{Key: "bar", Obj: mkFifoObj("bar", 6)}}},
}
for _, expected := range expectedList {
if f.HasSynced() {
t.Errorf("Expected HasSynced to be false")
}
cur := Pop(f).(Deltas)
if e, a := expected, cur; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %#v, got %#v", e, a)
}
}
if f.HasSynced() {
t.Errorf("Expected HasSynced to be true")
}
}
func TestDeltaFIFO_detectLineJumpers(t *testing.T) {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
f.Add(mkFifoObj("foo", 10))
f.Add(mkFifoObj("bar", 1))
f.Add(mkFifoObj("foo", 11))
f.Add(mkFifoObj("foo", 13))
f.Add(mkFifoObj("zab", 30))
if e, a := 13, testPop(f).val; a != e {
t.Fatalf("expected %d, got %d", e, a)
}
f.Add(mkFifoObj("foo", 14)) // ensure foo doesn't jump back in line
if e, a := 1, testPop(f).val; a != e {
t.Fatalf("expected %d, got %d", e, a)
}
if e, a := 30, testPop(f).val; a != e {
t.Fatalf("expected %d, got %d", e, a)
}
if e, a := 14, testPop(f).val; a != e {
t.Fatalf("expected %d, got %d", e, a)
}
}
func TestDeltaFIFO_addIfNotPresent(t *testing.T) {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
f.Add(mkFifoObj("b", 3))
b3 := Pop(f)
f.Add(mkFifoObj("c", 4))
c4 := Pop(f)
if e, a := 0, len(f.items); e != a {
t.Fatalf("Expected %v, got %v items in queue", e, a)
}
f.Add(mkFifoObj("a", 1))
f.Add(mkFifoObj("b", 2))
f.AddIfNotPresent(b3)
f.AddIfNotPresent(c4)
if e, a := 3, len(f.items); a != e {
t.Fatalf("expected queue length %d, got %d", e, a)
}
expectedValues := []int{1, 2, 4}
for _, expected := range expectedValues {
if actual := testPop(f).val; actual != expected {
t.Fatalf("expected value %d, got %d", expected, actual)
}
}
}
func TestDeltaFIFO_KeyOf(t *testing.T) {
f := DeltaFIFO{keyFunc: testFifoObjectKeyFunc}
table := []struct {
obj interface{}
key string
}{
{obj: testFifoObject{name: "A"}, key: "A"},
{obj: DeletedFinalStateUnknown{Key: "B", Obj: nil}, key: "B"},
{obj: Deltas{{Object: testFifoObject{name: "C"}}}, key: "C"},
{obj: Deltas{{Object: DeletedFinalStateUnknown{Key: "D", Obj: nil}}}, key: "D"},
}
for _, item := range table {
got, err := f.KeyOf(item.obj)
if err != nil {
t.Errorf("Unexpected error for %q: %v", item.obj, err)
continue
}
if e, a := item.key, got; e != a {
t.Errorf("Expected %v, got %v", e, a)
}
}
}
func TestDeltaFIFO_HasSynced(t *testing.T) {
tests := []struct {
actions []func(f *DeltaFIFO)
expectedSynced bool
}{
{
actions: []func(f *DeltaFIFO){},
expectedSynced: false,
},
{
actions: []func(f *DeltaFIFO){
func(f *DeltaFIFO) { f.Add(mkFifoObj("a", 1)) },
},
expectedSynced: true,
},
{
actions: []func(f *DeltaFIFO){
func(f *DeltaFIFO) { f.Replace([]interface{}{}, "0") },
},
expectedSynced: true,
},
{
actions: []func(f *DeltaFIFO){
func(f *DeltaFIFO) { f.Replace([]interface{}{mkFifoObj("a", 1), mkFifoObj("b", 2)}, "0") },
},
expectedSynced: false,
},
{
actions: []func(f *DeltaFIFO){
func(f *DeltaFIFO) { f.Replace([]interface{}{mkFifoObj("a", 1), mkFifoObj("b", 2)}, "0") },
func(f *DeltaFIFO) { Pop(f) },
},
expectedSynced: false,
},
{
actions: []func(f *DeltaFIFO){
func(f *DeltaFIFO) { f.Replace([]interface{}{mkFifoObj("a", 1), mkFifoObj("b", 2)}, "0") },
func(f *DeltaFIFO) { Pop(f) },
func(f *DeltaFIFO) { Pop(f) },
},
expectedSynced: true,
},
}
for i, test := range tests {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
for _, action := range test.actions {
action(f)
}
if e, a := test.expectedSynced, f.HasSynced(); a != e {
t.Errorf("test case %v failed, expected: %v , got %v", i, e, a)
}
}
}


@@ -1,189 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"reflect"
"testing"
"time"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
)
func TestTTLExpirationBasic(t *testing.T) {
testObj := testStoreObject{id: "foo", val: "bar"}
deleteChan := make(chan string, 1)
ttlStore := NewFakeExpirationStore(
testStoreKeyFunc, deleteChan,
&FakeExpirationPolicy{
NeverExpire: sets.NewString(),
RetrieveKeyFunc: func(obj interface{}) (string, error) {
return obj.(*timestampedEntry).obj.(testStoreObject).id, nil
},
},
clock.RealClock{},
)
err := ttlStore.Add(testObj)
if err != nil {
t.Errorf("Unable to add obj %#v", testObj)
}
item, exists, err := ttlStore.Get(testObj)
if err != nil {
t.Errorf("Failed to get from store, %v", err)
}
if exists || item != nil {
t.Errorf("Got unexpected item %#v", item)
}
key, _ := testStoreKeyFunc(testObj)
select {
case delKey := <-deleteChan:
if delKey != key {
t.Errorf("Unexpected delete for key %s", key)
}
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("Unexpected timeout waiting on delete")
}
close(deleteChan)
}
func TestReAddExpiredItem(t *testing.T) {
deleteChan := make(chan string, 1)
exp := &FakeExpirationPolicy{
NeverExpire: sets.NewString(),
RetrieveKeyFunc: func(obj interface{}) (string, error) {
return obj.(*timestampedEntry).obj.(testStoreObject).id, nil
},
}
ttlStore := NewFakeExpirationStore(
testStoreKeyFunc, deleteChan, exp, clock.RealClock{})
testKey := "foo"
testObj := testStoreObject{id: testKey, val: "bar"}
err := ttlStore.Add(testObj)
if err != nil {
t.Errorf("Unable to add obj %#v", testObj)
}
// This get will expire the item.
item, exists, err := ttlStore.Get(testObj)
if err != nil {
t.Errorf("Failed to get from store, %v", err)
}
if exists || item != nil {
t.Errorf("Got unexpected item %#v", item)
}
key, _ := testStoreKeyFunc(testObj)
differentValue := "different_bar"
err = ttlStore.Add(
testStoreObject{id: testKey, val: differentValue})
if err != nil {
t.Errorf("Failed to add second value")
}
select {
case delKey := <-deleteChan:
if delKey != key {
t.Errorf("Unexpected delete for key %s", key)
}
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("Unexpected timeout waiting on delete")
}
exp.NeverExpire = sets.NewString(testKey)
item, exists, err = ttlStore.GetByKey(testKey)
if err != nil {
t.Errorf("Failed to get from store, %v", err)
}
if !exists || item == nil || item.(testStoreObject).val != differentValue {
t.Errorf("Got unexpected item %#v", item)
}
close(deleteChan)
}
func TestTTLList(t *testing.T) {
testObjs := []testStoreObject{
{id: "foo", val: "bar"},
{id: "foo1", val: "bar1"},
{id: "foo2", val: "bar2"},
}
expireKeys := sets.NewString(testObjs[0].id, testObjs[2].id)
deleteChan := make(chan string, len(testObjs))
defer close(deleteChan)
ttlStore := NewFakeExpirationStore(
testStoreKeyFunc, deleteChan,
&FakeExpirationPolicy{
NeverExpire: sets.NewString(testObjs[1].id),
RetrieveKeyFunc: func(obj interface{}) (string, error) {
return obj.(*timestampedEntry).obj.(testStoreObject).id, nil
},
},
clock.RealClock{},
)
for _, obj := range testObjs {
err := ttlStore.Add(obj)
if err != nil {
t.Errorf("Unable to add obj %#v", obj)
}
}
listObjs := ttlStore.List()
if len(listObjs) != 1 || !reflect.DeepEqual(listObjs[0], testObjs[1]) {
t.Errorf("List returned unexpected results %#v", listObjs)
}
// Make sure all our deletes come through at an acceptable rate (1/100ms)
for expireKeys.Len() != 0 {
select {
case delKey := <-deleteChan:
if !expireKeys.Has(delKey) {
t.Errorf("Unexpected delete for key %s", delKey)
}
expireKeys.Delete(delKey)
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("Unexpected timeout waiting on delete")
return
}
}
}
func TestTTLPolicy(t *testing.T) {
fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
ttl := 30 * time.Second
exactlyOnTTL := fakeTime.Add(-ttl)
expiredTime := fakeTime.Add(-(ttl + 1))
policy := TTLPolicy{ttl, clock.NewFakeClock(fakeTime)}
fakeTimestampedEntry := &timestampedEntry{obj: struct{}{}, timestamp: exactlyOnTTL}
if policy.IsExpired(fakeTimestampedEntry) {
t.Errorf("TTL cache should not expire entries exactly on ttl")
}
fakeTimestampedEntry.timestamp = fakeTime
if policy.IsExpired(fakeTimestampedEntry) {
t.Errorf("TTL Cache should not expire entries before ttl")
}
fakeTimestampedEntry.timestamp = expiredTime
if !policy.IsExpired(fakeTimestampedEntry) {
t.Errorf("TTL Cache should expire entries older than ttl")
}
for _, ttl = range []time.Duration{0, -1} {
policy.Ttl = ttl
if policy.IsExpired(fakeTimestampedEntry) {
t.Errorf("TTL policy should only expire entries when initialized with a ttl > 0")
}
}
}
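TestTTLPolicy fixes the boundary semantics of expiration: an entry whose age equals the TTL is not yet expired, and a TTL of zero or less disables expiration entirely. A standalone sketch of that predicate:

```go
package main

import (
	"fmt"
	"time"
)

// isExpired mirrors the policy contract the test pins down: expire
// only when ttl > 0 and the entry is strictly older than ttl.
func isExpired(timestamp, now time.Time, ttl time.Duration) bool {
	return ttl > 0 && now.Sub(timestamp) > ttl
}

func main() {
	now := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
	ttl := 30 * time.Second

	// Exactly on the TTL: not expired.
	fmt.Println(isExpired(now.Add(-ttl), now, ttl)) // false
	// One nanosecond past the TTL: expired.
	fmt.Println(isExpired(now.Add(-(ttl + 1)), now, ttl)) // true
	// Non-positive TTL never expires anything.
	fmt.Println(isExpired(now.Add(-(ttl + 1)), now, 0)) // false
}
```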


@@ -1,280 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"fmt"
"reflect"
"testing"
"time"
)
func testFifoObjectKeyFunc(obj interface{}) (string, error) {
return obj.(testFifoObject).name, nil
}
type testFifoObject struct {
name string
val interface{}
}
func mkFifoObj(name string, val interface{}) testFifoObject {
return testFifoObject{name: name, val: val}
}
func TestFIFO_basic(t *testing.T) {
f := NewFIFO(testFifoObjectKeyFunc)
const amount = 500
go func() {
for i := 0; i < amount; i++ {
f.Add(mkFifoObj(string([]rune{'a', rune(i)}), i+1))
}
}()
go func() {
for u := uint64(0); u < amount; u++ {
f.Add(mkFifoObj(string([]rune{'b', rune(u)}), u+1))
}
}()
lastInt := int(0)
lastUint := uint64(0)
for i := 0; i < amount*2; i++ {
switch obj := Pop(f).(testFifoObject).val.(type) {
case int:
if obj <= lastInt {
t.Errorf("got %v (int) out of order, last was %v", obj, lastInt)
}
lastInt = obj
case uint64:
if obj <= lastUint {
t.Errorf("got %v (uint) out of order, last was %v", obj, lastUint)
} else {
lastUint = obj
}
default:
t.Fatalf("unexpected type %#v", obj)
}
}
}
func TestFIFO_requeueOnPop(t *testing.T) {
f := NewFIFO(testFifoObjectKeyFunc)
f.Add(mkFifoObj("foo", 10))
_, err := f.Pop(func(obj interface{}) error {
if obj.(testFifoObject).name != "foo" {
t.Fatalf("unexpected object: %#v", obj)
}
return ErrRequeue{Err: nil}
})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if _, ok, err := f.GetByKey("foo"); !ok || err != nil {
t.Fatalf("object should have been requeued: %t %v", ok, err)
}
_, err = f.Pop(func(obj interface{}) error {
if obj.(testFifoObject).name != "foo" {
t.Fatalf("unexpected object: %#v", obj)
}
return ErrRequeue{Err: fmt.Errorf("test error")}
})
if err == nil || err.Error() != "test error" {
t.Fatalf("unexpected error: %v", err)
}
if _, ok, err := f.GetByKey("foo"); !ok || err != nil {
t.Fatalf("object should have been requeued: %t %v", ok, err)
}
_, err = f.Pop(func(obj interface{}) error {
if obj.(testFifoObject).name != "foo" {
t.Fatalf("unexpected object: %#v", obj)
}
return nil
})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if _, ok, err := f.GetByKey("foo"); ok || err != nil {
t.Fatalf("object should have been removed: %t %v", ok, err)
}
}
func TestFIFO_addUpdate(t *testing.T) {
f := NewFIFO(testFifoObjectKeyFunc)
f.Add(mkFifoObj("foo", 10))
f.Update(mkFifoObj("foo", 15))
if e, a := []interface{}{mkFifoObj("foo", 15)}, f.List(); !reflect.DeepEqual(e, a) {
t.Errorf("Expected %+v, got %+v", e, a)
}
if e, a := []string{"foo"}, f.ListKeys(); !reflect.DeepEqual(e, a) {
t.Errorf("Expected %+v, got %+v", e, a)
}
got := make(chan testFifoObject, 2)
go func() {
for {
got <- Pop(f).(testFifoObject)
}
}()
first := <-got
if e, a := 15, first.val; e != a {
t.Errorf("Didn't get updated value (%v), got %v", e, a)
}
select {
case unexpected := <-got:
t.Errorf("Got second value %v", unexpected.val)
case <-time.After(50 * time.Millisecond):
}
_, exists, _ := f.Get(mkFifoObj("foo", ""))
if exists {
t.Errorf("item did not get removed")
}
}
func TestFIFO_addReplace(t *testing.T) {
f := NewFIFO(testFifoObjectKeyFunc)
f.Add(mkFifoObj("foo", 10))
f.Replace([]interface{}{mkFifoObj("foo", 15)}, "15")
got := make(chan testFifoObject, 2)
go func() {
for {
got <- Pop(f).(testFifoObject)
}
}()
first := <-got
if e, a := 15, first.val; e != a {
t.Errorf("Didn't get updated value (%v), got %v", e, a)
}
select {
case unexpected := <-got:
t.Errorf("Got second value %v", unexpected.val)
case <-time.After(50 * time.Millisecond):
}
_, exists, _ := f.Get(mkFifoObj("foo", ""))
if exists {
t.Errorf("item did not get removed")
}
}
func TestFIFO_detectLineJumpers(t *testing.T) {
f := NewFIFO(testFifoObjectKeyFunc)
f.Add(mkFifoObj("foo", 10))
f.Add(mkFifoObj("bar", 1))
f.Add(mkFifoObj("foo", 11))
f.Add(mkFifoObj("foo", 13))
f.Add(mkFifoObj("zab", 30))
if e, a := 13, Pop(f).(testFifoObject).val; a != e {
t.Fatalf("expected %d, got %d", e, a)
}
f.Add(mkFifoObj("foo", 14)) // ensure foo doesn't jump back in line
if e, a := 1, Pop(f).(testFifoObject).val; a != e {
t.Fatalf("expected %d, got %d", e, a)
}
if e, a := 30, Pop(f).(testFifoObject).val; a != e {
t.Fatalf("expected %d, got %d", e, a)
}
if e, a := 14, Pop(f).(testFifoObject).val; a != e {
t.Fatalf("expected %d, got %d", e, a)
}
}
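The line-jumper test documents the FIFO's coalescing rule: re-adding a key that is still queued overwrites its value without moving it forward or backward in line, while a key that has already been popped re-enters at the back. A toy sketch of a queue with that behavior:

```go
package main

import "fmt"

// coalescingFIFO keeps one slot per key: re-adding a queued key
// updates the value but keeps the key's original position in line.
type coalescingFIFO struct {
	queue []string
	items map[string]int
}

func newCoalescingFIFO() *coalescingFIFO {
	return &coalescingFIFO{items: map[string]int{}}
}

func (f *coalescingFIFO) add(key string, val int) {
	if _, ok := f.items[key]; !ok {
		f.queue = append(f.queue, key)
	}
	f.items[key] = val
}

func (f *coalescingFIFO) pop() (string, int) {
	key := f.queue[0]
	f.queue = f.queue[1:]
	val := f.items[key]
	delete(f.items, key)
	return key, val
}

func main() {
	f := newCoalescingFIFO()
	f.add("foo", 10)
	f.add("bar", 1)
	f.add("foo", 13) // coalesces: foo stays first, value becomes 13
	k, v := f.pop()
	fmt.Println(k, v) // foo 13
	f.add("foo", 14)  // foo was popped, so it re-enters behind bar
	k, v = f.pop()
	fmt.Println(k, v) // bar 1
}
```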
func TestFIFO_addIfNotPresent(t *testing.T) {
f := NewFIFO(testFifoObjectKeyFunc)
f.Add(mkFifoObj("a", 1))
f.Add(mkFifoObj("b", 2))
f.AddIfNotPresent(mkFifoObj("b", 3))
f.AddIfNotPresent(mkFifoObj("c", 4))
if e, a := 3, len(f.items); a != e {
t.Fatalf("expected queue length %d, got %d", e, a)
}
expectedValues := []int{1, 2, 4}
for _, expected := range expectedValues {
if actual := Pop(f).(testFifoObject).val; actual != expected {
t.Fatalf("expected value %d, got %d", expected, actual)
}
}
}
func TestFIFO_HasSynced(t *testing.T) {
tests := []struct {
actions []func(f *FIFO)
expectedSynced bool
}{
{
actions: []func(f *FIFO){},
expectedSynced: false,
},
{
actions: []func(f *FIFO){
func(f *FIFO) { f.Add(mkFifoObj("a", 1)) },
},
expectedSynced: true,
},
{
actions: []func(f *FIFO){
func(f *FIFO) { f.Replace([]interface{}{}, "0") },
},
expectedSynced: true,
},
{
actions: []func(f *FIFO){
func(f *FIFO) { f.Replace([]interface{}{mkFifoObj("a", 1), mkFifoObj("b", 2)}, "0") },
},
expectedSynced: false,
},
{
actions: []func(f *FIFO){
func(f *FIFO) { f.Replace([]interface{}{mkFifoObj("a", 1), mkFifoObj("b", 2)}, "0") },
func(f *FIFO) { Pop(f) },
},
expectedSynced: false,
},
{
actions: []func(f *FIFO){
func(f *FIFO) { f.Replace([]interface{}{mkFifoObj("a", 1), mkFifoObj("b", 2)}, "0") },
func(f *FIFO) { Pop(f) },
func(f *FIFO) { Pop(f) },
},
expectedSynced: true,
},
}
for i, test := range tests {
f := NewFIFO(testFifoObjectKeyFunc)
for _, action := range test.actions {
action(f)
}
if e, a := test.expectedSynced, f.HasSynced(); a != e {
t.Errorf("test case %v failed, expected: %v , got %v", i, e, a)
}
}
}


@@ -1,382 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"sync"
"testing"
"time"
)
func testHeapObjectKeyFunc(obj interface{}) (string, error) {
return obj.(testHeapObject).name, nil
}
type testHeapObject struct {
name string
val interface{}
}
func mkHeapObj(name string, val interface{}) testHeapObject {
return testHeapObject{name: name, val: val}
}
func compareInts(val1 interface{}, val2 interface{}) bool {
first := val1.(testHeapObject).val.(int)
second := val2.(testHeapObject).val.(int)
return first < second
}
// TestHeapBasic tests Heap invariant and synchronization.
func TestHeapBasic(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
var wg sync.WaitGroup
wg.Add(2)
const amount = 500
var i, u int
// Insert items in the heap in opposite orders in two go routines.
go func() {
for i = amount; i > 0; i-- {
h.Add(mkHeapObj(string([]rune{'a', rune(i)}), i))
}
wg.Done()
}()
go func() {
for u = 0; u < amount; u++ {
h.Add(mkHeapObj(string([]rune{'b', rune(u)}), u+1))
}
wg.Done()
}()
// Wait for the two go routines to finish.
wg.Wait()
// Make sure that the numbers are popped in ascending order.
prevNum := 0
for i := 0; i < amount*2; i++ {
obj, err := h.Pop()
num := obj.(testHeapObject).val.(int)
// All the items must be sorted.
if err != nil || prevNum > num {
t.Errorf("got %v out of order, last was %v", obj, prevNum)
}
prevNum = num
}
}
// Tests Heap.Add and ensures that heap invariant is preserved after adding items.
func TestHeap_Add(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
h.Add(mkHeapObj("foo", 10))
h.Add(mkHeapObj("bar", 1))
h.Add(mkHeapObj("baz", 11))
h.Add(mkHeapObj("zab", 30))
h.Add(mkHeapObj("foo", 13)) // This updates "foo".
item, err := h.Pop()
if e, a := 1, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
item, err = h.Pop()
if e, a := 11, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
h.Delete(mkHeapObj("baz", 11)) // Nothing is deleted.
h.Add(mkHeapObj("foo", 14)) // foo is updated.
item, err = h.Pop()
if e, a := 14, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
item, err = h.Pop()
if e, a := 30, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
}
// TestHeap_BulkAdd tests Heap.BulkAdd functionality and ensures that all the
// items given to BulkAdd are added to the queue before Pop reads them.
func TestHeap_BulkAdd(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
const amount = 500
// Insert items in the heap in opposite orders in a go routine.
go func() {
l := []interface{}{}
for i := amount; i > 0; i-- {
l = append(l, mkHeapObj(string([]rune{'a', rune(i)}), i))
}
h.BulkAdd(l)
}()
prevNum := -1
for i := 0; i < amount; i++ {
obj, err := h.Pop()
num := obj.(testHeapObject).val.(int)
// All the items must be sorted.
if err != nil || prevNum >= num {
t.Errorf("got %v out of order, last was %v", obj, prevNum)
}
prevNum = num
}
}
// TestHeapEmptyPop tests that pop returns properly after heap is closed.
func TestHeapEmptyPop(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
go func() {
time.Sleep(1 * time.Second)
h.Close()
}()
_, err := h.Pop()
if err == nil || err.Error() != closedMsg {
t.Errorf("pop should have returned heap closed error: %v", err)
}
}
// TestHeap_AddIfNotPresent tests Heap.AddIfNotPresent and ensures that heap
// invariant is preserved after adding items.
func TestHeap_AddIfNotPresent(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
h.AddIfNotPresent(mkHeapObj("foo", 10))
h.AddIfNotPresent(mkHeapObj("bar", 1))
h.AddIfNotPresent(mkHeapObj("baz", 11))
h.AddIfNotPresent(mkHeapObj("zab", 30))
h.AddIfNotPresent(mkHeapObj("foo", 13)) // This is not added.
if len := len(h.data.items); len != 4 {
t.Errorf("unexpected number of items: %d", len)
}
if val := h.data.items["foo"].obj.(testHeapObject).val; val != 10 {
t.Errorf("unexpected value: %d", val)
}
item, err := h.Pop()
if e, a := 1, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
item, err = h.Pop()
if e, a := 10, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
// bar is already popped. Let's add another one.
h.AddIfNotPresent(mkHeapObj("bar", 14))
item, err = h.Pop()
if e, a := 11, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
item, err = h.Pop()
if e, a := 14, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
}
// TestHeap_Delete tests Heap.Delete and ensures that heap invariant is
// preserved after deleting items.
func TestHeap_Delete(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
h.Add(mkHeapObj("foo", 10))
h.Add(mkHeapObj("bar", 1))
h.Add(mkHeapObj("bal", 31))
h.Add(mkHeapObj("baz", 11))
// Delete head. Delete should work with "key" and doesn't care about the value.
if err := h.Delete(mkHeapObj("bar", 200)); err != nil {
t.Fatalf("Failed to delete head.")
}
item, err := h.Pop()
if e, a := 10, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
h.Add(mkHeapObj("zab", 30))
h.Add(mkHeapObj("faz", 30))
len := h.data.Len()
// Delete non-existing item.
if err = h.Delete(mkHeapObj("non-existent", 10)); err == nil || len != h.data.Len() {
t.Fatalf("Didn't expect any item removal")
}
// Delete tail.
if err = h.Delete(mkHeapObj("bal", 31)); err != nil {
t.Fatalf("Failed to delete tail.")
}
// Delete one of the items with value 30.
if err = h.Delete(mkHeapObj("zab", 30)); err != nil {
t.Fatalf("Failed to delete item.")
}
item, err = h.Pop()
if e, a := 11, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
item, err = h.Pop()
if e, a := 30, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
if h.data.Len() != 0 {
t.Fatalf("expected an empty heap.")
}
}
// TestHeap_Update tests Heap.Update and ensures that heap invariant is
// preserved after adding items.
func TestHeap_Update(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
h.Add(mkHeapObj("foo", 10))
h.Add(mkHeapObj("bar", 1))
h.Add(mkHeapObj("bal", 31))
h.Add(mkHeapObj("baz", 11))
// Update an item to a value that should push it to the head.
h.Update(mkHeapObj("baz", 0))
if h.data.queue[0] != "baz" || h.data.items["baz"].index != 0 {
t.Fatalf("expected baz to be at the head")
}
item, err := h.Pop()
if e, a := 0, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
// Update bar to push it farther back in the queue.
h.Update(mkHeapObj("bar", 100))
if h.data.queue[0] != "foo" || h.data.items["foo"].index != 0 {
t.Fatalf("expected foo to be at the head")
}
}
// TestHeap_Get tests Heap.Get.
func TestHeap_Get(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
h.Add(mkHeapObj("foo", 10))
h.Add(mkHeapObj("bar", 1))
h.Add(mkHeapObj("bal", 31))
h.Add(mkHeapObj("baz", 11))
// Get works with the key.
obj, exists, err := h.Get(mkHeapObj("baz", 0))
if err != nil || exists == false || obj.(testHeapObject).val != 11 {
t.Fatalf("unexpected error in getting element")
}
// Get non-existing object.
_, exists, err = h.Get(mkHeapObj("non-existing", 0))
if err != nil || exists == true {
t.Fatalf("didn't expect to get any object")
}
}
// TestHeap_GetByKey tests Heap.GetByKey and is very similar to TestHeap_Get.
func TestHeap_GetByKey(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
h.Add(mkHeapObj("foo", 10))
h.Add(mkHeapObj("bar", 1))
h.Add(mkHeapObj("bal", 31))
h.Add(mkHeapObj("baz", 11))
obj, exists, err := h.GetByKey("baz")
if err != nil || exists == false || obj.(testHeapObject).val != 11 {
t.Fatalf("unexpected error in getting element")
}
// Get non-existing object.
_, exists, err = h.GetByKey("non-existing")
if err != nil || exists == true {
t.Fatalf("didn't expect to get any object")
}
}
// TestHeap_Close tests Heap.Close and Heap.IsClosed functions.
func TestHeap_Close(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
h.Add(mkHeapObj("foo", 10))
h.Add(mkHeapObj("bar", 1))
if h.IsClosed() {
t.Fatalf("didn't expect heap to be closed")
}
h.Close()
if !h.IsClosed() {
t.Fatalf("expect heap to be closed")
}
}
// TestHeap_List tests Heap.List function.
func TestHeap_List(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
list := h.List()
if len(list) != 0 {
t.Errorf("expected an empty list")
}
items := map[string]int{
"foo": 10,
"bar": 1,
"bal": 30,
"baz": 11,
"faz": 30,
}
for k, v := range items {
h.Add(mkHeapObj(k, v))
}
list = h.List()
if len(list) != len(items) {
t.Errorf("expected %d items, got %d", len(items), len(list))
}
for _, obj := range list {
heapObj := obj.(testHeapObject)
v, ok := items[heapObj.name]
if !ok || v != heapObj.val {
t.Errorf("unexpected item in the list: %v", heapObj)
}
}
}
// TestHeap_ListKeys tests Heap.ListKeys function. Scenario is the same as
// TestHeap_List.
func TestHeap_ListKeys(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
list := h.ListKeys()
if len(list) != 0 {
t.Errorf("expected an empty list")
}
items := map[string]int{
"foo": 10,
"bar": 1,
"bal": 30,
"baz": 11,
"faz": 30,
}
for k, v := range items {
h.Add(mkHeapObj(k, v))
}
list = h.ListKeys()
if len(list) != len(items) {
t.Errorf("expected %d items, got %d", len(items), len(list))
}
for _, key := range list {
_, ok := items[key]
if !ok {
t.Errorf("unexpected item in the list: %v", key)
}
}
}
// TestHeapAddAfterClose tests that heap returns an error if anything is added
// after it is closed.
func TestHeapAddAfterClose(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
h.Close()
if err := h.Add(mkHeapObj("test", 1)); err == nil || err.Error() != closedMsg {
t.Errorf("expected heap closed error")
}
if err := h.AddIfNotPresent(mkHeapObj("test", 1)); err == nil || err.Error() != closedMsg {
t.Errorf("expected heap closed error")
}
if err := h.BulkAdd([]interface{}{mkHeapObj("test", 1)}); err == nil || err.Error() != closedMsg {
t.Errorf("expected heap closed error")
}
}
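The deleted heap tests exercise a keyed min-heap where adding an existing key updates it in place and re-establishes the heap invariant. Below is a sketch using the standard container/heap package; the real Heap tracks each key's index in a map, which this sketch approximates with a linear scan before heap.Fix.

```go
package main

import (
	"container/heap"
	"fmt"
)

// item pairs a key with an orderable value, like testHeapObject.
type item struct {
	key string
	val int
}

// minHeap satisfies heap.Interface; Less orders by val, mirroring
// the compareInts comparator above.
type minHeap []item

func (h minHeap) Len() int            { return len(h) }
func (h minHeap) Less(i, j int) bool  { return h[i].val < h[j].val }
func (h minHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *minHeap) Push(x interface{}) { *h = append(*h, x.(item)) }
func (h *minHeap) Pop() interface{} {
	old := *h
	it := old[len(old)-1]
	*h = old[:len(old)-1]
	return it
}

// update changes the value stored under key and restores the heap
// invariant with heap.Fix, roughly what Heap.Update does through its
// key-to-index map.
func update(h *minHeap, key string, val int) {
	for i := range *h {
		if (*h)[i].key == key {
			(*h)[i].val = val
			heap.Fix(h, i)
			return
		}
	}
	heap.Push(h, item{key, val})
}

func main() {
	h := &minHeap{}
	heap.Push(h, item{"foo", 10})
	heap.Push(h, item{"bar", 1})
	heap.Push(h, item{"baz", 11})
	update(h, "baz", 0)      // pushes baz to the head
	fmt.Println(heap.Pop(h)) // {baz 0}
	fmt.Println(heap.Pop(h)) // {bar 1}
}
```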


@@ -1,134 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"k8s.io/apimachinery/pkg/util/sets"
"strings"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func testIndexFunc(obj interface{}) ([]string, error) {
pod := obj.(*v1.Pod)
return []string{pod.Labels["foo"]}, nil
}
func TestGetIndexFuncValues(t *testing.T) {
index := NewIndexer(MetaNamespaceKeyFunc, Indexers{"testmodes": testIndexFunc})
pod1 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "one", Labels: map[string]string{"foo": "bar"}}}
pod2 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "two", Labels: map[string]string{"foo": "bar"}}}
pod3 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "tre", Labels: map[string]string{"foo": "biz"}}}
index.Add(pod1)
index.Add(pod2)
index.Add(pod3)
keys := index.ListIndexFuncValues("testmodes")
if len(keys) != 2 {
t.Errorf("Expected 2 keys but got %v", len(keys))
}
for _, key := range keys {
if key != "bar" && key != "biz" {
t.Errorf("Expected only 'bar' or 'biz' but got %s", key)
}
}
}
func testUsersIndexFunc(obj interface{}) ([]string, error) {
pod := obj.(*v1.Pod)
usersString := pod.Annotations["users"]
return strings.Split(usersString, ","), nil
}
func TestMultiIndexKeys(t *testing.T) {
index := NewIndexer(MetaNamespaceKeyFunc, Indexers{"byUser": testUsersIndexFunc})
pod1 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "one", Annotations: map[string]string{"users": "ernie,bert"}}}
pod2 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "two", Annotations: map[string]string{"users": "bert,oscar"}}}
pod3 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "tre", Annotations: map[string]string{"users": "ernie,elmo"}}}
index.Add(pod1)
index.Add(pod2)
index.Add(pod3)
expected := map[string]sets.String{}
expected["ernie"] = sets.NewString("one", "tre")
expected["bert"] = sets.NewString("one", "two")
expected["elmo"] = sets.NewString("tre")
expected["oscar"] = sets.NewString("two")
expected["elmo"] = sets.NewString() // let's just make sure we don't get anything back in this case
{
for k, v := range expected {
found := sets.String{}
indexResults, err := index.ByIndex("byUser", k)
if err != nil {
t.Errorf("Unexpected error %v", err)
}
for _, item := range indexResults {
found.Insert(item.(*v1.Pod).Name)
}
items := v.List()
if !found.HasAll(items...) {
t.Errorf("missing items, index %s, expected %v but found %v", k, items, found.List())
}
}
}
index.Delete(pod3)
erniePods, err := index.ByIndex("byUser", "ernie")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(erniePods) != 1 {
t.Errorf("Expected 1 pods but got %v", len(erniePods))
}
for _, erniePod := range erniePods {
if erniePod.(*v1.Pod).Name != "one" {
t.Errorf("Expected only 'one' but got %s", erniePod.(*v1.Pod).Name)
}
}
elmoPods, err := index.ByIndex("byUser", "elmo")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(elmoPods) != 0 {
t.Errorf("Expected 0 pods but got %v", len(elmoPods))
}
copyOfPod2 := pod2.DeepCopy()
copyOfPod2.Annotations["users"] = "oscar"
index.Update(copyOfPod2)
bertPods, err := index.ByIndex("byUser", "bert")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(bertPods) != 1 {
t.Errorf("Expected 1 pods but got %v", len(bertPods))
}
for _, bertPod := range bertPods {
if bertPod.(*v1.Pod).Name != "one" {
t.Errorf("Expected only 'one' but got %s", bertPod.(*v1.Pod).Name)
}
}
}
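An index function can emit several values per object (here, one per user in the annotation), producing an inverted map from index value to the set of object keys; Update and Delete must keep that map consistent. A minimal sketch of the add-and-query half of such an index:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// index maps index value -> set of object names: the core shape
// behind Indexer.ByIndex.
type index map[string]map[string]bool

// add records every index value the object produces, one per
// comma-separated user, as testUsersIndexFunc does above.
func (ix index) add(name, users string) {
	for _, u := range strings.Split(users, ",") {
		if ix[u] == nil {
			ix[u] = map[string]bool{}
		}
		ix[u][name] = true
	}
}

// byIndex returns the names of all objects indexed under user.
func (ix index) byIndex(user string) []string {
	var out []string
	for name := range ix[user] {
		out = append(out, name)
	}
	sort.Strings(out)
	return out
}

func main() {
	ix := index{}
	ix.add("one", "ernie,bert")
	ix.add("two", "bert,oscar")
	ix.add("tre", "ernie,elmo")
	fmt.Println(ix.byIndex("ernie")) // [one tre]
	fmt.Println(ix.byIndex("bert"))  // [one two]
}
```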


@@ -1,81 +0,0 @@
// +build !race
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"testing"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
)
func TestMutationDetector(t *testing.T) {
fakeWatch := watch.NewFake()
lw := &testLW{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return fakeWatch, nil
},
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return &v1.PodList{}, nil
},
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "anything",
Labels: map[string]string{"check": "foo"},
},
}
stopCh := make(chan struct{})
defer close(stopCh)
addReceived := make(chan bool)
mutationFound := make(chan bool)
informer := NewSharedInformer(lw, &v1.Pod{}, 1*time.Second).(*sharedIndexInformer)
informer.cacheMutationDetector = &defaultCacheMutationDetector{
name: "name",
period: 1 * time.Second,
failureFunc: func(message string) {
mutationFound <- true
},
}
informer.AddEventHandler(
ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
addReceived <- true
},
},
)
go informer.Run(stopCh)
fakeWatch.Add(pod)
select {
case <-addReceived:
}
pod.Labels["change"] = "true"
select {
case <-mutationFound:
}
}
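The mutation detector being tested works by stashing a deep copy of every object delivered to the informer and periodically comparing the copy against the live pointer; any difference means some consumer mutated shared cache state in place. A stdlib sketch of that comparison step (the real detector relies on the generated deep-copy support of API objects, not a hand-rolled copy):

```go
package main

import (
	"fmt"
	"reflect"
)

type pod struct {
	Name   string
	Labels map[string]string
}

// deepCopy is a hand-rolled copy for this toy type; it stands in for
// the generated deep-copy machinery used on real API objects.
func deepCopy(p *pod) *pod {
	c := &pod{Name: p.Name, Labels: map[string]string{}}
	for k, v := range p.Labels {
		c.Labels[k] = v
	}
	return c
}

func main() {
	live := &pod{Name: "anything", Labels: map[string]string{"check": "foo"}}
	cached := deepCopy(live)

	// A client mutates the shared object in place...
	live.Labels["change"] = "true"

	// ...and the periodic comparison catches the drift.
	if !reflect.DeepEqual(live, cached) {
		fmt.Println("cache mutation detected")
	}
}
```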


@@ -1,58 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"sync"
"testing"
"time"
"k8s.io/apimachinery/pkg/util/wait"
)
const (
concurrencyLevel = 5
)
func BenchmarkListener(b *testing.B) {
var notification addNotification
var swg sync.WaitGroup
swg.Add(b.N)
b.SetParallelism(concurrencyLevel)
// Preallocate enough space so that benchmark does not run out of it
pl := newProcessListener(&ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
swg.Done()
},
}, 0, 0, time.Now(), 1024*1024)
var wg wait.Group
defer wg.Wait() // Wait for .run and .pop to stop
defer close(pl.addCh) // Tell .run and .pop to stop
wg.Start(pl.run)
wg.Start(pl.pop)
b.ReportAllocs()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
pl.add(notification)
}
})
swg.Wait() // Block until all notifications have been received
b.StopTimer()
}
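The listener under benchmark decouples fast producers from a possibly slow handler by pumping notifications from an add channel through an internal buffer to the handler; the real implementation uses a growing ring buffer, which the sketch below replaces with a plain slice.

```go
package main

import "fmt"

// pump forwards items from in to out, buffering internally so that
// sends on in never block on a slow consumer. This is the essence of
// the listener's paired pop (buffering) and run (handling) goroutines.
func pump(in <-chan int, out chan<- int) {
	defer close(out)
	var buf []int
	for {
		// With an empty buffer, send stays nil and that case blocks,
		// so the select waits on in alone.
		var send chan<- int
		var next int
		if len(buf) > 0 {
			send = out
			next = buf[0]
		}
		select {
		case v, ok := <-in:
			if !ok {
				for _, v := range buf { // drain, then exit
					out <- v
				}
				return
			}
			buf = append(buf, v)
		case send <- next:
			buf = buf[1:]
		}
	}
}

func main() {
	in := make(chan int)
	out := make(chan int)
	go pump(in, out)
	for i := 0; i < 3; i++ {
		in <- i
	}
	close(in)
	for v := range out {
		fmt.Println(v) // 0, 1, 2 in order
	}
}
```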


@@ -1,389 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"errors"
"fmt"
"math/rand"
"strconv"
"testing"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
)
var nevererrc chan error
type testLW struct {
ListFunc func(options metav1.ListOptions) (runtime.Object, error)
WatchFunc func(options metav1.ListOptions) (watch.Interface, error)
}
func (t *testLW) List(options metav1.ListOptions) (runtime.Object, error) {
return t.ListFunc(options)
}
func (t *testLW) Watch(options metav1.ListOptions) (watch.Interface, error) {
return t.WatchFunc(options)
}
func TestCloseWatchChannelOnError(t *testing.T) {
r := NewReflector(&testLW{}, &v1.Pod{}, NewStore(MetaNamespaceKeyFunc), 0)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "bar"}}
fw := watch.NewFake()
r.listerWatcher = &testLW{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return fw, nil
},
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "1"}}, nil
},
}
go r.ListAndWatch(wait.NeverStop)
fw.Error(pod)
select {
case _, ok := <-fw.ResultChan():
if ok {
t.Errorf("Watch channel left open after cancellation")
}
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("the cancellation is at least %s late", wait.ForeverTestTimeout.String())
break
}
}
func TestRunUntil(t *testing.T) {
stopCh := make(chan struct{})
store := NewStore(MetaNamespaceKeyFunc)
r := NewReflector(&testLW{}, &v1.Pod{}, store, 0)
fw := watch.NewFake()
r.listerWatcher = &testLW{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return fw, nil
},
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "1"}}, nil
},
}
go r.Run(stopCh)
// Synchronously add a dummy pod into the watch channel so we
// know the Run goroutine is in the watch handler.
fw.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "bar"}})
close(stopCh)
select {
case _, ok := <-fw.ResultChan():
if ok {
t.Errorf("Watch channel left open after stopping the watch")
}
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("the cancellation is at least %s late", wait.ForeverTestTimeout.String())
}
}
func TestReflectorResyncChan(t *testing.T) {
s := NewStore(MetaNamespaceKeyFunc)
g := NewReflector(&testLW{}, &v1.Pod{}, s, time.Millisecond)
a, _ := g.resyncChan()
b := time.After(wait.ForeverTestTimeout)
select {
case <-a:
t.Logf("got timeout as expected")
case <-b:
t.Errorf("resyncChan() is at least 99 milliseconds late??")
}
}
func BenchmarkReflectorResyncChanMany(b *testing.B) {
s := NewStore(MetaNamespaceKeyFunc)
g := NewReflector(&testLW{}, &v1.Pod{}, s, 25*time.Millisecond)
// The improvement to this (calling the timer's Stop() method) makes
// this benchmark about 40% faster.
for i := 0; i < b.N; i++ {
g.resyncPeriod = time.Duration(rand.Float64() * float64(time.Millisecond) * 25)
_, stop := g.resyncChan()
stop()
}
}
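// exampleResyncChanCleanup is a hedged sketch (added for illustration, not
// part of the original file): resyncChan returns a timer channel plus a
// cleanup func, and calling the cleanup releases the timer early rather
// than letting it fire, which is the improvement the comment above measures.
func exampleResyncChanCleanup() {
g := NewReflector(&testLW{}, &v1.Pod{}, NewStore(MetaNamespaceKeyFunc), 50*time.Millisecond)
ch, stop := g.resyncChan()
defer stop() // release the underlying timer promptly
<-ch         // waits at most one resync period
}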
func TestReflectorWatchHandlerError(t *testing.T) {
s := NewStore(MetaNamespaceKeyFunc)
g := NewReflector(&testLW{}, &v1.Pod{}, s, 0)
fw := watch.NewFake()
go func() {
fw.Stop()
}()
var resumeRV string
err := g.watchHandler(fw, &resumeRV, nevererrc, wait.NeverStop)
if err == nil {
t.Errorf("unexpected non-error")
}
}
func TestReflectorWatchHandler(t *testing.T) {
s := NewStore(MetaNamespaceKeyFunc)
g := NewReflector(&testLW{}, &v1.Pod{}, s, 0)
fw := watch.NewFake()
s.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo"}})
s.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "bar"}})
go func() {
fw.Add(&v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "rejected"}})
fw.Delete(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo"}})
fw.Modify(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "bar", ResourceVersion: "55"}})
fw.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "baz", ResourceVersion: "32"}})
fw.Stop()
}()
var resumeRV string
err := g.watchHandler(fw, &resumeRV, nevererrc, wait.NeverStop)
if err != nil {
t.Errorf("unexpected error %v", err)
}
mkPod := func(id string, rv string) *v1.Pod {
return &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: id, ResourceVersion: rv}}
}
table := []struct {
Pod *v1.Pod
exists bool
}{
{mkPod("foo", ""), false},
{mkPod("rejected", ""), false},
{mkPod("bar", "55"), true},
{mkPod("baz", "32"), true},
}
for _, item := range table {
obj, exists, _ := s.Get(item.Pod)
if e, a := item.exists, exists; e != a {
t.Errorf("%v: expected %v, got %v", item.Pod, e, a)
}
if !exists {
continue
}
if e, a := item.Pod.ResourceVersion, obj.(*v1.Pod).ResourceVersion; e != a {
t.Errorf("%v: expected %v, got %v", item.Pod, e, a)
}
}
// resumeRV should hold the last resource version we saw.
if e, a := "32", resumeRV; e != a {
t.Errorf("expected %v, got %v", e, a)
}
// last sync resource version should be the last version synced with store
if e, a := "32", g.LastSyncResourceVersion(); e != a {
t.Errorf("expected %v, got %v", e, a)
}
}
func TestReflectorStopWatch(t *testing.T) {
s := NewStore(MetaNamespaceKeyFunc)
g := NewReflector(&testLW{}, &v1.Pod{}, s, 0)
fw := watch.NewFake()
var resumeRV string
stopWatch := make(chan struct{}, 1)
stopWatch <- struct{}{}
err := g.watchHandler(fw, &resumeRV, nevererrc, stopWatch)
if err != errorStopRequested {
t.Errorf("expected stop error, got %q", err)
}
}
func TestReflectorListAndWatch(t *testing.T) {
createdFakes := make(chan *watch.FakeWatcher)
// The ListFunc says that it's at revision 1. Therefore, we expect our WatchFunc
// to get called at the beginning of the watch with 1, and again with 3 when we
// inject an error.
expectedRVs := []string{"1", "3"}
lw := &testLW{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
rv := options.ResourceVersion
fw := watch.NewFake()
if e, a := expectedRVs[0], rv; e != a {
t.Errorf("Expected rv %v, but got %v", e, a)
}
expectedRVs = expectedRVs[1:]
// channel is not buffered because the for loop below needs to block. But
// we don't want to block here, so report the new fake via a goroutine.
go func() { createdFakes <- fw }()
return fw, nil
},
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "1"}}, nil
},
}
s := NewFIFO(MetaNamespaceKeyFunc)
r := NewReflector(lw, &v1.Pod{}, s, 0)
go r.ListAndWatch(wait.NeverStop)
ids := []string{"foo", "bar", "baz", "qux", "zoo"}
var fw *watch.FakeWatcher
for i, id := range ids {
if fw == nil {
fw = <-createdFakes
}
sendingRV := strconv.FormatUint(uint64(i+2), 10)
fw.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: id, ResourceVersion: sendingRV}})
if sendingRV == "3" {
// Inject a failure.
fw.Stop()
fw = nil
}
}
// Verify we received the right ids with the right resource versions.
for i, id := range ids {
pod := Pop(s).(*v1.Pod)
if e, a := id, pod.Name; e != a {
t.Errorf("%v: Expected %v, got %v", i, e, a)
}
if e, a := strconv.FormatUint(uint64(i+2), 10), pod.ResourceVersion; e != a {
t.Errorf("%v: Expected %v, got %v", i, e, a)
}
}
if len(expectedRVs) != 0 {
t.Error("called watchStarter an unexpected number of times")
}
}
func TestReflectorListAndWatchWithErrors(t *testing.T) {
mkPod := func(id string, rv string) *v1.Pod {
return &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: id, ResourceVersion: rv}}
}
mkList := func(rv string, pods ...*v1.Pod) *v1.PodList {
list := &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: rv}}
for _, pod := range pods {
list.Items = append(list.Items, *pod)
}
return list
}
table := []struct {
list *v1.PodList
listErr error
events []watch.Event
watchErr error
}{
{
list: mkList("1"),
events: []watch.Event{
{Type: watch.Added, Object: mkPod("foo", "2")},
{Type: watch.Added, Object: mkPod("bar", "3")},
},
}, {
list: mkList("3", mkPod("foo", "2"), mkPod("bar", "3")),
events: []watch.Event{
{Type: watch.Deleted, Object: mkPod("foo", "4")},
{Type: watch.Added, Object: mkPod("qux", "5")},
},
}, {
listErr: fmt.Errorf("a list error"),
}, {
list: mkList("5", mkPod("bar", "3"), mkPod("qux", "5")),
watchErr: fmt.Errorf("a watch error"),
}, {
list: mkList("5", mkPod("bar", "3"), mkPod("qux", "5")),
events: []watch.Event{
{Type: watch.Added, Object: mkPod("baz", "6")},
},
}, {
list: mkList("6", mkPod("bar", "3"), mkPod("qux", "5"), mkPod("baz", "6")),
},
}
s := NewFIFO(MetaNamespaceKeyFunc)
for line, item := range table {
if item.list != nil {
// Test that the list is what currently exists in the store.
current := s.List()
checkMap := map[string]string{}
for _, item := range current {
pod := item.(*v1.Pod)
checkMap[pod.Name] = pod.ResourceVersion
}
for _, pod := range item.list.Items {
if e, a := pod.ResourceVersion, checkMap[pod.Name]; e != a {
t.Errorf("%v: expected %v, got %v for pod %v", line, e, a, pod.Name)
}
}
if e, a := len(item.list.Items), len(checkMap); e != a {
t.Errorf("%v: expected %v, got %v", line, e, a)
}
}
watchRet, watchErr := item.events, item.watchErr
lw := &testLW{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
if watchErr != nil {
return nil, watchErr
}
watchErr = fmt.Errorf("second watch")
fw := watch.NewFake()
go func() {
for _, e := range watchRet {
fw.Action(e.Type, e.Object)
}
fw.Stop()
}()
return fw, nil
},
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return item.list, item.listErr
},
}
r := NewReflector(lw, &v1.Pod{}, s, 0)
r.ListAndWatch(wait.NeverStop)
}
}
func TestReflectorResync(t *testing.T) {
iteration := 0
stopCh := make(chan struct{})
rerr := errors.New("expected resync reached")
s := &FakeCustomStore{
ResyncFunc: func() error {
iteration++
if iteration == 2 {
return rerr
}
return nil
},
}
lw := &testLW{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
fw := watch.NewFake()
return fw, nil
},
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "0"}}, nil
},
}
resyncPeriod := 1 * time.Millisecond
r := NewReflector(lw, &v1.Pod{}, s, resyncPeriod)
if err := r.ListAndWatch(stopCh); err != nil {
// the error from Resync is not propagated up to here
t.Errorf("unexpected error %v", err)
}
if iteration != 2 {
t.Errorf("exactly 2 iterations were expected, got: %v", iteration)
}
}
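// exampleFakeCustomStore is a hedged sketch (added for illustration, not
// part of the original file): FakeCustomStore lets a test override single
// Store methods, as TestReflectorResync does with ResyncFunc above; methods
// left nil behave as no-ops.
func exampleFakeCustomStore() Store {
return &FakeCustomStore{
ResyncFunc: func() error { return nil }, // only Resync is stubbed
}
}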

View File

@@ -1,265 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"fmt"
"sync"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
fcache "k8s.io/client-go/tools/cache/testing"
)
type testListener struct {
lock sync.RWMutex
resyncPeriod time.Duration
expectedItemNames sets.String
receivedItemNames []string
name string
}
func newTestListener(name string, resyncPeriod time.Duration, expected ...string) *testListener {
l := &testListener{
resyncPeriod: resyncPeriod,
expectedItemNames: sets.NewString(expected...),
name: name,
}
return l
}
func (l *testListener) OnAdd(obj interface{}) {
l.handle(obj)
}
func (l *testListener) OnUpdate(old, new interface{}) {
l.handle(new)
}
func (l *testListener) OnDelete(obj interface{}) {
}
func (l *testListener) handle(obj interface{}) {
key, _ := MetaNamespaceKeyFunc(obj)
fmt.Printf("%s: handle: %v\n", l.name, key)
l.lock.Lock()
defer l.lock.Unlock()
objectMeta, _ := meta.Accessor(obj)
l.receivedItemNames = append(l.receivedItemNames, objectMeta.GetName())
}
func (l *testListener) ok() bool {
fmt.Println("polling")
err := wait.PollImmediate(100*time.Millisecond, 2*time.Second, func() (bool, error) {
if l.satisfiedExpectations() {
return true, nil
}
return false, nil
})
if err != nil {
return false
}
// wait just a bit to allow any unexpected stragglers to come in
fmt.Println("sleeping")
time.Sleep(1 * time.Second)
fmt.Println("final check")
return l.satisfiedExpectations()
}
func (l *testListener) satisfiedExpectations() bool {
l.lock.RLock()
defer l.lock.RUnlock()
return len(l.receivedItemNames) == l.expectedItemNames.Len() && sets.NewString(l.receivedItemNames...).Equal(l.expectedItemNames)
}
func TestListenerResyncPeriods(t *testing.T) {
// source simulates an apiserver object endpoint.
source := fcache.NewFakeControllerSource()
source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1"}})
source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod2"}})
// create the shared informer and resync every 1s
informer := NewSharedInformer(source, &v1.Pod{}, 1*time.Second).(*sharedIndexInformer)
clock := clock.NewFakeClock(time.Now())
informer.clock = clock
informer.processor.clock = clock
// listener 1, never resync
listener1 := newTestListener("listener1", 0, "pod1", "pod2")
informer.AddEventHandlerWithResyncPeriod(listener1, listener1.resyncPeriod)
// listener 2, resync every 2s
listener2 := newTestListener("listener2", 2*time.Second, "pod1", "pod2")
informer.AddEventHandlerWithResyncPeriod(listener2, listener2.resyncPeriod)
// listener 3, resync every 3s
listener3 := newTestListener("listener3", 3*time.Second, "pod1", "pod2")
informer.AddEventHandlerWithResyncPeriod(listener3, listener3.resyncPeriod)
listeners := []*testListener{listener1, listener2, listener3}
stop := make(chan struct{})
defer close(stop)
go informer.Run(stop)
// ensure all listeners got the initial List
for _, listener := range listeners {
if !listener.ok() {
t.Errorf("%s: expected %v, got %v", listener.name, listener.expectedItemNames, listener.receivedItemNames)
}
}
// reset
for _, listener := range listeners {
listener.receivedItemNames = []string{}
}
// advance so listener2 gets a resync
clock.Step(2 * time.Second)
// make sure listener2 got the resync
if !listener2.ok() {
t.Errorf("%s: expected %v, got %v", listener2.name, listener2.expectedItemNames, listener2.receivedItemNames)
}
// wait a bit to give errant items a chance to go to 1 and 3
time.Sleep(1 * time.Second)
// make sure listeners 1 and 3 got nothing
if len(listener1.receivedItemNames) != 0 {
t.Errorf("listener1: should not have resynced (got %d)", len(listener1.receivedItemNames))
}
if len(listener3.receivedItemNames) != 0 {
t.Errorf("listener3: should not have resynced (got %d)", len(listener3.receivedItemNames))
}
// reset
for _, listener := range listeners {
listener.receivedItemNames = []string{}
}
// advance so listener3 gets a resync
clock.Step(1 * time.Second)
// make sure listener3 got the resync
if !listener3.ok() {
t.Errorf("%s: expected %v, got %v", listener3.name, listener3.expectedItemNames, listener3.receivedItemNames)
}
// wait a bit to give errant items a chance to go to 1 and 2
time.Sleep(1 * time.Second)
// make sure listeners 1 and 2 got nothing
if len(listener1.receivedItemNames) != 0 {
t.Errorf("listener1: should not have resynced (got %d)", len(listener1.receivedItemNames))
}
if len(listener2.receivedItemNames) != 0 {
t.Errorf("listener2: should not have resynced (got %d)", len(listener2.receivedItemNames))
}
}
func TestResyncCheckPeriod(t *testing.T) {
// source simulates an apiserver object endpoint.
source := fcache.NewFakeControllerSource()
// create the shared informer and resync every 12 hours
informer := NewSharedInformer(source, &v1.Pod{}, 12*time.Hour).(*sharedIndexInformer)
clock := clock.NewFakeClock(time.Now())
informer.clock = clock
informer.processor.clock = clock
// listener 1, never resync
listener1 := newTestListener("listener1", 0)
informer.AddEventHandlerWithResyncPeriod(listener1, listener1.resyncPeriod)
if e, a := 12*time.Hour, informer.resyncCheckPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
if e, a := time.Duration(0), informer.processor.listeners[0].resyncPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
// listener 2, resync every minute
listener2 := newTestListener("listener2", 1*time.Minute)
informer.AddEventHandlerWithResyncPeriod(listener2, listener2.resyncPeriod)
if e, a := 1*time.Minute, informer.resyncCheckPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
if e, a := time.Duration(0), informer.processor.listeners[0].resyncPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
if e, a := 1*time.Minute, informer.processor.listeners[1].resyncPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
// listener 3, resync every 55 seconds
listener3 := newTestListener("listener3", 55*time.Second)
informer.AddEventHandlerWithResyncPeriod(listener3, listener3.resyncPeriod)
if e, a := 55*time.Second, informer.resyncCheckPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
if e, a := time.Duration(0), informer.processor.listeners[0].resyncPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
if e, a := 1*time.Minute, informer.processor.listeners[1].resyncPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
if e, a := 55*time.Second, informer.processor.listeners[2].resyncPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
// listener 4, resync every 5 seconds
listener4 := newTestListener("listener4", 5*time.Second)
informer.AddEventHandlerWithResyncPeriod(listener4, listener4.resyncPeriod)
if e, a := 5*time.Second, informer.resyncCheckPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
if e, a := time.Duration(0), informer.processor.listeners[0].resyncPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
if e, a := 1*time.Minute, informer.processor.listeners[1].resyncPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
if e, a := 55*time.Second, informer.processor.listeners[2].resyncPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
if e, a := 5*time.Second, informer.processor.listeners[3].resyncPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
}
// verify that https://github.com/kubernetes/kubernetes/issues/59822 is fixed
func TestSharedInformerInitializationRace(t *testing.T) {
source := fcache.NewFakeControllerSource()
informer := NewSharedInformer(source, &v1.Pod{}, 1*time.Second).(*sharedIndexInformer)
listener := newTestListener("raceListener", 0)
stop := make(chan struct{})
go informer.AddEventHandlerWithResyncPeriod(listener, listener.resyncPeriod)
go informer.Run(stop)
close(stop)
}
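// exampleDefaultResyncHandler is a hedged sketch (added for illustration,
// not part of the original file): plain AddEventHandler registers a handler
// with the informer's own resync period, in contrast to the
// AddEventHandlerWithResyncPeriod calls exercised above.
func exampleDefaultResyncHandler() {
source := fcache.NewFakeControllerSource()
informer := NewSharedInformer(source, &v1.Pod{}, 0)
informer.AddEventHandler(ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { fmt.Printf("added: %v\n", obj) },
})
stop := make(chan struct{})
defer close(stop)
go informer.Run(stop)
}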

View File

@@ -1,156 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"testing"
"k8s.io/apimachinery/pkg/util/sets"
)
// Test public interface
func doTestStore(t *testing.T, store Store) {
mkObj := func(id string, val string) testStoreObject {
return testStoreObject{id: id, val: val}
}
store.Add(mkObj("foo", "bar"))
if item, ok, _ := store.Get(mkObj("foo", "")); !ok {
t.Errorf("didn't find inserted item")
} else {
if e, a := "bar", item.(testStoreObject).val; e != a {
t.Errorf("expected %v, got %v", e, a)
}
}
store.Update(mkObj("foo", "baz"))
if item, ok, _ := store.Get(mkObj("foo", "")); !ok {
t.Errorf("didn't find inserted item")
} else {
if e, a := "baz", item.(testStoreObject).val; e != a {
t.Errorf("expected %v, got %v", e, a)
}
}
store.Delete(mkObj("foo", ""))
if _, ok, _ := store.Get(mkObj("foo", "")); ok {
t.Errorf("found deleted item??")
}
// Test List.
store.Add(mkObj("a", "b"))
store.Add(mkObj("c", "d"))
store.Add(mkObj("e", "e"))
{
found := sets.String{}
for _, item := range store.List() {
found.Insert(item.(testStoreObject).val)
}
if !found.HasAll("b", "d", "e") {
t.Errorf("missing items, found: %v", found)
}
if len(found) != 3 {
t.Errorf("extra items")
}
}
// Test Replace.
store.Replace([]interface{}{
mkObj("foo", "foo"),
mkObj("bar", "bar"),
}, "0")
{
found := sets.String{}
for _, item := range store.List() {
found.Insert(item.(testStoreObject).val)
}
if !found.HasAll("foo", "bar") {
t.Errorf("missing items")
}
if len(found) != 2 {
t.Errorf("extra items")
}
}
}
// Test public interface
func doTestIndex(t *testing.T, indexer Indexer) {
mkObj := func(id string, val string) testStoreObject {
return testStoreObject{id: id, val: val}
}
// Test Index
expected := map[string]sets.String{}
expected["b"] = sets.NewString("a", "c")
expected["f"] = sets.NewString("e")
expected["h"] = sets.NewString("g")
indexer.Add(mkObj("a", "b"))
indexer.Add(mkObj("c", "b"))
indexer.Add(mkObj("e", "f"))
indexer.Add(mkObj("g", "h"))
{
for k, v := range expected {
found := sets.String{}
indexResults, err := indexer.Index("by_val", mkObj("", k))
if err != nil {
t.Errorf("Unexpected error %v", err)
}
for _, item := range indexResults {
found.Insert(item.(testStoreObject).id)
}
items := v.List()
if !found.HasAll(items...) {
t.Errorf("missing items, index %s, expected %v but found %v", k, items, found.List())
}
}
}
}
func testStoreKeyFunc(obj interface{}) (string, error) {
return obj.(testStoreObject).id, nil
}
func testStoreIndexFunc(obj interface{}) ([]string, error) {
return []string{obj.(testStoreObject).val}, nil
}
func testStoreIndexers() Indexers {
indexers := Indexers{}
indexers["by_val"] = testStoreIndexFunc
return indexers
}
type testStoreObject struct {
id string
val string
}
func TestCache(t *testing.T) {
doTestStore(t, NewStore(testStoreKeyFunc))
}
func TestFIFOCache(t *testing.T) {
doTestStore(t, NewFIFO(testStoreKeyFunc))
}
func TestUndeltaStore(t *testing.T) {
nop := func([]interface{}) {}
doTestStore(t, NewUndeltaStore(nop, testStoreKeyFunc))
}
func TestIndex(t *testing.T) {
doTestIndex(t, NewIndexer(testStoreKeyFunc, testStoreIndexers()))
}
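// exampleByValLookup is a hedged sketch (added for illustration, not part
// of the original file): IndexKeys returns the keys of every stored object
// whose index func emitted the given value, here the "by_val" index above.
func exampleByValLookup() []string {
indexer := NewIndexer(testStoreKeyFunc, testStoreIndexers())
indexer.Add(testStoreObject{id: "a", val: "b"})
indexer.Add(testStoreObject{id: "c", val: "b"})
keys, _ := indexer.IndexKeys("by_val", "b") // "a" and "c", order not guaranteed
return keys
}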

View File

@@ -1,255 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"errors"
"math/rand"
"strconv"
"sync"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
)
func NewFakeControllerSource() *FakeControllerSource {
return &FakeControllerSource{
Items: map[nnu]runtime.Object{},
Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull),
}
}
func NewFakePVControllerSource() *FakePVControllerSource {
return &FakePVControllerSource{
FakeControllerSource{
Items: map[nnu]runtime.Object{},
Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull),
}}
}
func NewFakePVCControllerSource() *FakePVCControllerSource {
return &FakePVCControllerSource{
FakeControllerSource{
Items: map[nnu]runtime.Object{},
Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull),
}}
}
// FakeControllerSource implements listing/watching for testing.
type FakeControllerSource struct {
lock sync.RWMutex
Items map[nnu]runtime.Object
changes []watch.Event // one change per resourceVersion
Broadcaster *watch.Broadcaster
}
type FakePVControllerSource struct {
FakeControllerSource
}
type FakePVCControllerSource struct {
FakeControllerSource
}
// namespace, name, uid to be used as a key.
type nnu struct {
namespace, name string
uid types.UID
}
// Add adds an object to the set and sends an add event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) Add(obj runtime.Object) {
f.Change(watch.Event{Type: watch.Added, Object: obj}, 1)
}
// Modify updates an object in the set and sends a modified event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) Modify(obj runtime.Object) {
f.Change(watch.Event{Type: watch.Modified, Object: obj}, 1)
}
// Delete deletes an object from the set and sends a delete event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) Delete(lastValue runtime.Object) {
f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 1)
}
// AddDropWatch adds an object to the set but forgets to send an add event to
// watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) AddDropWatch(obj runtime.Object) {
f.Change(watch.Event{Type: watch.Added, Object: obj}, 0)
}
// ModifyDropWatch updates an object in the set but forgets to send a modify
// event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) ModifyDropWatch(obj runtime.Object) {
f.Change(watch.Event{Type: watch.Modified, Object: obj}, 0)
}
// DeleteDropWatch deletes an object from the set but forgets to send a delete
// event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) DeleteDropWatch(lastValue runtime.Object) {
f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 0)
}
func (f *FakeControllerSource) key(accessor metav1.Object) nnu {
return nnu{accessor.GetNamespace(), accessor.GetName(), accessor.GetUID()}
}
// Change records the given event (setting the object's resource version) and
// sends a watch event with the specified probability.
func (f *FakeControllerSource) Change(e watch.Event, watchProbability float64) {
f.lock.Lock()
defer f.lock.Unlock()
accessor, err := meta.Accessor(e.Object)
if err != nil {
panic(err) // this is test code only
}
resourceVersion := len(f.changes) + 1
accessor.SetResourceVersion(strconv.Itoa(resourceVersion))
f.changes = append(f.changes, e)
key := f.key(accessor)
switch e.Type {
case watch.Added, watch.Modified:
f.Items[key] = e.Object
case watch.Deleted:
delete(f.Items, key)
}
if rand.Float64() < watchProbability {
f.Broadcaster.Action(e.Type, e.Object)
}
}
func (f *FakeControllerSource) getListItemsLocked() ([]runtime.Object, error) {
list := make([]runtime.Object, 0, len(f.Items))
for _, obj := range f.Items {
// Must make a copy to allow clients to modify the object.
// Otherwise, if they make a change and write it back, they
// will inadvertently change our canonical copy (in
// addition to racing with other clients).
list = append(list, obj.DeepCopyObject())
}
return list, nil
}
// List returns a list object, with its resource version set.
func (f *FakeControllerSource) List(options metav1.ListOptions) (runtime.Object, error) {
f.lock.RLock()
defer f.lock.RUnlock()
list, err := f.getListItemsLocked()
if err != nil {
return nil, err
}
listObj := &v1.List{}
if err := meta.SetList(listObj, list); err != nil {
return nil, err
}
listAccessor, err := meta.ListAccessor(listObj)
if err != nil {
return nil, err
}
resourceVersion := len(f.changes)
listAccessor.SetResourceVersion(strconv.Itoa(resourceVersion))
return listObj, nil
}
// List returns a list object, with its resource version set.
func (f *FakePVControllerSource) List(options metav1.ListOptions) (runtime.Object, error) {
f.lock.RLock()
defer f.lock.RUnlock()
list, err := f.FakeControllerSource.getListItemsLocked()
if err != nil {
return nil, err
}
listObj := &v1.PersistentVolumeList{}
if err := meta.SetList(listObj, list); err != nil {
return nil, err
}
listAccessor, err := meta.ListAccessor(listObj)
if err != nil {
return nil, err
}
resourceVersion := len(f.changes)
listAccessor.SetResourceVersion(strconv.Itoa(resourceVersion))
return listObj, nil
}
// List returns a list object, with its resource version set.
func (f *FakePVCControllerSource) List(options metav1.ListOptions) (runtime.Object, error) {
f.lock.RLock()
defer f.lock.RUnlock()
list, err := f.FakeControllerSource.getListItemsLocked()
if err != nil {
return nil, err
}
listObj := &v1.PersistentVolumeClaimList{}
if err := meta.SetList(listObj, list); err != nil {
return nil, err
}
listAccessor, err := meta.ListAccessor(listObj)
if err != nil {
return nil, err
}
resourceVersion := len(f.changes)
listAccessor.SetResourceVersion(strconv.Itoa(resourceVersion))
return listObj, nil
}
// Watch returns a watch, which will be pre-populated with all changes
// after resourceVersion.
func (f *FakeControllerSource) Watch(options metav1.ListOptions) (watch.Interface, error) {
f.lock.RLock()
defer f.lock.RUnlock()
rc, err := strconv.Atoi(options.ResourceVersion)
if err != nil {
return nil, err
}
if rc < len(f.changes) {
changes := []watch.Event{}
for _, c := range f.changes[rc:] {
// Must make a copy to allow clients to modify the
// object. Otherwise, if they make a change and write
// it back, they will inadvertently change our
// canonical copy (in addition to racing with other
// clients).
changes = append(changes, watch.Event{Type: c.Type, Object: c.Object.DeepCopyObject()})
}
return f.Broadcaster.WatchWithPrefix(changes), nil
} else if rc > len(f.changes) {
return nil, errors.New("resource version in the future not supported by this fake")
}
return f.Broadcaster.Watch(), nil
}
// Shutdown closes the underlying broadcaster, waiting for events to be
// delivered. It's an error to call any method after calling Shutdown. This is
// enforced by Shutdown() leaving f locked.
func (f *FakeControllerSource) Shutdown() {
f.lock.Lock() // Purposely no unlock.
f.Broadcaster.Shutdown()
}
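// exampleVersionCounting is a hedged sketch (added for illustration, not
// part of the original file): every Change bumps the fake's resource
// version by one, so after an Add and a Modify the list reports "2" and a
// watch started at "1" replays only the Modify.
func exampleVersionCounting() (watch.Interface, error) {
source := NewFakeControllerSource()
source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p"}})    // resource version 1
source.Modify(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p"}}) // resource version 2
return source.Watch(metav1.ListOptions{ResourceVersion: "1"})
}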

View File

@@ -1,95 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"sync"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
)
// ensure the watch delivers the requested and only the requested items.
func consume(t *testing.T, w watch.Interface, rvs []string, done *sync.WaitGroup) {
defer done.Done()
for _, rv := range rvs {
got, ok := <-w.ResultChan()
if !ok {
t.Errorf("%#v: unexpected channel close, wanted %v", rvs, rv)
return
}
gotRV := got.Object.(*v1.Pod).ObjectMeta.ResourceVersion
if e, a := rv, gotRV; e != a {
t.Errorf("wanted %v, got %v", e, a)
} else {
t.Logf("Got %v as expected", gotRV)
}
}
// We should not get anything else.
got, open := <-w.ResultChan()
if open {
t.Errorf("%#v: unwanted object %#v", rvs, got)
}
}
func TestRCNumber(t *testing.T) {
pod := func(name string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
}
}
wg := &sync.WaitGroup{}
wg.Add(3)
source := NewFakeControllerSource()
source.Add(pod("foo"))
source.Modify(pod("foo"))
source.Modify(pod("foo"))
w, err := source.Watch(metav1.ListOptions{ResourceVersion: "1"})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
go consume(t, w, []string{"2", "3"}, wg)
list, err := source.List(metav1.ListOptions{})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if e, a := "3", list.(*v1.List).ResourceVersion; e != a {
t.Errorf("wanted %v, got %v", e, a)
}
w2, err := source.Watch(metav1.ListOptions{ResourceVersion: "2"})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
go consume(t, w2, []string{"3"}, wg)
w3, err := source.Watch(metav1.ListOptions{ResourceVersion: "3"})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
go consume(t, w3, []string{}, wg)
source.Shutdown()
wg.Wait()
}

View File

@@ -1,131 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"reflect"
"testing"
)
// store_test.go checks that UndeltaStore conforms to the Store interface
// behavior. This file additionally verifies that it calls the push func.
type testUndeltaObject struct {
name string
val interface{}
}
func testUndeltaKeyFunc(obj interface{}) (string, error) {
return obj.(testUndeltaObject).name, nil
}
func TestUpdateCallsPush(t *testing.T) {
mkObj := func(name string, val interface{}) testUndeltaObject {
return testUndeltaObject{name: name, val: val}
}
var got []interface{}
callcount := 0
push := func(m []interface{}) {
callcount++
got = m
}
u := NewUndeltaStore(push, testUndeltaKeyFunc)
u.Add(mkObj("a", 2))
u.Update(mkObj("a", 1))
if callcount != 2 {
t.Errorf("Expected 2 calls, got %d", callcount)
}
l := []interface{}{mkObj("a", 1)}
if !reflect.DeepEqual(l, got) {
t.Errorf("Expected %#v, Got %#v", l, got)
}
}
func TestDeleteCallsPush(t *testing.T) {
mkObj := func(name string, val interface{}) testUndeltaObject {
return testUndeltaObject{name: name, val: val}
}
var got []interface{}
callcount := 0
push := func(m []interface{}) {
callcount++
got = m
}
u := NewUndeltaStore(push, testUndeltaKeyFunc)
u.Add(mkObj("a", 2))
u.Delete(mkObj("a", ""))
if callcount != 2 {
t.Errorf("Expected 2 calls, got %d", callcount)
}
expected := []interface{}{}
if !reflect.DeepEqual(expected, got) {
t.Errorf("Expected %#v, Got %#v", expected, got)
}
}
func TestReadsDoNotCallPush(t *testing.T) {
push := func(m []interface{}) {
t.Errorf("Unexpected call to push!")
}
u := NewUndeltaStore(push, testUndeltaKeyFunc)
// These should not call push.
_ = u.List()
_, _, _ = u.Get(testUndeltaObject{"a", ""})
}
func TestReplaceCallsPush(t *testing.T) {
mkObj := func(name string, val interface{}) testUndeltaObject {
return testUndeltaObject{name: name, val: val}
}
var got []interface{}
callcount := 0
push := func(m []interface{}) {
callcount++
got = m
}
u := NewUndeltaStore(push, testUndeltaKeyFunc)
m := []interface{}{mkObj("a", 1)}
u.Replace(m, "0")
if callcount != 1 {
t.Errorf("Expected 1 calls, got %d", callcount)
}
expected := []interface{}{mkObj("a", 1)}
if !reflect.DeepEqual(expected, got) {
t.Errorf("Expected %#v, Got %#v", expected, got)
}
}
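// exampleFullStatePushes is a hedged sketch (added for illustration, not
// part of the original file): every mutating call pushes the complete
// current state, so an Add followed by a Delete pushes twice, the second
// time with an empty snapshot.
func exampleFullStatePushes() [][]interface{} {
var snapshots [][]interface{}
push := func(m []interface{}) { snapshots = append(snapshots, m) }
u := NewUndeltaStore(push, testUndeltaKeyFunc)
u.Add(testUndeltaObject{name: "a", val: 1})
u.Delete(testUndeltaObject{name: "a"})
return snapshots // two snapshots: one with object "a", then an empty one
}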

View File

@@ -1,302 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
"fmt"
"io/ioutil"
"os"
"reflect"
"testing"
"sigs.k8s.io/yaml"
)
func newMergedConfig(certFile, certContent, keyFile, keyContent, caFile, caContent string, t *testing.T) Config {
if err := ioutil.WriteFile(certFile, []byte(certContent), 0644); err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := ioutil.WriteFile(keyFile, []byte(keyContent), 0600); err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := ioutil.WriteFile(caFile, []byte(caContent), 0644); err != nil {
t.Errorf("unexpected error: %v", err)
}
return Config{
AuthInfos: map[string]*AuthInfo{
"red-user": {Token: "red-token", ClientCertificateData: []byte(certContent), ClientKeyData: []byte(keyContent)},
"blue-user": {Token: "blue-token", ClientCertificate: certFile, ClientKey: keyFile}},
Clusters: map[string]*Cluster{
"cow-cluster": {Server: "http://cow.org:8080", CertificateAuthorityData: []byte(caContent)},
"chicken-cluster": {Server: "http://chicken.org:8080", CertificateAuthority: caFile}},
Contexts: map[string]*Context{
"federal-context": {AuthInfo: "red-user", Cluster: "cow-cluster"},
"shaker-context": {AuthInfo: "blue-user", Cluster: "chicken-cluster"}},
CurrentContext: "federal-context",
}
}
func TestMinifySuccess(t *testing.T) {
certFile, _ := ioutil.TempFile("", "")
defer os.Remove(certFile.Name())
keyFile, _ := ioutil.TempFile("", "")
defer os.Remove(keyFile.Name())
caFile, _ := ioutil.TempFile("", "")
defer os.Remove(caFile.Name())
mutatingConfig := newMergedConfig(certFile.Name(), "cert", keyFile.Name(), "key", caFile.Name(), "ca", t)
if err := MinifyConfig(&mutatingConfig); err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(mutatingConfig.Contexts) > 1 {
t.Errorf("unexpected contexts: %v", mutatingConfig.Contexts)
}
if _, exists := mutatingConfig.Contexts["federal-context"]; !exists {
t.Errorf("missing context")
}
if len(mutatingConfig.Clusters) > 1 {
t.Errorf("unexpected clusters: %v", mutatingConfig.Clusters)
}
if _, exists := mutatingConfig.Clusters["cow-cluster"]; !exists {
t.Errorf("missing cluster")
}
if len(mutatingConfig.AuthInfos) > 1 {
t.Errorf("unexpected users: %v", mutatingConfig.AuthInfos)
}
if _, exists := mutatingConfig.AuthInfos["red-user"]; !exists {
t.Errorf("missing user")
}
}
func TestMinifyMissingContext(t *testing.T) {
certFile, _ := ioutil.TempFile("", "")
defer os.Remove(certFile.Name())
keyFile, _ := ioutil.TempFile("", "")
defer os.Remove(keyFile.Name())
caFile, _ := ioutil.TempFile("", "")
defer os.Remove(caFile.Name())
mutatingConfig := newMergedConfig(certFile.Name(), "cert", keyFile.Name(), "key", caFile.Name(), "ca", t)
mutatingConfig.CurrentContext = "missing"
errMsg := "cannot locate context missing"
if err := MinifyConfig(&mutatingConfig); err == nil || err.Error() != errMsg {
t.Errorf("expected %v, got %v", errMsg, err)
}
}
func TestMinifyMissingCluster(t *testing.T) {
certFile, _ := ioutil.TempFile("", "")
defer os.Remove(certFile.Name())
keyFile, _ := ioutil.TempFile("", "")
defer os.Remove(keyFile.Name())
caFile, _ := ioutil.TempFile("", "")
defer os.Remove(caFile.Name())
mutatingConfig := newMergedConfig(certFile.Name(), "cert", keyFile.Name(), "key", caFile.Name(), "ca", t)
delete(mutatingConfig.Clusters, mutatingConfig.Contexts[mutatingConfig.CurrentContext].Cluster)
errMsg := "cannot locate cluster cow-cluster"
if err := MinifyConfig(&mutatingConfig); err == nil || err.Error() != errMsg {
t.Errorf("expected %v, got %v", errMsg, err)
}
}
func TestMinifyMissingAuthInfo(t *testing.T) {
certFile, _ := ioutil.TempFile("", "")
defer os.Remove(certFile.Name())
keyFile, _ := ioutil.TempFile("", "")
defer os.Remove(keyFile.Name())
caFile, _ := ioutil.TempFile("", "")
defer os.Remove(caFile.Name())
mutatingConfig := newMergedConfig(certFile.Name(), "cert", keyFile.Name(), "key", caFile.Name(), "ca", t)
delete(mutatingConfig.AuthInfos, mutatingConfig.Contexts[mutatingConfig.CurrentContext].AuthInfo)
errMsg := "cannot locate user red-user"
if err := MinifyConfig(&mutatingConfig); err == nil || err.Error() != errMsg {
t.Errorf("expected %v, got %v", errMsg, err)
}
}
func TestFlattenSuccess(t *testing.T) {
certFile, _ := ioutil.TempFile("", "")
defer os.Remove(certFile.Name())
keyFile, _ := ioutil.TempFile("", "")
defer os.Remove(keyFile.Name())
caFile, _ := ioutil.TempFile("", "")
defer os.Remove(caFile.Name())
certData := "cert"
keyData := "key"
caData := "ca"
unchangingCluster := "cow-cluster"
unchangingAuthInfo := "red-user"
changingCluster := "chicken-cluster"
changingAuthInfo := "blue-user"
startingConfig := newMergedConfig(certFile.Name(), certData, keyFile.Name(), keyData, caFile.Name(), caData, t)
mutatingConfig := startingConfig
if err := FlattenConfig(&mutatingConfig); err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(mutatingConfig.Contexts) != 2 {
t.Errorf("unexpected contexts: %v", mutatingConfig.Contexts)
}
if !reflect.DeepEqual(startingConfig.Contexts, mutatingConfig.Contexts) {
t.Errorf("expected %v, got %v", startingConfig.Contexts, mutatingConfig.Contexts)
}
if len(mutatingConfig.Clusters) != 2 {
t.Errorf("unexpected clusters: %v", mutatingConfig.Clusters)
}
if !reflect.DeepEqual(startingConfig.Clusters[unchangingCluster], mutatingConfig.Clusters[unchangingCluster]) {
t.Errorf("expected %v, got %v", startingConfig.Clusters[unchangingCluster], mutatingConfig.Clusters[unchangingCluster])
}
if len(mutatingConfig.Clusters[changingCluster].CertificateAuthority) != 0 {
t.Errorf("unexpected caFile")
}
if string(mutatingConfig.Clusters[changingCluster].CertificateAuthorityData) != caData {
t.Errorf("expected %v, got %v", caData, string(mutatingConfig.Clusters[changingCluster].CertificateAuthorityData))
}
if len(mutatingConfig.AuthInfos) != 2 {
t.Errorf("unexpected users: %v", mutatingConfig.AuthInfos)
}
if !reflect.DeepEqual(startingConfig.AuthInfos[unchangingAuthInfo], mutatingConfig.AuthInfos[unchangingAuthInfo]) {
t.Errorf("expected %v, got %v", startingConfig.AuthInfos[unchangingAuthInfo], mutatingConfig.AuthInfos[unchangingAuthInfo])
}
if len(mutatingConfig.AuthInfos[changingAuthInfo].ClientCertificate) != 0 {
t.Errorf("unexpected caFile")
}
if string(mutatingConfig.AuthInfos[changingAuthInfo].ClientCertificateData) != certData {
t.Errorf("expected %v, got %v", certData, string(mutatingConfig.AuthInfos[changingAuthInfo].ClientCertificateData))
}
if len(mutatingConfig.AuthInfos[changingAuthInfo].ClientKey) != 0 {
t.Errorf("unexpected caFile")
}
if string(mutatingConfig.AuthInfos[changingAuthInfo].ClientKeyData) != keyData {
t.Errorf("expected %v, got %v", keyData, string(mutatingConfig.AuthInfos[changingAuthInfo].ClientKeyData))
}
}
func Example_minifyAndShorten() {
certFile, _ := ioutil.TempFile("", "")
defer os.Remove(certFile.Name())
keyFile, _ := ioutil.TempFile("", "")
defer os.Remove(keyFile.Name())
caFile, _ := ioutil.TempFile("", "")
defer os.Remove(caFile.Name())
certData := "cert"
keyData := "key"
caData := "ca"
config := newMergedConfig(certFile.Name(), certData, keyFile.Name(), keyData, caFile.Name(), caData, nil)
MinifyConfig(&config)
ShortenConfig(&config)
output, _ := yaml.Marshal(config)
fmt.Printf("%s", string(output))
// Output:
// clusters:
// cow-cluster:
// LocationOfOrigin: ""
// certificate-authority-data: DATA+OMITTED
// server: http://cow.org:8080
// contexts:
// federal-context:
// LocationOfOrigin: ""
// cluster: cow-cluster
// user: red-user
// current-context: federal-context
// preferences: {}
// users:
// red-user:
// LocationOfOrigin: ""
// client-certificate-data: REDACTED
// client-key-data: REDACTED
// token: red-token
}
func TestShortenSuccess(t *testing.T) {
certFile, _ := ioutil.TempFile("", "")
defer os.Remove(certFile.Name())
keyFile, _ := ioutil.TempFile("", "")
defer os.Remove(keyFile.Name())
caFile, _ := ioutil.TempFile("", "")
defer os.Remove(caFile.Name())
certData := "cert"
keyData := "key"
caData := "ca"
unchangingCluster := "chicken-cluster"
unchangingAuthInfo := "blue-user"
changingCluster := "cow-cluster"
changingAuthInfo := "red-user"
startingConfig := newMergedConfig(certFile.Name(), certData, keyFile.Name(), keyData, caFile.Name(), caData, t)
mutatingConfig := startingConfig
ShortenConfig(&mutatingConfig)
if len(mutatingConfig.Contexts) != 2 {
t.Errorf("unexpected contexts: %v", mutatingConfig.Contexts)
}
if !reflect.DeepEqual(startingConfig.Contexts, mutatingConfig.Contexts) {
t.Errorf("expected %v, got %v", startingConfig.Contexts, mutatingConfig.Contexts)
}
redacted := string(redactedBytes)
dataOmitted := string(dataOmittedBytes)
if len(mutatingConfig.Clusters) != 2 {
t.Errorf("unexpected clusters: %v", mutatingConfig.Clusters)
}
if !reflect.DeepEqual(startingConfig.Clusters[unchangingCluster], mutatingConfig.Clusters[unchangingCluster]) {
t.Errorf("expected %v, got %v", startingConfig.Clusters[unchangingCluster], mutatingConfig.Clusters[unchangingCluster])
}
if string(mutatingConfig.Clusters[changingCluster].CertificateAuthorityData) != dataOmitted {
t.Errorf("expected %v, got %v", dataOmitted, string(mutatingConfig.Clusters[changingCluster].CertificateAuthorityData))
}
if len(mutatingConfig.AuthInfos) != 2 {
t.Errorf("unexpected users: %v", mutatingConfig.AuthInfos)
}
if !reflect.DeepEqual(startingConfig.AuthInfos[unchangingAuthInfo], mutatingConfig.AuthInfos[unchangingAuthInfo]) {
t.Errorf("expected %v, got %v", startingConfig.AuthInfos[unchangingAuthInfo], mutatingConfig.AuthInfos[unchangingAuthInfo])
}
if string(mutatingConfig.AuthInfos[changingAuthInfo].ClientCertificateData) != redacted {
t.Errorf("expected %v, got %v", redacted, string(mutatingConfig.AuthInfos[changingAuthInfo].ClientCertificateData))
}
if string(mutatingConfig.AuthInfos[changingAuthInfo].ClientKeyData) != redacted {
t.Errorf("expected %v, got %v", redacted, string(mutatingConfig.AuthInfos[changingAuthInfo].ClientKeyData))
}
}

View File

@@ -1,135 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
"fmt"
"sigs.k8s.io/yaml"
)
func Example_emptyConfig() {
defaultConfig := NewConfig()
output, err := yaml.Marshal(defaultConfig)
if err != nil {
fmt.Printf("Unexpected error: %v", err)
}
fmt.Printf("%v", string(output))
// Output:
// clusters: {}
// contexts: {}
// current-context: ""
// preferences: {}
// users: {}
}
func Example_ofOptionsConfig() {
defaultConfig := NewConfig()
defaultConfig.Preferences.Colors = true
defaultConfig.Clusters["alfa"] = &Cluster{
Server: "https://alfa.org:8080",
InsecureSkipTLSVerify: true,
CertificateAuthority: "path/to/my/cert-ca-filename",
}
defaultConfig.Clusters["bravo"] = &Cluster{
Server: "https://bravo.org:8080",
InsecureSkipTLSVerify: false,
}
defaultConfig.AuthInfos["white-mage-via-cert"] = &AuthInfo{
ClientCertificate: "path/to/my/client-cert-filename",
ClientKey: "path/to/my/client-key-filename",
}
defaultConfig.AuthInfos["red-mage-via-token"] = &AuthInfo{
Token: "my-secret-token",
}
defaultConfig.AuthInfos["black-mage-via-auth-provider"] = &AuthInfo{
AuthProvider: &AuthProviderConfig{
Name: "gcp",
Config: map[string]string{
"foo": "bar",
"token": "s3cr3t-t0k3n",
},
},
}
defaultConfig.Contexts["bravo-as-black-mage"] = &Context{
Cluster: "bravo",
AuthInfo: "black-mage-via-auth-provider",
Namespace: "yankee",
}
defaultConfig.Contexts["alfa-as-black-mage"] = &Context{
Cluster: "alfa",
AuthInfo: "black-mage-via-auth-provider",
Namespace: "zulu",
}
defaultConfig.Contexts["alfa-as-white-mage"] = &Context{
Cluster: "alfa",
AuthInfo: "white-mage-via-cert",
}
defaultConfig.CurrentContext = "alfa-as-white-mage"
output, err := yaml.Marshal(defaultConfig)
if err != nil {
fmt.Printf("Unexpected error: %v", err)
}
fmt.Printf("%v", string(output))
// Output:
// clusters:
// alfa:
// LocationOfOrigin: ""
// certificate-authority: path/to/my/cert-ca-filename
// insecure-skip-tls-verify: true
// server: https://alfa.org:8080
// bravo:
// LocationOfOrigin: ""
// server: https://bravo.org:8080
// contexts:
// alfa-as-black-mage:
// LocationOfOrigin: ""
// cluster: alfa
// namespace: zulu
// user: black-mage-via-auth-provider
// alfa-as-white-mage:
// LocationOfOrigin: ""
// cluster: alfa
// user: white-mage-via-cert
// bravo-as-black-mage:
// LocationOfOrigin: ""
// cluster: bravo
// namespace: yankee
// user: black-mage-via-auth-provider
// current-context: alfa-as-white-mage
// preferences:
// colors: true
// users:
// black-mage-via-auth-provider:
// LocationOfOrigin: ""
// auth-provider:
// config:
// foo: bar
// token: s3cr3t-t0k3n
// name: gcp
// red-mage-via-token:
// LocationOfOrigin: ""
// token: my-secret-token
// white-mage-via-cert:
// LocationOfOrigin: ""
// client-certificate: path/to/my/client-cert-filename
// client-key: path/to/my/client-key-filename
}

View File

@@ -1,695 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clientcmd
import (
"io/ioutil"
"net/http"
"os"
"reflect"
"strings"
"testing"
"github.com/imdario/mergo"
restclient "k8s.io/client-go/rest"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
func TestMergoSemantics(t *testing.T) {
type U struct {
A string
B int64
}
type T struct {
S []string
X string
Y int64
U U
}
var testDataStruct = []struct {
dst T
src T
expected T
}{
{
dst: T{X: "one"},
src: T{X: "two"},
expected: T{X: "two"},
},
{
dst: T{X: "one", Y: 5, U: U{A: "four", B: 6}},
src: T{X: "two", U: U{A: "three", B: 4}},
expected: T{X: "two", Y: 5, U: U{A: "three", B: 4}},
},
{
dst: T{S: []string{"test3", "test4", "test5"}},
src: T{S: []string{"test1", "test2", "test3"}},
expected: T{S: []string{"test1", "test2", "test3"}},
},
}
for _, data := range testDataStruct {
err := mergo.MergeWithOverwrite(&data.dst, &data.src)
if err != nil {
t.Errorf("error while merging: %s", err)
}
if !reflect.DeepEqual(data.dst, data.expected) {
// The mergo library has previously changed in an incompatible way.
// example:
//
// https://github.com/imdario/mergo/commit/d304790b2ed594794496464fadd89d2bb266600a
//
// This test verifies that the semantics of the merge are what we expect.
// If they are not, the mergo library may have been updated and broken
// unexpectedly.
t.Errorf("mergo.MergeWithOverwrite did not provide expected output: %+v doesn't match %+v", data.dst, data.expected)
}
}
var testDataMap = []struct {
dst map[string]int
src map[string]int
expected map[string]int
}{
{
dst: map[string]int{"rsc": 6543, "r": 2138, "gri": 1908, "adg": 912, "prt": 22},
src: map[string]int{"rsc": 3711, "r": 2138, "gri": 1908, "adg": 912},
expected: map[string]int{"rsc": 3711, "r": 2138, "gri": 1908, "adg": 912, "prt": 22},
},
}
for _, data := range testDataMap {
err := mergo.MergeWithOverwrite(&data.dst, &data.src)
if err != nil {
t.Errorf("error while merging: %s", err)
}
if !reflect.DeepEqual(data.dst, data.expected) {
// The mergo library has previously changed in an incompatible way.
// example:
//
// https://github.com/imdario/mergo/commit/d304790b2ed594794496464fadd89d2bb266600a
//
// This test verifies that the semantics of the merge are what we expect.
// If they are not, the mergo library may have been updated and broken
// unexpectedly.
t.Errorf("mergo.MergeWithOverwrite did not provide expected output: %+v doesn't match %+v", data.dst, data.expected)
}
}
}
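// exampleOverwriteSemantics is a hedged sketch (added for illustration, not
// part of the original file): with MergeWithOverwrite, non-zero src fields
// replace the corresponding dst fields, while zero-valued src fields leave
// dst untouched, which is the property the table-driven cases above pin down.
func exampleOverwriteSemantics() error {
type T struct{ X, Y string }
dst := T{X: "keep", Y: "old"}
src := T{Y: "new"} // src.X is the zero value, so dst.X survives the merge
return mergo.MergeWithOverwrite(&dst, &src) // dst is now T{X: "keep", Y: "new"}
}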
func createValidTestConfig() *clientcmdapi.Config {
const (
server = "https://anything.com:8080"
token = "the-token"
)
config := clientcmdapi.NewConfig()
config.Clusters["clean"] = &clientcmdapi.Cluster{
Server: server,
}
config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
Token: token,
}
config.Contexts["clean"] = &clientcmdapi.Context{
Cluster: "clean",
AuthInfo: "clean",
}
config.CurrentContext = "clean"
return config
}
func createCAValidTestConfig() *clientcmdapi.Config {
config := createValidTestConfig()
config.Clusters["clean"].CertificateAuthorityData = []byte{0, 0}
return config
}
func TestInsecureOverridesCA(t *testing.T) {
config := createCAValidTestConfig()
clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{
ClusterInfo: clientcmdapi.Cluster{
InsecureSkipTLSVerify: true,
},
}, nil)
actualCfg, err := clientBuilder.ClientConfig()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
matchBoolArg(true, actualCfg.Insecure, t)
matchStringArg("", actualCfg.TLSClientConfig.CAFile, t)
matchByteArg(nil, actualCfg.TLSClientConfig.CAData, t)
}
func TestMergeContext(t *testing.T) {
const namespace = "overridden-namespace"
config := createValidTestConfig()
clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{}, nil)
_, overridden, err := clientBuilder.Namespace()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if overridden {
t.Error("Expected namespace to not be overridden")
}
clientBuilder = NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{
Context: clientcmdapi.Context{
Namespace: namespace,
},
}, nil)
actual, overridden, err := clientBuilder.Namespace()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if !overridden {
t.Error("Expected namespace to be overridden")
}
matchStringArg(namespace, actual, t)
}
func TestModifyContext(t *testing.T) {
expectedCtx := map[string]bool{
"updated": true,
"clean": true,
}
tempPath, err := ioutil.TempFile("", "testclientcmd-")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
defer os.Remove(tempPath.Name())
pathOptions := NewDefaultPathOptions()
config := createValidTestConfig()
pathOptions.GlobalFile = tempPath.Name()
// define a new context and make it the current one in our path options config
config.Contexts["updated"] = &clientcmdapi.Context{
Cluster: "updated",
AuthInfo: "updated",
}
config.CurrentContext = "updated"
if err := ModifyConfig(pathOptions, *config, true); err != nil {
t.Errorf("Unexpected error: %v", err)
}
startingConfig, err := pathOptions.GetStartingConfig()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// make sure the current context was updated
matchStringArg("updated", startingConfig.CurrentContext, t)
// there should now be two contexts
if len(startingConfig.Contexts) != len(expectedCtx) {
t.Fatalf("unexpected nuber of contexts, expecting %v, but found %v", len(expectedCtx), len(startingConfig.Contexts))
}
for key := range startingConfig.Contexts {
if !expectedCtx[key] {
t.Fatalf("expected context %q to exist", key)
}
}
}
func TestCertificateData(t *testing.T) {
caData := []byte("ca-data")
certData := []byte("cert-data")
keyData := []byte("key-data")
config := clientcmdapi.NewConfig()
config.Clusters["clean"] = &clientcmdapi.Cluster{
Server: "https://localhost:8443",
CertificateAuthorityData: caData,
}
config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
ClientCertificateData: certData,
ClientKeyData: keyData,
}
config.Contexts["clean"] = &clientcmdapi.Context{
Cluster: "clean",
AuthInfo: "clean",
}
config.CurrentContext = "clean"
clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{}, nil)
clientConfig, err := clientBuilder.ClientConfig()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Make sure cert data gets into config (will override file paths)
matchByteArg(caData, clientConfig.TLSClientConfig.CAData, t)
matchByteArg(certData, clientConfig.TLSClientConfig.CertData, t)
matchByteArg(keyData, clientConfig.TLSClientConfig.KeyData, t)
}
func TestBasicAuthData(t *testing.T) {
username := "myuser"
password := "mypass"
config := clientcmdapi.NewConfig()
config.Clusters["clean"] = &clientcmdapi.Cluster{
Server: "https://localhost:8443",
}
config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
Username: username,
Password: password,
}
config.Contexts["clean"] = &clientcmdapi.Context{
Cluster: "clean",
AuthInfo: "clean",
}
config.CurrentContext = "clean"
clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{}, nil)
clientConfig, err := clientBuilder.ClientConfig()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Make sure basic auth data gets into config
matchStringArg(username, clientConfig.Username, t)
matchStringArg(password, clientConfig.Password, t)
}
func TestBasicTokenFile(t *testing.T) {
token := "exampletoken"
f, err := ioutil.TempFile("", "tokenfile")
if err != nil {
t.Errorf("Unexpected error: %v", err)
return
}
defer os.Remove(f.Name())
if err := ioutil.WriteFile(f.Name(), []byte(token), 0644); err != nil {
t.Errorf("Unexpected error: %v", err)
return
}
config := clientcmdapi.NewConfig()
config.Clusters["clean"] = &clientcmdapi.Cluster{
Server: "https://localhost:8443",
}
config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
TokenFile: f.Name(),
}
config.Contexts["clean"] = &clientcmdapi.Context{
Cluster: "clean",
AuthInfo: "clean",
}
config.CurrentContext = "clean"
clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{}, nil)
clientConfig, err := clientBuilder.ClientConfig()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var out *http.Request
clientConfig.WrapTransport(fakeTransport(func(req *http.Request) (*http.Response, error) {
out = req
return &http.Response{}, nil
})).RoundTrip(&http.Request{})
matchStringArg(token, strings.TrimPrefix(out.Header.Get("Authorization"), "Bearer "), t)
}
type fakeTransport func(*http.Request) (*http.Response, error)
func (ft fakeTransport) RoundTrip(req *http.Request) (*http.Response, error) {
return ft(req)
}
func TestPrecedenceTokenFile(t *testing.T) {
token := "exampletoken"
f, err := ioutil.TempFile("", "tokenfile")
if err != nil {
t.Errorf("Unexpected error: %v", err)
return
}
defer os.Remove(f.Name())
if err := ioutil.WriteFile(f.Name(), []byte(token), 0644); err != nil {
t.Errorf("Unexpected error: %v", err)
return
}
config := clientcmdapi.NewConfig()
config.Clusters["clean"] = &clientcmdapi.Cluster{
Server: "https://localhost:8443",
}
expectedToken := "expected"
config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
Token: expectedToken,
TokenFile: f.Name(),
}
config.Contexts["clean"] = &clientcmdapi.Context{
Cluster: "clean",
AuthInfo: "clean",
}
config.CurrentContext = "clean"
clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{}, nil)
clientConfig, err := clientBuilder.ClientConfig()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
matchStringArg(expectedToken, clientConfig.BearerToken, t)
}
func TestCreateClean(t *testing.T) {
config := createValidTestConfig()
clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{}, nil)
clientConfig, err := clientBuilder.ClientConfig()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
matchStringArg(config.Clusters["clean"].Server, clientConfig.Host, t)
matchStringArg("", clientConfig.APIPath, t)
matchBoolArg(config.Clusters["clean"].InsecureSkipTLSVerify, clientConfig.Insecure, t)
matchStringArg(config.AuthInfos["clean"].Token, clientConfig.BearerToken, t)
}
func TestCreateCleanWithPrefix(t *testing.T) {
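// An empty server must fall back to the ClusterDefaults server; otherwise the
// configured server string, including any path prefix, is used verbatim.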
tt := []struct {
server string
host string
}{
{"https://anything.com:8080/foo/bar", "https://anything.com:8080/foo/bar"},
{"http://anything.com:8080/foo/bar", "http://anything.com:8080/foo/bar"},
{"http://anything.com:8080/foo/bar/", "http://anything.com:8080/foo/bar/"},
{"http://anything.com:8080/", "http://anything.com:8080/"},
{"http://anything.com:8080//", "http://anything.com:8080//"},
{"anything.com:8080/foo/bar", "anything.com:8080/foo/bar"},
{"anything.com:8080", "anything.com:8080"},
{"anything.com", "anything.com"},
{"anything", "anything"},
{"", "http://localhost:8080"},
}
for _, tc := range tt {
config := createValidTestConfig()
cleanConfig := config.Clusters["clean"]
cleanConfig.Server = tc.server
config.Clusters["clean"] = cleanConfig
clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{
ClusterDefaults: clientcmdapi.Cluster{Server: "http://localhost:8080"},
}, nil)
clientConfig, err := clientBuilder.ClientConfig()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
matchStringArg(tc.host, clientConfig.Host, t)
}
}
func TestCreateCleanDefault(t *testing.T) {
config := createValidTestConfig()
clientBuilder := NewDefaultClientConfig(*config, &ConfigOverrides{})
clientConfig, err := clientBuilder.ClientConfig()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
matchStringArg(config.Clusters["clean"].Server, clientConfig.Host, t)
matchBoolArg(config.Clusters["clean"].InsecureSkipTLSVerify, clientConfig.Insecure, t)
matchStringArg(config.AuthInfos["clean"].Token, clientConfig.BearerToken, t)
}
func TestCreateCleanDefaultCluster(t *testing.T) {
config := createValidTestConfig()
clientBuilder := NewDefaultClientConfig(*config, &ConfigOverrides{
ClusterDefaults: clientcmdapi.Cluster{Server: "http://localhost:8080"},
})
clientConfig, err := clientBuilder.ClientConfig()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
matchStringArg(config.Clusters["clean"].Server, clientConfig.Host, t)
matchBoolArg(config.Clusters["clean"].InsecureSkipTLSVerify, clientConfig.Insecure, t)
matchStringArg(config.AuthInfos["clean"].Token, clientConfig.BearerToken, t)
}
func TestCreateMissingContextNoDefault(t *testing.T) {
const expectedErrorContains = "Context was not found for specified context"
config := createValidTestConfig()
clientBuilder := NewNonInteractiveClientConfig(*config, "not-present", &ConfigOverrides{}, nil)
_, err := clientBuilder.ClientConfig()
if err == nil {
t.Fatalf("Unexpected error: %v", err)
}
}
func TestCreateMissingContext(t *testing.T) {
const expectedErrorContains = "context was not found for specified context: not-present"
config := createValidTestConfig()
clientBuilder := NewNonInteractiveClientConfig(*config, "not-present", &ConfigOverrides{
ClusterDefaults: clientcmdapi.Cluster{Server: "http://localhost:8080"},
}, nil)
_, err := clientBuilder.ClientConfig()
if err == nil {
t.Fatalf("Expected error: %v", expectedErrorContains)
}
if !strings.Contains(err.Error(), expectedErrorContains) {
t.Fatalf("Expected error: %v, but got %v", expectedErrorContains, err)
}
}
func TestInClusterClientConfigPrecedence(t *testing.T) {
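// Each case overrides some subset of server/token/CA; any non-empty override
// field must win over the in-cluster values injected below.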
tt := []struct {
overrides *ConfigOverrides
}{
{
overrides: &ConfigOverrides{
ClusterInfo: clientcmdapi.Cluster{
Server: "https://host-from-overrides.com",
},
},
},
{
overrides: &ConfigOverrides{
AuthInfo: clientcmdapi.AuthInfo{
Token: "https://host-from-overrides.com",
},
},
},
{
overrides: &ConfigOverrides{
ClusterInfo: clientcmdapi.Cluster{
CertificateAuthority: "/path/to/ca-from-overrides.crt",
},
},
},
{
overrides: &ConfigOverrides{
ClusterInfo: clientcmdapi.Cluster{
Server: "https://host-from-overrides.com",
},
AuthInfo: clientcmdapi.AuthInfo{
Token: "https://host-from-overrides.com",
},
},
},
{
overrides: &ConfigOverrides{
ClusterInfo: clientcmdapi.Cluster{
Server: "https://host-from-overrides.com",
CertificateAuthority: "/path/to/ca-from-overrides.crt",
},
},
},
{
overrides: &ConfigOverrides{
ClusterInfo: clientcmdapi.Cluster{
CertificateAuthority: "/path/to/ca-from-overrides.crt",
},
AuthInfo: clientcmdapi.AuthInfo{
Token: "https://host-from-overrides.com",
},
},
},
{
overrides: &ConfigOverrides{
ClusterInfo: clientcmdapi.Cluster{
Server: "https://host-from-overrides.com",
CertificateAuthority: "/path/to/ca-from-overrides.crt",
},
AuthInfo: clientcmdapi.AuthInfo{
Token: "https://host-from-overrides.com",
},
},
},
{
overrides: &ConfigOverrides{},
},
}
for _, tc := range tt {
expectedServer := "https://host-from-cluster.com"
expectedToken := "token-from-cluster"
expectedCAFile := "/path/to/ca-from-cluster.crt"
icc := &inClusterClientConfig{
inClusterConfigProvider: func() (*restclient.Config, error) {
return &restclient.Config{
Host: expectedServer,
BearerToken: expectedToken,
TLSClientConfig: restclient.TLSClientConfig{
CAFile: expectedCAFile,
},
}, nil
},
overrides: tc.overrides,
}
clientConfig, err := icc.ClientConfig()
if err != nil {
t.Fatalf("Unxpected error: %v", err)
}
if overriddenServer := tc.overrides.ClusterInfo.Server; len(overriddenServer) > 0 {
expectedServer = overriddenServer
}
if overriddenToken := tc.overrides.AuthInfo.Token; len(overriddenToken) > 0 {
expectedToken = overriddenToken
}
if overriddenCAFile := tc.overrides.ClusterInfo.CertificateAuthority; len(overriddenCAFile) > 0 {
expectedCAFile = overriddenCAFile
}
if clientConfig.Host != expectedServer {
t.Errorf("Expected server %v, got %v", expectedServer, clientConfig.Host)
}
if clientConfig.BearerToken != expectedToken {
t.Errorf("Expected token %v, got %v", expectedToken, clientConfig.BearerToken)
}
if clientConfig.TLSClientConfig.CAFile != expectedCAFile {
t.Errorf("Expected Certificate Authority %v, got %v", expectedCAFile, clientConfig.TLSClientConfig.CAFile)
}
}
}
func matchBoolArg(expected, got bool, t *testing.T) {
if expected != got {
t.Errorf("Expected %v, got %v", expected, got)
}
}
func matchStringArg(expected, got string, t *testing.T) {
if expected != got {
t.Errorf("Expected %q, got %q", expected, got)
}
}
func matchByteArg(expected, got []byte, t *testing.T) {
if !reflect.DeepEqual(expected, got) {
t.Errorf("Expected %v, got %v", expected, got)
}
}
func TestNamespaceOverride(t *testing.T) {
config := &DirectClientConfig{
overrides: &ConfigOverrides{
Context: clientcmdapi.Context{
Namespace: "foo",
},
},
}
ns, overridden, err := config.Namespace()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if !overridden {
t.Errorf("Expected overridden = true")
}
matchStringArg("foo", ns, t)
}
func TestAuthConfigMerge(t *testing.T) {
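// Exec-plugin args from the kubeconfig should survive the round trip through
// BuildConfigFromFlags.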
content := `
apiVersion: v1
clusters:
- cluster:
server: https://localhost:8080
name: foo-cluster
contexts:
- context:
cluster: foo-cluster
user: foo-user
namespace: bar
name: foo-context
current-context: foo-context
kind: Config
users:
- name: foo-user
user:
exec:
apiVersion: client.authentication.k8s.io/v1alpha1
args:
- arg-1
- arg-2
command: foo-command
`
tmpfile, err := ioutil.TempFile("", "kubeconfig")
if err != nil {
t.Fatal(err)
}
defer os.Remove(tmpfile.Name())
if err := ioutil.WriteFile(tmpfile.Name(), []byte(content), 0666); err != nil {
t.Fatal(err)
}
config, err := BuildConfigFromFlags("", tmpfile.Name())
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(config.ExecProvider.Args, []string{"arg-1", "arg-2"}) {
t.Errorf("Got args %v when they should be %v\n", config.ExecProvider.Args, []string{"arg-1", "arg-2"})
}
}

View File

@@ -1,789 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clientcmd
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"reflect"
"strings"
"testing"
"sigs.k8s.io/yaml"
"k8s.io/apimachinery/pkg/runtime"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest"
)
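// The fixtures below each contribute a distinct user/cluster/context;
// testConfigConflictAlfa deliberately overlaps with testConfigAlfa to
// exercise merge-conflict resolution.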
var (
testConfigAlfa = clientcmdapi.Config{
AuthInfos: map[string]*clientcmdapi.AuthInfo{
"red-user": {Token: "red-token"}},
Clusters: map[string]*clientcmdapi.Cluster{
"cow-cluster": {Server: "http://cow.org:8080"}},
Contexts: map[string]*clientcmdapi.Context{
"federal-context": {AuthInfo: "red-user", Cluster: "cow-cluster", Namespace: "hammer-ns"}},
}
testConfigBravo = clientcmdapi.Config{
AuthInfos: map[string]*clientcmdapi.AuthInfo{
"black-user": {Token: "black-token"}},
Clusters: map[string]*clientcmdapi.Cluster{
"pig-cluster": {Server: "http://pig.org:8080"}},
Contexts: map[string]*clientcmdapi.Context{
"queen-anne-context": {AuthInfo: "black-user", Cluster: "pig-cluster", Namespace: "saw-ns"}},
}
testConfigCharlie = clientcmdapi.Config{
AuthInfos: map[string]*clientcmdapi.AuthInfo{
"green-user": {Token: "green-token"}},
Clusters: map[string]*clientcmdapi.Cluster{
"horse-cluster": {Server: "http://horse.org:8080"}},
Contexts: map[string]*clientcmdapi.Context{
"shaker-context": {AuthInfo: "green-user", Cluster: "horse-cluster", Namespace: "chisel-ns"}},
}
testConfigDelta = clientcmdapi.Config{
AuthInfos: map[string]*clientcmdapi.AuthInfo{
"blue-user": {Token: "blue-token"}},
Clusters: map[string]*clientcmdapi.Cluster{
"chicken-cluster": {Server: "http://chicken.org:8080"}},
Contexts: map[string]*clientcmdapi.Context{
"gothic-context": {AuthInfo: "blue-user", Cluster: "chicken-cluster", Namespace: "plane-ns"}},
}
testConfigConflictAlfa = clientcmdapi.Config{
AuthInfos: map[string]*clientcmdapi.AuthInfo{
"red-user": {Token: "a-different-red-token"},
"yellow-user": {Token: "yellow-token"}},
Clusters: map[string]*clientcmdapi.Cluster{
"cow-cluster": {Server: "http://a-different-cow.org:8080", InsecureSkipTLSVerify: true},
"donkey-cluster": {Server: "http://donkey.org:8080", InsecureSkipTLSVerify: true}},
CurrentContext: "federal-context",
}
)
func TestNonExistentCommandLineFile(t *testing.T) {
loadingRules := ClientConfigLoadingRules{
ExplicitPath: "bogus_file",
}
_, err := loadingRules.Load()
if err == nil {
t.Fatalf("Expected error for missing command-line file, got none")
}
if !strings.Contains(err.Error(), "bogus_file") {
t.Fatalf("Expected error about 'bogus_file', got %s", err.Error())
}
}
func TestToleratingMissingFiles(t *testing.T) {
loadingRules := ClientConfigLoadingRules{
Precedence: []string{"bogus1", "bogus2", "bogus3"},
}
_, err := loadingRules.Load()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
func TestErrorReadingFile(t *testing.T) {
commandLineFile, _ := ioutil.TempFile("", "")
defer os.Remove(commandLineFile.Name())
if err := ioutil.WriteFile(commandLineFile.Name(), []byte("bogus value"), 0644); err != nil {
t.Fatalf("Error creating tempfile: %v", err)
}
loadingRules := ClientConfigLoadingRules{
ExplicitPath: commandLineFile.Name(),
}
_, err := loadingRules.Load()
if err == nil {
t.Fatalf("Expected error for unloadable file, got none")
}
if !strings.Contains(err.Error(), commandLineFile.Name()) {
t.Fatalf("Expected error about '%s', got %s", commandLineFile.Name(), err.Error())
}
}
func TestErrorReadingNonFile(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Couldn't create tmpdir")
}
defer os.RemoveAll(tmpdir)
loadingRules := ClientConfigLoadingRules{
ExplicitPath: tmpdir,
}
_, err = loadingRules.Load()
if err == nil {
t.Fatalf("Expected error for non-file, got none")
}
if !strings.Contains(err.Error(), tmpdir) {
t.Fatalf("Expected error about '%s', got %s", tmpdir, err.Error())
}
}
func TestConflictingCurrentContext(t *testing.T) {
commandLineFile, _ := ioutil.TempFile("", "")
defer os.Remove(commandLineFile.Name())
envVarFile, _ := ioutil.TempFile("", "")
defer os.Remove(envVarFile.Name())
mockCommandLineConfig := clientcmdapi.Config{
CurrentContext: "any-context-value",
}
mockEnvVarConfig := clientcmdapi.Config{
CurrentContext: "a-different-context",
}
WriteToFile(mockCommandLineConfig, commandLineFile.Name())
WriteToFile(mockEnvVarConfig, envVarFile.Name())
loadingRules := ClientConfigLoadingRules{
ExplicitPath: commandLineFile.Name(),
Precedence: []string{envVarFile.Name()},
}
mergedConfig, err := loadingRules.Load()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if mergedConfig.CurrentContext != mockCommandLineConfig.CurrentContext {
t.Errorf("expected %v, got %v", mockCommandLineConfig.CurrentContext, mergedConfig.CurrentContext)
}
}
func TestLoadingEmptyMaps(t *testing.T) {
configFile, _ := ioutil.TempFile("", "")
defer os.Remove(configFile.Name())
mockConfig := clientcmdapi.Config{
CurrentContext: "any-context-value",
}
WriteToFile(mockConfig, configFile.Name())
config, err := LoadFromFile(configFile.Name())
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if config.Clusters == nil {
t.Error("expected config.Clusters to be non-nil")
}
if config.AuthInfos == nil {
t.Error("expected config.AuthInfos to be non-nil")
}
if config.Contexts == nil {
t.Error("expected config.Contexts to be non-nil")
}
}
func TestDuplicateClusterName(t *testing.T) {
configFile, _ := ioutil.TempFile("", "")
defer os.Remove(configFile.Name())
err := ioutil.WriteFile(configFile.Name(), []byte(`
kind: Config
apiVersion: v1
clusters:
- cluster:
api-version: v1
server: https://kubernetes.default.svc:443
certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
name: kubeconfig-cluster
- cluster:
api-version: v2
server: https://test.example.server:443
certificate-authority: /var/run/secrets/test.example.io/serviceaccount/ca.crt
name: kubeconfig-cluster
contexts:
- context:
cluster: kubeconfig-cluster
namespace: default
user: kubeconfig-user
name: kubeconfig-context
current-context: kubeconfig-context
users:
- name: kubeconfig-user
user:
tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
`), os.FileMode(0755))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
_, err = LoadFromFile(configFile.Name())
if err == nil || !strings.Contains(err.Error(),
"error converting *[]NamedCluster into *map[string]*api.Cluster: duplicate name \"kubeconfig-cluster\" in list") {
t.Error("Expected error in loading duplicate cluster name, got none")
}
}
func TestDuplicateContextName(t *testing.T) {
configFile, _ := ioutil.TempFile("", "")
defer os.Remove(configFile.Name())
err := ioutil.WriteFile(configFile.Name(), []byte(`
kind: Config
apiVersion: v1
clusters:
- cluster:
api-version: v1
server: https://kubernetes.default.svc:443
certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
name: kubeconfig-cluster
contexts:
- context:
cluster: kubeconfig-cluster
namespace: default
user: kubeconfig-user
name: kubeconfig-context
- context:
cluster: test-example-cluster
namespace: test-example
user: test-example-user
name: kubeconfig-context
current-context: kubeconfig-context
users:
- name: kubeconfig-user
user:
tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
`), os.FileMode(0755))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
_, err = LoadFromFile(configFile.Name())
if err == nil || !strings.Contains(err.Error(),
"error converting *[]NamedContext into *map[string]*api.Context: duplicate name \"kubeconfig-context\" in list") {
t.Error("Expected error in loading duplicate context name, got none")
}
}
func TestDuplicateUserName(t *testing.T) {
configFile, _ := ioutil.TempFile("", "")
defer os.Remove(configFile.Name())
err := ioutil.WriteFile(configFile.Name(), []byte(`
kind: Config
apiVersion: v1
clusters:
- cluster:
api-version: v1
server: https://kubernetes.default.svc:443
certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
name: kubeconfig-cluster
contexts:
- context:
cluster: kubeconfig-cluster
namespace: default
user: kubeconfig-user
name: kubeconfig-context
current-context: kubeconfig-context
users:
- name: kubeconfig-user
user:
tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
- name: kubeconfig-user
user:
tokenFile: /var/run/secrets/test.example.com/serviceaccount/token
`), os.FileMode(0755))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
_, err = LoadFromFile(configFile.Name())
if err == nil || !strings.Contains(err.Error(),
"error converting *[]NamedAuthInfo into *map[string]*api.AuthInfo: duplicate name \"kubeconfig-user\" in list") {
t.Error("Expected error in loading duplicate user name, got none")
}
}
func TestDuplicateExtensionName(t *testing.T) {
configFile, _ := ioutil.TempFile("", "")
defer os.Remove(configFile.Name())
err := ioutil.WriteFile(configFile.Name(), []byte(`
kind: Config
apiVersion: v1
clusters:
- cluster:
api-version: v1
server: https://kubernetes.default.svc:443
certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
name: kubeconfig-cluster
contexts:
- context:
cluster: kubeconfig-cluster
namespace: default
user: kubeconfig-user
name: kubeconfig-context
current-context: kubeconfig-context
users:
- name: kubeconfig-user
user:
tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
extensions:
- extension:
bytes: test
name: test-extension
- extension:
bytes: some-example
name: test-extension
`), os.FileMode(0755))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
_, err = LoadFromFile(configFile.Name())
if err == nil || !strings.Contains(err.Error(),
"error converting *[]NamedExtension into *map[string]runtime.Object: duplicate name \"test-extension\" in list") {
t.Error("Expected error in loading duplicate extension name, got none")
}
}
func TestResolveRelativePaths(t *testing.T) {
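// Relative certificate, key, and exec-command paths must resolve against the
// directory of the kubeconfig file that declared them; absolute paths and
// bare commands looked up on PATH must pass through untouched.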
pathResolutionConfig1 := clientcmdapi.Config{
AuthInfos: map[string]*clientcmdapi.AuthInfo{
"relative-user-1": {ClientCertificate: "relative/client/cert", ClientKey: "../relative/client/key"},
"absolute-user-1": {ClientCertificate: "/absolute/client/cert", ClientKey: "/absolute/client/key"},
"relative-cmd-1": {Exec: &clientcmdapi.ExecConfig{Command: "../relative/client/cmd"}},
"absolute-cmd-1": {Exec: &clientcmdapi.ExecConfig{Command: "/absolute/client/cmd"}},
"PATH-cmd-1": {Exec: &clientcmdapi.ExecConfig{Command: "cmd"}},
},
Clusters: map[string]*clientcmdapi.Cluster{
"relative-server-1": {CertificateAuthority: "../relative/ca"},
"absolute-server-1": {CertificateAuthority: "/absolute/ca"},
},
}
pathResolutionConfig2 := clientcmdapi.Config{
AuthInfos: map[string]*clientcmdapi.AuthInfo{
"relative-user-2": {ClientCertificate: "relative/client/cert2", ClientKey: "../relative/client/key2"},
"absolute-user-2": {ClientCertificate: "/absolute/client/cert2", ClientKey: "/absolute/client/key2"},
},
Clusters: map[string]*clientcmdapi.Cluster{
"relative-server-2": {CertificateAuthority: "../relative/ca2"},
"absolute-server-2": {CertificateAuthority: "/absolute/ca2"},
},
}
configDir1, _ := ioutil.TempDir("", "")
defer os.RemoveAll(configDir1)
configFile1 := path.Join(configDir1, ".kubeconfig")
configDir1, _ = filepath.Abs(configDir1)
configDir2, _ := ioutil.TempDir("", "")
defer os.RemoveAll(configDir2)
configDir2, _ = ioutil.TempDir(configDir2, "")
configFile2 := path.Join(configDir2, ".kubeconfig")
configDir2, _ = filepath.Abs(configDir2)
WriteToFile(pathResolutionConfig1, configFile1)
WriteToFile(pathResolutionConfig2, configFile2)
loadingRules := ClientConfigLoadingRules{
Precedence: []string{configFile1, configFile2},
}
mergedConfig, err := loadingRules.Load()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
foundClusterCount := 0
for key, cluster := range mergedConfig.Clusters {
if key == "relative-server-1" {
foundClusterCount++
matchStringArg(path.Join(configDir1, pathResolutionConfig1.Clusters["relative-server-1"].CertificateAuthority), cluster.CertificateAuthority, t)
}
if key == "relative-server-2" {
foundClusterCount++
matchStringArg(path.Join(configDir2, pathResolutionConfig2.Clusters["relative-server-2"].CertificateAuthority), cluster.CertificateAuthority, t)
}
if key == "absolute-server-1" {
foundClusterCount++
matchStringArg(pathResolutionConfig1.Clusters["absolute-server-1"].CertificateAuthority, cluster.CertificateAuthority, t)
}
if key == "absolute-server-2" {
foundClusterCount++
matchStringArg(pathResolutionConfig2.Clusters["absolute-server-2"].CertificateAuthority, cluster.CertificateAuthority, t)
}
}
if foundClusterCount != 4 {
t.Errorf("Expected 4 clusters, found %v: %v", foundClusterCount, mergedConfig.Clusters)
}
foundAuthInfoCount := 0
for key, authInfo := range mergedConfig.AuthInfos {
if key == "relative-user-1" {
foundAuthInfoCount++
matchStringArg(path.Join(configDir1, pathResolutionConfig1.AuthInfos["relative-user-1"].ClientCertificate), authInfo.ClientCertificate, t)
matchStringArg(path.Join(configDir1, pathResolutionConfig1.AuthInfos["relative-user-1"].ClientKey), authInfo.ClientKey, t)
}
if key == "relative-user-2" {
foundAuthInfoCount++
matchStringArg(path.Join(configDir2, pathResolutionConfig2.AuthInfos["relative-user-2"].ClientCertificate), authInfo.ClientCertificate, t)
matchStringArg(path.Join(configDir2, pathResolutionConfig2.AuthInfos["relative-user-2"].ClientKey), authInfo.ClientKey, t)
}
if key == "absolute-user-1" {
foundAuthInfoCount++
matchStringArg(pathResolutionConfig1.AuthInfos["absolute-user-1"].ClientCertificate, authInfo.ClientCertificate, t)
matchStringArg(pathResolutionConfig1.AuthInfos["absolute-user-1"].ClientKey, authInfo.ClientKey, t)
}
if key == "absolute-user-2" {
foundAuthInfoCount++
matchStringArg(pathResolutionConfig2.AuthInfos["absolute-user-2"].ClientCertificate, authInfo.ClientCertificate, t)
matchStringArg(pathResolutionConfig2.AuthInfos["absolute-user-2"].ClientKey, authInfo.ClientKey, t)
}
if key == "relative-cmd-1" {
foundAuthInfoCount++
matchStringArg(path.Join(configDir1, pathResolutionConfig1.AuthInfos[key].Exec.Command), authInfo.Exec.Command, t)
}
if key == "absolute-cmd-1" {
foundAuthInfoCount++
matchStringArg(pathResolutionConfig1.AuthInfos[key].Exec.Command, authInfo.Exec.Command, t)
}
if key == "PATH-cmd-1" {
foundAuthInfoCount++
matchStringArg(pathResolutionConfig1.AuthInfos[key].Exec.Command, authInfo.Exec.Command, t)
}
}
if foundAuthInfoCount != 7 {
t.Errorf("Expected 7 users, found %v: %v", foundAuthInfoCount, mergedConfig.AuthInfos)
}
}
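// The migration tests below verify that a migration rule copies the source
// kubeconfig to the destination only when the destination does not already
// exist, and that a missing source is silently skipped.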
func TestMigratingFile(t *testing.T) {
sourceFile, _ := ioutil.TempFile("", "")
defer os.Remove(sourceFile.Name())
destinationFile, _ := ioutil.TempFile("", "")
// delete the file so that we'll write to it
os.Remove(destinationFile.Name())
WriteToFile(testConfigAlfa, sourceFile.Name())
loadingRules := ClientConfigLoadingRules{
MigrationRules: map[string]string{destinationFile.Name(): sourceFile.Name()},
}
if _, err := loadingRules.Load(); err != nil {
t.Errorf("unexpected error %v", err)
}
// the load should have recreated this file
defer os.Remove(destinationFile.Name())
sourceContent, err := ioutil.ReadFile(sourceFile.Name())
if err != nil {
t.Errorf("unexpected error %v", err)
}
destinationContent, err := ioutil.ReadFile(destinationFile.Name())
if err != nil {
t.Errorf("unexpected error %v", err)
}
if !reflect.DeepEqual(sourceContent, destinationContent) {
t.Errorf("source and destination do not match")
}
}
func TestMigratingFileLeaveExistingFileAlone(t *testing.T) {
sourceFile, _ := ioutil.TempFile("", "")
defer os.Remove(sourceFile.Name())
destinationFile, _ := ioutil.TempFile("", "")
defer os.Remove(destinationFile.Name())
WriteToFile(testConfigAlfa, sourceFile.Name())
loadingRules := ClientConfigLoadingRules{
MigrationRules: map[string]string{destinationFile.Name(): sourceFile.Name()},
}
if _, err := loadingRules.Load(); err != nil {
t.Errorf("unexpected error %v", err)
}
destinationContent, err := ioutil.ReadFile(destinationFile.Name())
if err != nil {
t.Errorf("unexpected error %v", err)
}
if len(destinationContent) > 0 {
t.Errorf("destination should not have been touched")
}
}
func TestMigratingFileSourceMissingSkip(t *testing.T) {
sourceFilename := "some-missing-file"
destinationFile, _ := ioutil.TempFile("", "")
// delete the file so that we'll write to it
os.Remove(destinationFile.Name())
loadingRules := ClientConfigLoadingRules{
MigrationRules: map[string]string{destinationFile.Name(): sourceFilename},
}
if _, err := loadingRules.Load(); err != nil {
t.Errorf("unexpected error %v", err)
}
if _, err := os.Stat(destinationFile.Name()); !os.IsNotExist(err) {
t.Errorf("destination should not exist")
}
}
func TestFileLocking(t *testing.T) {
f, _ := ioutil.TempFile("", "")
defer os.Remove(f.Name())
err := lockFile(f.Name())
if err != nil {
t.Errorf("unexpected error while locking file: %v", err)
}
defer unlockFile(f.Name())
err = lockFile(f.Name())
if err == nil {
t.Error("expected error while locking file.")
}
}
func Example_noMergingOnExplicitPaths() {
commandLineFile, _ := ioutil.TempFile("", "")
defer os.Remove(commandLineFile.Name())
envVarFile, _ := ioutil.TempFile("", "")
defer os.Remove(envVarFile.Name())
WriteToFile(testConfigAlfa, commandLineFile.Name())
WriteToFile(testConfigConflictAlfa, envVarFile.Name())
loadingRules := ClientConfigLoadingRules{
ExplicitPath: commandLineFile.Name(),
Precedence: []string{envVarFile.Name()},
}
mergedConfig, err := loadingRules.Load()
if err != nil {
fmt.Printf("Unexpected error: %v", err)
}
json, err := runtime.Encode(clientcmdlatest.Codec, mergedConfig)
if err != nil {
fmt.Printf("Unexpected error: %v", err)
}
output, err := yaml.JSONToYAML(json)
if err != nil {
fmt.Printf("Unexpected error: %v", err)
}
fmt.Printf("%v", string(output))
// Output:
// apiVersion: v1
// clusters:
// - cluster:
// server: http://cow.org:8080
// name: cow-cluster
// contexts:
// - context:
// cluster: cow-cluster
// namespace: hammer-ns
// user: red-user
// name: federal-context
// current-context: ""
// kind: Config
// preferences: {}
// users:
// - name: red-user
// user:
// token: red-token
}
func Example_mergingSomeWithConflict() {
commandLineFile, _ := ioutil.TempFile("", "")
defer os.Remove(commandLineFile.Name())
envVarFile, _ := ioutil.TempFile("", "")
defer os.Remove(envVarFile.Name())
WriteToFile(testConfigAlfa, commandLineFile.Name())
WriteToFile(testConfigConflictAlfa, envVarFile.Name())
loadingRules := ClientConfigLoadingRules{
Precedence: []string{commandLineFile.Name(), envVarFile.Name()},
}
mergedConfig, err := loadingRules.Load()
if err != nil {
fmt.Printf("Unexpected error: %v", err)
}
json, err := runtime.Encode(clientcmdlatest.Codec, mergedConfig)
if err != nil {
fmt.Printf("Unexpected error: %v", err)
}
output, err := yaml.JSONToYAML(json)
if err != nil {
fmt.Printf("Unexpected error: %v", err)
}
fmt.Printf("%v", string(output))
// Output:
// apiVersion: v1
// clusters:
// - cluster:
// server: http://cow.org:8080
// name: cow-cluster
// - cluster:
// insecure-skip-tls-verify: true
// server: http://donkey.org:8080
// name: donkey-cluster
// contexts:
// - context:
// cluster: cow-cluster
// namespace: hammer-ns
// user: red-user
// name: federal-context
// current-context: federal-context
// kind: Config
// preferences: {}
// users:
// - name: red-user
// user:
// token: red-token
// - name: yellow-user
// user:
// token: yellow-token
}
func Example_mergingEverythingNoConflicts() {
commandLineFile, _ := ioutil.TempFile("", "")
defer os.Remove(commandLineFile.Name())
envVarFile, _ := ioutil.TempFile("", "")
defer os.Remove(envVarFile.Name())
currentDirFile, _ := ioutil.TempFile("", "")
defer os.Remove(currentDirFile.Name())
homeDirFile, _ := ioutil.TempFile("", "")
defer os.Remove(homeDirFile.Name())
WriteToFile(testConfigAlfa, commandLineFile.Name())
WriteToFile(testConfigBravo, envVarFile.Name())
WriteToFile(testConfigCharlie, currentDirFile.Name())
WriteToFile(testConfigDelta, homeDirFile.Name())
loadingRules := ClientConfigLoadingRules{
Precedence: []string{commandLineFile.Name(), envVarFile.Name(), currentDirFile.Name(), homeDirFile.Name()},
}
mergedConfig, err := loadingRules.Load()
if err != nil {
fmt.Printf("Unexpected error: %v", err)
}
json, err := runtime.Encode(clientcmdlatest.Codec, mergedConfig)
if err != nil {
fmt.Printf("Unexpected error: %v", err)
}
output, err := yaml.JSONToYAML(json)
if err != nil {
fmt.Printf("Unexpected error: %v", err)
}
fmt.Printf("%v", string(output))
// Output:
// apiVersion: v1
// clusters:
// - cluster:
// server: http://chicken.org:8080
// name: chicken-cluster
// - cluster:
// server: http://cow.org:8080
// name: cow-cluster
// - cluster:
// server: http://horse.org:8080
// name: horse-cluster
// - cluster:
// server: http://pig.org:8080
// name: pig-cluster
// contexts:
// - context:
// cluster: cow-cluster
// namespace: hammer-ns
// user: red-user
// name: federal-context
// - context:
// cluster: chicken-cluster
// namespace: plane-ns
// user: blue-user
// name: gothic-context
// - context:
// cluster: pig-cluster
// namespace: saw-ns
// user: black-user
// name: queen-anne-context
// - context:
// cluster: horse-cluster
// namespace: chisel-ns
// user: green-user
// name: shaker-context
// current-context: ""
// kind: Config
// preferences: {}
// users:
// - name: black-user
// user:
// token: black-token
// - name: blue-user
// user:
// token: blue-token
// - name: green-user
// user:
// token: green-token
// - name: red-user
// user:
// token: red-token
}
func TestDeduplicate(t *testing.T) {
testCases := []struct {
src []string
expect []string
}{
{
src: []string{"a", "b", "c", "d", "e", "f"},
expect: []string{"a", "b", "c", "d", "e", "f"},
},
{
src: []string{"a", "b", "c", "b", "e", "f"},
expect: []string{"a", "b", "c", "e", "f"},
},
{
src: []string{"a", "a", "b", "b", "c", "b"},
expect: []string{"a", "b", "c"},
},
}
for _, testCase := range testCases {
get := deduplicate(testCase.src)
if !reflect.DeepEqual(get, testCase.expect) {
t.Errorf("expect: %v, get: %v", testCase.expect, get)
}
}
}

View File

@@ -1,328 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clientcmd
import (
"fmt"
"testing"
restclient "k8s.io/client-go/rest"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
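// testLoader, testClientConfig, and testICC are lightweight fakes that record
// whether they were consulted, letting the tests below assert exactly when
// the in-cluster config path is checked.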
type testLoader struct {
ClientConfigLoader
called bool
config *clientcmdapi.Config
err error
}
func (l *testLoader) Load() (*clientcmdapi.Config, error) {
l.called = true
return l.config, l.err
}
type testClientConfig struct {
config *restclient.Config
namespace string
namespaceSpecified bool
err error
}
func (c *testClientConfig) RawConfig() (clientcmdapi.Config, error) {
return clientcmdapi.Config{}, fmt.Errorf("unexpected call")
}
func (c *testClientConfig) ClientConfig() (*restclient.Config, error) {
return c.config, c.err
}
func (c *testClientConfig) Namespace() (string, bool, error) {
return c.namespace, c.namespaceSpecified, c.err
}
func (c *testClientConfig) ConfigAccess() ConfigAccess {
return nil
}
type testICC struct {
testClientConfig
possible bool
called bool
}
func (icc *testICC) Possible() bool {
icc.called = true
return icc.possible
}
func TestInClusterConfig(t *testing.T) {
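// checkedICC records whether the in-cluster provider was consulted; it should
// be consulted only when loading yields ErrEmptyConfig or the resulting
// config equals the (valid) default.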
default1 := &DirectClientConfig{
config: *createValidTestConfig(),
contextName: "clean",
overrides: &ConfigOverrides{},
}
invalidDefaultConfig := clientcmdapi.NewConfig()
invalidDefaultConfig.Clusters["clean"] = &clientcmdapi.Cluster{
Server: "http://localhost:8080",
}
invalidDefaultConfig.Contexts["other"] = &clientcmdapi.Context{
Cluster: "clean",
}
invalidDefaultConfig.CurrentContext = "clean"
defaultInvalid := &DirectClientConfig{
config: *invalidDefaultConfig,
overrides: &ConfigOverrides{},
}
if _, err := defaultInvalid.ClientConfig(); err == nil || !IsConfigurationInvalid(err) {
t.Fatal(err)
}
config1, err := default1.ClientConfig()
if err != nil {
t.Fatal(err)
}
config2 := &restclient.Config{Host: "config2"}
err1 := fmt.Errorf("unique error")
testCases := map[string]struct {
clientConfig *testClientConfig
icc *testICC
defaultConfig *DirectClientConfig
checkedICC bool
result *restclient.Config
err error
}{
"in-cluster checked on other error": {
clientConfig: &testClientConfig{err: ErrEmptyConfig},
icc: &testICC{},
checkedICC: true,
result: nil,
err: ErrEmptyConfig,
},
"in-cluster not checked on non-empty error": {
clientConfig: &testClientConfig{err: ErrEmptyCluster},
icc: &testICC{},
checkedICC: false,
result: nil,
err: ErrEmptyCluster,
},
"in-cluster checked when config is default": {
defaultConfig: default1,
clientConfig: &testClientConfig{config: config1},
icc: &testICC{},
checkedICC: true,
result: config1,
err: nil,
},
"in-cluster not checked when default config is invalid": {
defaultConfig: defaultInvalid,
clientConfig: &testClientConfig{config: config1},
icc: &testICC{},
checkedICC: false,
result: config1,
err: nil,
},
"in-cluster not checked when config is not equal to default": {
defaultConfig: default1,
clientConfig: &testClientConfig{config: config2},
icc: &testICC{},
checkedICC: false,
result: config2,
err: nil,
},
"in-cluster checked when config is not equal to default and error is empty": {
clientConfig: &testClientConfig{config: config2, err: ErrEmptyConfig},
icc: &testICC{},
checkedICC: true,
result: config2,
err: ErrEmptyConfig,
},
"in-cluster error returned when config is empty": {
clientConfig: &testClientConfig{err: ErrEmptyConfig},
icc: &testICC{
possible: true,
testClientConfig: testClientConfig{
err: err1,
},
},
checkedICC: true,
result: nil,
err: err1,
},
"in-cluster config returned when config is empty": {
clientConfig: &testClientConfig{err: ErrEmptyConfig},
icc: &testICC{
possible: true,
testClientConfig: testClientConfig{
config: config2,
},
},
checkedICC: true,
result: config2,
err: nil,
},
"in-cluster not checked when standard default is invalid": {
defaultConfig: &DefaultClientConfig,
clientConfig: &testClientConfig{config: config2},
icc: &testICC{},
checkedICC: false,
result: config2,
err: nil,
},
}
for name, test := range testCases {
c := &DeferredLoadingClientConfig{icc: test.icc}
c.loader = &ClientConfigLoadingRules{DefaultClientConfig: test.defaultConfig}
c.clientConfig = test.clientConfig
cfg, err := c.ClientConfig()
if test.icc.called != test.checkedICC {
t.Errorf("%s: unexpected in-cluster-config call %t", name, test.icc.called)
}
if err != test.err || cfg != test.result {
t.Errorf("%s: unexpected result: %v %#v", name, err, cfg)
}
}
}
func TestInClusterConfigNamespace(t *testing.T) {
err1 := fmt.Errorf("unique error")
testCases := map[string]struct {
clientConfig *testClientConfig
icc *testICC
checkedICC bool
result string
ok bool
err error
}{
"in-cluster checked on empty error": {
clientConfig: &testClientConfig{err: ErrEmptyConfig},
icc: &testICC{},
checkedICC: true,
err: ErrEmptyConfig,
},
"in-cluster not checked on non-empty error": {
clientConfig: &testClientConfig{err: ErrEmptyCluster},
icc: &testICC{},
err: ErrEmptyCluster,
},
"in-cluster checked when config is default": {
clientConfig: &testClientConfig{},
icc: &testICC{},
checkedICC: true,
},
"in-cluster not checked when config is not equal to default": {
clientConfig: &testClientConfig{namespace: "test", namespaceSpecified: true},
icc: &testICC{},
result: "test",
ok: true,
},
"in-cluster checked when namespace is not specified, but is defaulted": {
clientConfig: &testClientConfig{namespace: "test", namespaceSpecified: false},
icc: &testICC{},
checkedICC: true,
result: "test",
ok: false,
},
"in-cluster error returned when config is empty": {
clientConfig: &testClientConfig{err: ErrEmptyConfig},
icc: &testICC{
possible: true,
testClientConfig: testClientConfig{
err: err1,
},
},
checkedICC: true,
err: err1,
},
"in-cluster config returned when config is empty": {
clientConfig: &testClientConfig{err: ErrEmptyConfig},
icc: &testICC{
possible: true,
testClientConfig: testClientConfig{
namespace: "test",
namespaceSpecified: true,
},
},
checkedICC: true,
result: "test",
ok: true,
},
"in-cluster config returned when config is empty and namespace is defaulted but not explicitly set": {
clientConfig: &testClientConfig{err: ErrEmptyConfig},
icc: &testICC{
possible: true,
testClientConfig: testClientConfig{
namespace: "test",
namespaceSpecified: false,
},
},
checkedICC: true,
result: "test",
ok: false,
},
}
for name, test := range testCases {
c := &DeferredLoadingClientConfig{icc: test.icc}
c.clientConfig = test.clientConfig
ns, ok, err := c.Namespace()
if test.icc.called != test.checkedICC {
t.Errorf("%s: unexpected in-cluster-config call %t", name, test.icc.called)
}
if err != test.err || ns != test.result || ok != test.ok {
t.Errorf("%s: unexpected result: %v %s %t", name, err, ns, ok)
}
}
}

View File

@@ -1,50 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clientcmd
import (
"testing"
"github.com/spf13/pflag"
)
func TestNamespacePrefixStrip(t *testing.T) {
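// BindOverrideFlags should strip a namespaces/, namespace/, or ns/ prefix
// (in any case) from --namespace, but leave a bare prefix with no name
// following it untouched.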
testData := map[string]string{
"namespaces/foo": "foo",
"NAMESPACES/foo": "foo",
"NameSpaces/foo": "foo",
"namespace/foo": "foo",
"NAMESPACE/foo": "foo",
"nameSpace/foo": "foo",
"ns/foo": "foo",
"NS/foo": "foo",
"namespaces/": "namespaces/",
"namespace/": "namespace/",
"ns/": "ns/",
}
for before, after := range testData {
overrides := &ConfigOverrides{}
fs := &pflag.FlagSet{}
BindOverrideFlags(overrides, fs, RecommendedConfigOverrideFlags(""))
fs.Parse([]string{"--namespace", before})
if overrides.Context.Namespace != after {
t.Fatalf("Expected %s, got %s", after, overrides.Context.Namespace)
}
}
}

View File

@@ -1,574 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clientcmd
import (
"io/ioutil"
"os"
"strings"
"testing"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
func TestConfirmUsableBadInfoButOkConfig(t *testing.T) {
config := clientcmdapi.NewConfig()
config.Clusters["missing ca"] = &clientcmdapi.Cluster{
Server: "anything",
CertificateAuthority: "missing",
}
config.AuthInfos["error"] = &clientcmdapi.AuthInfo{
Username: "anything",
Token: "here",
}
config.Contexts["dirty"] = &clientcmdapi.Context{
Cluster: "missing ca",
AuthInfo: "error",
}
config.Clusters["clean"] = &clientcmdapi.Cluster{
Server: "anything",
}
config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
Token: "here",
}
config.Contexts["clean"] = &clientcmdapi.Context{
Cluster: "clean",
AuthInfo: "clean",
}
badValidation := configValidationTest{
config: config,
expectedErrorSubstring: []string{"unable to read certificate-authority"},
}
okTest := configValidationTest{
config: config,
}
okTest.testConfirmUsable("clean", t)
badValidation.testConfig(t)
}
func TestConfirmUsableBadInfoConfig(t *testing.T) {
config := clientcmdapi.NewConfig()
config.Clusters["missing ca"] = &clientcmdapi.Cluster{
Server: "anything",
CertificateAuthority: "missing",
}
config.AuthInfos["error"] = &clientcmdapi.AuthInfo{
Username: "anything",
Token: "here",
}
config.Contexts["first"] = &clientcmdapi.Context{
Cluster: "missing ca",
AuthInfo: "error",
}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"unable to read certificate-authority"},
}
test.testConfirmUsable("first", t)
}
func TestConfirmUsableEmptyConfig(t *testing.T) {
config := clientcmdapi.NewConfig()
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"invalid configuration: no configuration has been provided"},
}
test.testConfirmUsable("", t)
}
func TestConfirmUsableMissingConfig(t *testing.T) {
config := clientcmdapi.NewConfig()
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"invalid configuration: no configuration has been provided"},
}
test.testConfirmUsable("not-here", t)
}
func TestValidateEmptyConfig(t *testing.T) {
config := clientcmdapi.NewConfig()
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"invalid configuration: no configuration has been provided"},
}
test.testConfig(t)
}
func TestValidateMissingCurrentContextConfig(t *testing.T) {
config := clientcmdapi.NewConfig()
config.CurrentContext = "anything"
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"context was not found for specified "},
}
test.testConfig(t)
}
func TestIsContextNotFound(t *testing.T) {
config := clientcmdapi.NewConfig()
config.CurrentContext = "anything"
err := Validate(*config)
if !IsContextNotFound(err) {
t.Errorf("Expected context not found, but got %v", err)
}
if !IsConfigurationInvalid(err) {
t.Errorf("Expected configuration invalid, but got %v", err)
}
}
func TestIsEmptyConfig(t *testing.T) {
config := clientcmdapi.NewConfig()
err := Validate(*config)
if !IsEmptyConfig(err) {
t.Errorf("Expected context not found, but got %v", err)
}
if !IsConfigurationInvalid(err) {
t.Errorf("Expected configuration invalid, but got %v", err)
}
}
func TestIsConfigurationInvalid(t *testing.T) {
if newErrConfigurationInvalid([]error{}) != nil {
t.Errorf("unexpected error")
}
if newErrConfigurationInvalid([]error{ErrNoContext}) == ErrNoContext {
t.Errorf("unexpected error")
}
if newErrConfigurationInvalid([]error{ErrNoContext, ErrNoContext}) == nil {
t.Errorf("unexpected error")
}
if !IsConfigurationInvalid(newErrConfigurationInvalid([]error{ErrNoContext, ErrNoContext})) {
t.Errorf("unexpected error")
}
}
func TestValidateMissingReferencesConfig(t *testing.T) {
config := clientcmdapi.NewConfig()
config.CurrentContext = "anything"
config.Contexts["anything"] = &clientcmdapi.Context{Cluster: "missing", AuthInfo: "missing"}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"user \"missing\" was not found for context \"anything\"", "cluster \"missing\" was not found for context \"anything\""},
}
test.testContext("anything", t)
test.testConfig(t)
}
func TestValidateEmptyContext(t *testing.T) {
config := clientcmdapi.NewConfig()
config.CurrentContext = "anything"
config.Contexts["anything"] = &clientcmdapi.Context{}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"user was not specified for context \"anything\"", "cluster was not specified for context \"anything\""},
}
test.testContext("anything", t)
test.testConfig(t)
}
func TestValidateEmptyContextName(t *testing.T) {
config := clientcmdapi.NewConfig()
config.CurrentContext = "anything"
config.Contexts[""] = &clientcmdapi.Context{Cluster: "missing", AuthInfo: "missing"}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"empty context name", "is not allowed"},
}
test.testContext("", t)
test.testConfig(t)
}
func TestValidateEmptyClusterInfo(t *testing.T) {
config := clientcmdapi.NewConfig()
config.Clusters["empty"] = clientcmdapi.NewCluster()
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"cluster has no server defined"},
}
test.testCluster("empty", t)
test.testConfig(t)
}
func TestValidateClusterInfoErrEmptyCluster(t *testing.T) {
cluster := clientcmdapi.NewCluster()
errs := validateClusterInfo("", *cluster)
if len(errs) != 1 {
t.Fatalf("unexpected errors: %v", errs)
}
if errs[0] != ErrEmptyCluster {
t.Errorf("unexpected error: %v", errs[0])
}
}
func TestValidateMissingCAFileClusterInfo(t *testing.T) {
config := clientcmdapi.NewConfig()
config.Clusters["missing ca"] = &clientcmdapi.Cluster{
Server: "anything",
CertificateAuthority: "missing",
}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"unable to read certificate-authority"},
}
test.testCluster("missing ca", t)
test.testConfig(t)
}
func TestValidateCleanClusterInfo(t *testing.T) {
config := clientcmdapi.NewConfig()
config.Clusters["clean"] = &clientcmdapi.Cluster{
Server: "anything",
}
test := configValidationTest{
config: config,
}
test.testCluster("clean", t)
test.testConfig(t)
}
func TestValidateCleanWithCAClusterInfo(t *testing.T) {
tempFile, _ := ioutil.TempFile("", "")
defer os.Remove(tempFile.Name())
config := clientcmdapi.NewConfig()
config.Clusters["clean"] = &clientcmdapi.Cluster{
Server: "anything",
CertificateAuthority: tempFile.Name(),
}
test := configValidationTest{
config: config,
}
test.testCluster("clean", t)
test.testConfig(t)
}
func TestValidateEmptyAuthInfo(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["error"] = &clientcmdapi.AuthInfo{}
test := configValidationTest{
config: config,
}
test.testAuthInfo("error", t)
test.testConfig(t)
}
func TestValidateCertFilesNotFoundAuthInfo(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["error"] = &clientcmdapi.AuthInfo{
ClientCertificate: "missing",
ClientKey: "missing",
}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"unable to read client-cert", "unable to read client-key"},
}
test.testAuthInfo("error", t)
test.testConfig(t)
}
func TestValidateCertDataOverridesFiles(t *testing.T) {
tempFile, _ := ioutil.TempFile("", "")
defer os.Remove(tempFile.Name())
config := clientcmdapi.NewConfig()
config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
ClientCertificate: tempFile.Name(),
ClientCertificateData: []byte("certdata"),
ClientKey: tempFile.Name(),
ClientKeyData: []byte("keydata"),
}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"client-cert-data and client-cert are both specified", "client-key-data and client-key are both specified"},
}
test.testAuthInfo("clean", t)
test.testConfig(t)
}
func TestValidateCleanCertFilesAuthInfo(t *testing.T) {
tempFile, _ := ioutil.TempFile("", "")
defer os.Remove(tempFile.Name())
config := clientcmdapi.NewConfig()
config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
ClientCertificate: tempFile.Name(),
ClientKey: tempFile.Name(),
}
test := configValidationTest{
config: config,
}
test.testAuthInfo("clean", t)
test.testConfig(t)
}
func TestValidateCleanTokenAuthInfo(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
Token: "any-value",
}
test := configValidationTest{
config: config,
}
test.testAuthInfo("clean", t)
test.testConfig(t)
}
func TestValidateMultipleMethodsAuthInfo(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["error"] = &clientcmdapi.AuthInfo{
Token: "token",
Username: "username",
}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"more than one authentication method", "token", "basicAuth"},
}
test.testAuthInfo("error", t)
test.testConfig(t)
}
func TestValidateAuthInfoExec(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["user"] = &clientcmdapi.AuthInfo{
Exec: &clientcmdapi.ExecConfig{
Command: "/bin/example",
APIVersion: "clientauthentication.k8s.io/v1alpha1",
Args: []string{"hello", "world"},
Env: []clientcmdapi.ExecEnvVar{
{Name: "foo", Value: "bar"},
},
},
}
test := configValidationTest{
config: config,
}
test.testAuthInfo("user", t)
test.testConfig(t)
}
func TestValidateAuthInfoExecNoVersion(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["user"] = &clientcmdapi.AuthInfo{
Exec: &clientcmdapi.ExecConfig{
Command: "/bin/example",
},
}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{
"apiVersion must be specified for user to use exec authentication plugin",
},
}
test.testAuthInfo("user", t)
test.testConfig(t)
}
func TestValidateAuthInfoExecNoCommand(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["user"] = &clientcmdapi.AuthInfo{
Exec: &clientcmdapi.ExecConfig{
APIVersion: "clientauthentication.k8s.io/v1alpha1",
},
}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{
"command must be specified for user to use exec authentication plugin",
},
}
test.testAuthInfo("user", t)
test.testConfig(t)
}
func TestValidateAuthInfoExecWithAuthProvider(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["user"] = &clientcmdapi.AuthInfo{
AuthProvider: &clientcmdapi.AuthProviderConfig{
Name: "oidc",
},
Exec: &clientcmdapi.ExecConfig{
Command: "/bin/example",
APIVersion: "clientauthentication.k8s.io/v1alpha1",
},
}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{
"authProvider cannot be provided in combination with an exec plugin for user",
},
}
test.testAuthInfo("user", t)
test.testConfig(t)
}
func TestValidateAuthInfoExecInvalidEnv(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["user"] = &clientcmdapi.AuthInfo{
Exec: &clientcmdapi.ExecConfig{
Command: "/bin/example",
APIVersion: "clientauthentication.k8s.io/v1alpha1",
Env: []clientcmdapi.ExecEnvVar{
{Name: "foo"}, // No value
},
},
}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{
"env variable foo value must be specified for user to use exec authentication plugin",
},
}
test.testAuthInfo("user", t)
test.testConfig(t)
}
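// configValidationTest drives a validation helper against a config and
// asserts that every expected substring appears in the aggregated error
// (or that no error occurs when none is expected).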
type configValidationTest struct {
config *clientcmdapi.Config
expectedErrorSubstring []string
}
func (c configValidationTest) testContext(contextName string, t *testing.T) {
errs := validateContext(contextName, *c.config.Contexts[contextName], *c.config)
if len(c.expectedErrorSubstring) != 0 {
if len(errs) == 0 {
t.Errorf("Expected error containing: %v", c.expectedErrorSubstring)
}
for _, curr := range c.expectedErrorSubstring {
if len(errs) != 0 && !strings.Contains(utilerrors.NewAggregate(errs).Error(), curr) {
t.Errorf("Expected error containing: %v, but got %v", c.expectedErrorSubstring, utilerrors.NewAggregate(errs))
}
}
} else {
if len(errs) != 0 {
t.Errorf("Unexpected error: %v", utilerrors.NewAggregate(errs))
}
}
}
func (c configValidationTest) testConfirmUsable(contextName string, t *testing.T) {
err := ConfirmUsable(*c.config, contextName)
if len(c.expectedErrorSubstring) != 0 {
if err == nil {
t.Errorf("Expected error containing: %v", c.expectedErrorSubstring)
} else {
for _, curr := range c.expectedErrorSubstring {
if err != nil && !strings.Contains(err.Error(), curr) {
t.Errorf("Expected error containing: %v, but got %v", c.expectedErrorSubstring, err)
}
}
}
} else {
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
}
}
func (c configValidationTest) testConfig(t *testing.T) {
err := Validate(*c.config)
if len(c.expectedErrorSubstring) != 0 {
if err == nil {
t.Errorf("Expected error containing: %v", c.expectedErrorSubstring)
} else {
for _, curr := range c.expectedErrorSubstring {
if err != nil && !strings.Contains(err.Error(), curr) {
t.Errorf("Expected error containing: %v, but got %v", c.expectedErrorSubstring, err)
}
}
if !IsConfigurationInvalid(err) {
t.Errorf("all errors should be configuration invalid: %v", err)
}
}
} else {
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
}
}
func (c configValidationTest) testCluster(clusterName string, t *testing.T) {
errs := validateClusterInfo(clusterName, *c.config.Clusters[clusterName])
if len(c.expectedErrorSubstring) != 0 {
if len(errs) == 0 {
t.Errorf("Expected error containing: %v", c.expectedErrorSubstring)
}
for _, curr := range c.expectedErrorSubstring {
if len(errs) != 0 && !strings.Contains(utilerrors.NewAggregate(errs).Error(), curr) {
t.Errorf("Expected error containing: %v, but got %v", c.expectedErrorSubstring, utilerrors.NewAggregate(errs))
}
}
} else {
if len(errs) != 0 {
t.Errorf("Unexpected error: %v", utilerrors.NewAggregate(errs))
}
}
}
func (c configValidationTest) testAuthInfo(authInfoName string, t *testing.T) {
errs := validateAuthInfo(authInfoName, *c.config.AuthInfos[authInfoName])
if len(c.expectedErrorSubstring) != 0 {
if len(errs) == 0 {
t.Errorf("Expected error containing: %v", c.expectedErrorSubstring)
}
for _, curr := range c.expectedErrorSubstring {
if len(errs) != 0 && !strings.Contains(utilerrors.NewAggregate(errs).Error(), curr) {
t.Errorf("Expected error containing: %v, but got %v", c.expectedErrorSubstring, utilerrors.NewAggregate(errs))
}
}
} else {
if len(errs) != 0 {
t.Errorf("Unexpected error: %v", utilerrors.NewAggregate(errs))
}
}
}

View File

@@ -1,13 +0,0 @@
approvers:
- mikedanese
- timothysc
reviewers:
- wojtek-t
- deads2k
- mikedanese
- gmarek
- eparis
- timothysc
- ingvagabund
- resouer
- goltermann

View File

@@ -1,69 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package leaderelection
import (
"net/http"
"sync"
"time"
)
// HealthzAdaptor associates the /healthz endpoint with the LeaderElection object.
// It helps deal with the /healthz endpoint being set up prior to the LeaderElection.
// This contains the code needed to act as an adaptor between the leader
// election code and the health check code. It allows us to provide health
// status about the leader election, most specifically about whether the
// leader has failed to renew the lease without exiting the process. In that
// case we should report not healthy and rely on the kubelet to take down the
// process.
type HealthzAdaptor struct {
pointerLock sync.Mutex
le *LeaderElector
timeout time.Duration
}
// Name returns the name of the health check we are implementing.
func (l *HealthzAdaptor) Name() string {
return "leaderElection"
}
// Check is called by the healthz endpoint handler.
// It fails (returns an error) if we own the lease but have not been able to renew it.
func (l *HealthzAdaptor) Check(req *http.Request) error {
l.pointerLock.Lock()
defer l.pointerLock.Unlock()
if l.le == nil {
return nil
}
return l.le.Check(l.timeout)
}
// SetLeaderElection ties a leader election object to a HealthzAdaptor
func (l *HealthzAdaptor) SetLeaderElection(le *LeaderElector) {
l.pointerLock.Lock()
defer l.pointerLock.Unlock()
l.le = le
}
// NewLeaderHealthzAdaptor creates a basic healthz adaptor to monitor a leader election.
// timeout determines how far beyond lease expiry a renewal may lag before the
// check fails; checks within that grace period after the lease expires will
// still return healthy.
func NewLeaderHealthzAdaptor(timeout time.Duration) *HealthzAdaptor {
result := &HealthzAdaptor{
timeout: timeout,
}
return result
}
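// Example wiring (a minimal sketch; the mux setup and the names mux and le
// are illustrative assumptions, not part of this package):
//
// adaptor := NewLeaderHealthzAdaptor(20 * time.Second)
// mux := http.NewServeMux()
// mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
// // Check returns nil until an elector is attached, so /healthz can be
// // registered before leader election starts.
// if err := adaptor.Check(r); err != nil {
// http.Error(w, err.Error(), http.StatusInternalServerError)
// return
// }
// w.Write([]byte("ok"))
// })
// // Once the LeaderElector exists, attach it:
// // adaptor.SetLeaderElection(le)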

View File

@@ -1,175 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package leaderelection
import (
"fmt"
"testing"
"time"
"k8s.io/apimachinery/pkg/util/clock"
rl "k8s.io/client-go/tools/leaderelection/resourcelock"
"net/http"
)
type fakeLock struct {
identity string
}
// Get is a dummy to allow us to have a fakeLock for testing.
func (fl *fakeLock) Get() (ler *rl.LeaderElectionRecord, err error) {
return nil, nil
}
// Create is a dummy to allow us to have a fakeLock for testing.
func (fl *fakeLock) Create(ler rl.LeaderElectionRecord) error {
return nil
}
// Update is a dummy to allow us to have a fakeLock for testing.
func (fl *fakeLock) Update(ler rl.LeaderElectionRecord) error {
return nil
}
// RecordEvent is a dummy to allow us to have a fakeLock for testing.
func (fl *fakeLock) RecordEvent(string) {}
// Identity is a dummy to allow us to have a fakeLock for testing.
func (fl *fakeLock) Identity() string {
return fl.identity
}
// Describe is a dummy to allow us to have a fakeLock for testing.
func (fl *fakeLock) Describe() string {
return "Dummy implementation of lock for testing"
}
// TestLeaderElectionHealthChecker tests that the healthcheck for leader election handles its edge cases.
func TestLeaderElectionHealthChecker(t *testing.T) {
current := time.Now()
req := &http.Request{}
tests := []struct {
description string
expected error
adaptorTimeout time.Duration
elector *LeaderElector
}{
{
description: "call check before leader elector initialized",
expected: nil,
adaptorTimeout: time.Second * 20,
elector: nil,
},
{
description: "call check when the lease is far expired",
expected: fmt.Errorf("failed election to renew leadership on lease %s", "foo"),
adaptorTimeout: time.Second * 20,
elector: &LeaderElector{
config: LeaderElectionConfig{
Lock: &fakeLock{identity: "healthTest"},
LeaseDuration: time.Minute,
Name: "foo",
},
observedRecord: rl.LeaderElectionRecord{
HolderIdentity: "healthTest",
},
observedTime: current,
clock: clock.NewFakeClock(current.Add(time.Hour)),
},
},
{
description: "call check when the lease is far expired but held by another server",
expected: nil,
adaptorTimeout: time.Second * 20,
elector: &LeaderElector{
config: LeaderElectionConfig{
Lock: &fakeLock{identity: "healthTest"},
LeaseDuration: time.Minute,
Name: "foo",
},
observedRecord: rl.LeaderElectionRecord{
HolderIdentity: "otherServer",
},
observedTime: current,
clock: clock.NewFakeClock(current.Add(time.Hour)),
},
},
{
description: "call check when the lease is not expired",
expected: nil,
adaptorTimeout: time.Second * 20,
elector: &LeaderElector{
config: LeaderElectionConfig{
Lock: &fakeLock{identity: "healthTest"},
LeaseDuration: time.Minute,
Name: "foo",
},
observedRecord: rl.LeaderElectionRecord{
HolderIdentity: "healthTest",
},
observedTime: current,
clock: clock.NewFakeClock(current),
},
},
{
description: "call check when the lease is expired but inside the timeout",
expected: nil,
adaptorTimeout: time.Second * 20,
elector: &LeaderElector{
config: LeaderElectionConfig{
Lock: &fakeLock{identity: "healthTest"},
LeaseDuration: time.Minute,
Name: "foo",
},
observedRecord: rl.LeaderElectionRecord{
HolderIdentity: "healthTest",
},
observedTime: current,
clock: clock.NewFakeClock(current.Add(time.Minute).Add(time.Second)),
},
},
}
for _, test := range tests {
adaptor := NewLeaderHealthzAdaptor(test.adaptorTimeout)
if adaptor.le != nil {
t.Errorf("[%s] leaderChecker started with a LeaderElector %v", test.description, adaptor.le)
}
if test.elector != nil {
test.elector.config.WatchDog = adaptor
adaptor.SetLeaderElection(test.elector)
if adaptor.le == nil {
t.Errorf("[%s] adaptor failed to set the LeaderElector", test.description)
}
}
err := adaptor.Check(req)
if test.expected == nil {
if err == nil {
continue
}
t.Errorf("[%s] called check, expected no error but received \"%v\"", test.description, err)
} else {
if err == nil {
t.Errorf("[%s] called check and failed to received the expected error \"%v\"", test.description, test.expected)
}
if err.Error() != test.expected.Error() {
t.Errorf("[%s] called check, expected %v, received %v", test.description, test.expected, err)
}
}
}
}

View File

@@ -1,336 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package leaderelection implements leader election of a set of endpoints.
// It uses an annotation in the endpoints object to store the record of the
// election state.
//
// This implementation does not guarantee that only one client is acting as a
// leader (a.k.a. fencing). A client observes timestamps captured locally to
// infer the state of the leader election. Thus the implementation is tolerant
// to arbitrary clock skew, but is not tolerant to arbitrary clock skew rate.
//
// However the level of tolerance to skew rate can be configured by setting
// RenewDeadline and LeaseDuration appropriately. The tolerance expressed as a
// maximum tolerated ratio of time passed on the fastest node to time passed on
// the slowest node can be approximately achieved with a configuration that sets
// the same ratio of LeaseDuration to RenewDeadline. For example if a user wanted
// to tolerate some nodes progressing forward in time twice as fast as other nodes,
// the user could set LeaseDuration to 60 seconds and RenewDeadline to 30 seconds.
//
// While not required, some method of clock synchronization between nodes in the
// cluster is highly recommended. It's important to keep in mind when configuring
// this client that the tolerance to skew rate varies inversely to master
// availability.
//
// Larger clusters often have a more lenient SLA for API latency. This should be
// taken into account when configuring the client. The rate of leader transitions
// should be monitored and RetryPeriod and LeaseDuration should be increased
// until the rate is stable and acceptably low. It's important to keep in mind
// when configuring this client that the tolerance to API latency varies inversely
// to master availability.
//
// DISCLAIMER: this is an alpha API. This library will likely change significantly
// or even be removed entirely in subsequent releases. Depend on this API at
// your own risk.
package leaderelection
import (
"context"
"fmt"
"reflect"
"time"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
rl "k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/klog"
)
const (
JitterFactor = 1.2
)
// NewLeaderElector creates a LeaderElector from a LeaderElectionConfig
func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) {
if lec.LeaseDuration <= lec.RenewDeadline {
return nil, fmt.Errorf("leaseDuration must be greater than renewDeadline")
}
if lec.RenewDeadline <= time.Duration(JitterFactor*float64(lec.RetryPeriod)) {
return nil, fmt.Errorf("renewDeadline must be greater than retryPeriod*JitterFactor")
}
if lec.LeaseDuration < 1 {
return nil, fmt.Errorf("leaseDuration must be greater than zero")
}
if lec.RenewDeadline < 1 {
return nil, fmt.Errorf("renewDeadline must be greater than zero")
}
if lec.RetryPeriod < 1 {
return nil, fmt.Errorf("retryPeriod must be greater than zero")
}
if lec.Lock == nil {
return nil, fmt.Errorf("Lock must not be nil.")
}
return &LeaderElector{
config: lec,
clock: clock.RealClock{},
}, nil
}
type LeaderElectionConfig struct {
// Lock is the resource that will be used for locking
Lock rl.Interface
// LeaseDuration is the duration that non-leader candidates will
// wait to force acquire leadership. This is measured against time of
// last observed ack.
LeaseDuration time.Duration
// RenewDeadline is the duration that the acting master will retry
// refreshing leadership before giving up.
RenewDeadline time.Duration
// RetryPeriod is the duration the LeaderElector clients should wait
// between tries of actions.
RetryPeriod time.Duration
// Callbacks are callbacks that are triggered during certain lifecycle
// events of the LeaderElector
Callbacks LeaderCallbacks
// WatchDog is the associated health checker
// WatchDog may be nil if it's not needed/configured.
WatchDog *HealthzAdaptor
// Name is the name of the resource lock for debugging
Name string
}
// LeaderCallbacks are callbacks that are triggered during certain
// lifecycle events of the LeaderElector. These are invoked asynchronously.
//
// possible future callbacks:
// * OnChallenge()
type LeaderCallbacks struct {
// OnStartedLeading is called when a LeaderElector client starts leading
OnStartedLeading func(context.Context)
// OnStoppedLeading is called when a LeaderElector client stops leading
OnStoppedLeading func()
// OnNewLeader is called when the client observes a leader that is
// not the previously observed leader. This includes the first observed
// leader when the client starts.
OnNewLeader func(identity string)
}
// LeaderElector is a leader election client.
type LeaderElector struct {
config LeaderElectionConfig
// internal bookkeeping
observedRecord rl.LeaderElectionRecord
observedTime time.Time
// used to implement OnNewLeader(), may lag slightly from the
// value observedRecord.HolderIdentity if the transition has
// not yet been reported.
reportedLeader string
// clock is a wrapper around time to allow for less flaky testing
clock clock.Clock
// name is the name of the resource lock for debugging
name string
}
// Run starts the leader election loop
func (le *LeaderElector) Run(ctx context.Context) {
defer func() {
runtime.HandleCrash()
le.config.Callbacks.OnStoppedLeading()
}()
if !le.acquire(ctx) {
return // ctx signalled done
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go le.config.Callbacks.OnStartedLeading(ctx)
le.renew(ctx)
}
// RunOrDie starts a client with the provided config or panics if the config
// fails to validate.
func RunOrDie(ctx context.Context, lec LeaderElectionConfig) {
le, err := NewLeaderElector(lec)
if err != nil {
panic(err)
}
if lec.WatchDog != nil {
lec.WatchDog.SetLeaderElection(le)
}
le.Run(ctx)
}
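// The sketch below is illustrative and not part of the original file: it shows
// a typical RunOrDie invocation; the lock argument and the chosen durations are
// assumptions (they satisfy the NewLeaderElector invariants above:
// LeaseDuration > RenewDeadline > JitterFactor*RetryPeriod).
func exampleRunOrDie(ctx context.Context, lock rl.Interface) {
	RunOrDie(ctx, LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		Callbacks: LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				// Start the work that requires leadership here; ctx is
				// cancelled when leadership is lost.
			},
			OnStoppedLeading: func() {
				klog.Info("leadership lost")
			},
		},
	})
}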
// GetLeader returns the identity of the last observed leader or returns the empty string if
// no leader has yet been observed.
func (le *LeaderElector) GetLeader() string {
return le.observedRecord.HolderIdentity
}
// IsLeader returns true if the last observed leader was this client else returns false.
func (le *LeaderElector) IsLeader() bool {
return le.observedRecord.HolderIdentity == le.config.Lock.Identity()
}
// acquire loops calling tryAcquireOrRenew and returns true immediately when tryAcquireOrRenew succeeds.
// Returns false if ctx signals done.
func (le *LeaderElector) acquire(ctx context.Context) bool {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
succeeded := false
desc := le.config.Lock.Describe()
klog.Infof("attempting to acquire leader lease %v...", desc)
wait.JitterUntil(func() {
succeeded = le.tryAcquireOrRenew()
le.maybeReportTransition()
if !succeeded {
klog.V(4).Infof("failed to acquire lease %v", desc)
return
}
le.config.Lock.RecordEvent("became leader")
klog.Infof("successfully acquired lease %v", desc)
cancel()
}, le.config.RetryPeriod, JitterFactor, true, ctx.Done())
return succeeded
}
// renew loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew fails or ctx signals done.
func (le *LeaderElector) renew(ctx context.Context) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
wait.Until(func() {
timeoutCtx, timeoutCancel := context.WithTimeout(ctx, le.config.RenewDeadline)
defer timeoutCancel()
err := wait.PollImmediateUntil(le.config.RetryPeriod, func() (bool, error) {
done := make(chan bool, 1)
go func() {
defer close(done)
done <- le.tryAcquireOrRenew()
}()
select {
case <-timeoutCtx.Done():
return false, fmt.Errorf("failed to tryAcquireOrRenew %s", timeoutCtx.Err())
case result := <-done:
return result, nil
}
}, timeoutCtx.Done())
le.maybeReportTransition()
desc := le.config.Lock.Describe()
if err == nil {
klog.V(5).Infof("successfully renewed lease %v", desc)
return
}
le.config.Lock.RecordEvent("stopped leading")
klog.Infof("failed to renew lease %v: %v", desc, err)
cancel()
}, le.config.RetryPeriod, ctx.Done())
}
// tryAcquireOrRenew tries to acquire a leader lease if it is not already acquired,
// else it tries to renew the lease if it has already been acquired. Returns true
// on success else returns false.
func (le *LeaderElector) tryAcquireOrRenew() bool {
now := metav1.Now()
leaderElectionRecord := rl.LeaderElectionRecord{
HolderIdentity: le.config.Lock.Identity(),
LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second),
RenewTime: now,
AcquireTime: now,
}
// 1. obtain or create the ElectionRecord
oldLeaderElectionRecord, err := le.config.Lock.Get()
if err != nil {
if !errors.IsNotFound(err) {
klog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err)
return false
}
if err = le.config.Lock.Create(leaderElectionRecord); err != nil {
klog.Errorf("error initially creating leader election record: %v", err)
return false
}
le.observedRecord = leaderElectionRecord
le.observedTime = le.clock.Now()
return true
}
// 2. Record obtained, check the Identity & Time
if !reflect.DeepEqual(le.observedRecord, *oldLeaderElectionRecord) {
le.observedRecord = *oldLeaderElectionRecord
le.observedTime = le.clock.Now()
}
if le.observedTime.Add(le.config.LeaseDuration).After(now.Time) &&
!le.IsLeader() {
klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity)
return false
}
// 3. We're going to try to update. The leaderElectionRecord is set to its default
// here. Let's correct it before updating.
if le.IsLeader() {
leaderElectionRecord.AcquireTime = oldLeaderElectionRecord.AcquireTime
leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions
} else {
leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + 1
}
// update the lock itself
if err = le.config.Lock.Update(leaderElectionRecord); err != nil {
klog.Errorf("Failed to update lock: %v", err)
return false
}
le.observedRecord = leaderElectionRecord
le.observedTime = le.clock.Now()
return true
}
func (le *LeaderElector) maybeReportTransition() {
if le.observedRecord.HolderIdentity == le.reportedLeader {
return
}
le.reportedLeader = le.observedRecord.HolderIdentity
if le.config.Callbacks.OnNewLeader != nil {
go le.config.Callbacks.OnNewLeader(le.reportedLeader)
}
}
// Check will determine if the current lease is expired by more than timeout.
func (le *LeaderElector) Check(maxTolerableExpiredLease time.Duration) error {
if !le.IsLeader() {
// Currently not concerned with the case that we are a hot standby
return nil
}
// If we are more than timeout seconds past the lease duration, the lease renew
// has failed and it is time to start reporting ourselves as unhealthy. We should
// have died, but conditions like deadlock can prevent this. (See #70819)
if le.clock.Since(le.observedTime) > le.config.LeaseDuration+maxTolerableExpiredLease {
return fmt.Errorf("failed election to renew leadership on lease %s", le.config.Name)
}
return nil
}

View File

@@ -1,294 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package leaderelection
import (
"fmt"
"sync"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
fakecorev1 "k8s.io/client-go/kubernetes/typed/core/v1/fake"
core "k8s.io/client-go/testing"
rl "k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/tools/record"
)
func createLockObject(objectType string, objectMeta metav1.ObjectMeta) (obj runtime.Object) {
switch objectType {
case "endpoints":
obj = &v1.Endpoints{ObjectMeta: objectMeta}
case "configmaps":
obj = &v1.ConfigMap{ObjectMeta: objectMeta}
default:
panic("unexpected objType:" + objectType)
}
return
}
// TestTryAcquireOrRenewEndpoints tests leader election using endpoints as the resource
func TestTryAcquireOrRenewEndpoints(t *testing.T) {
testTryAcquireOrRenew(t, "endpoints")
}
func testTryAcquireOrRenew(t *testing.T, objectType string) {
future := time.Now().Add(1000 * time.Hour)
past := time.Now().Add(-1000 * time.Hour)
tests := []struct {
observedRecord rl.LeaderElectionRecord
observedTime time.Time
reactors []struct {
verb string
reaction core.ReactionFunc
}
expectSuccess bool
transitionLeader bool
outHolder string
}{
// acquire from no object
{
reactors: []struct {
verb string
reaction core.ReactionFunc
}{
{
verb: "get",
reaction: func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, errors.NewNotFound(action.(core.GetAction).GetResource().GroupResource(), action.(core.GetAction).GetName())
},
},
{
verb: "create",
reaction: func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, action.(core.CreateAction).GetObject(), nil
},
},
},
expectSuccess: true,
outHolder: "baz",
},
// acquire from unled object
{
reactors: []struct {
verb string
reaction core.ReactionFunc
}{
{
verb: "get",
reaction: func(action core.Action) (handled bool, ret runtime.Object, err error) {
objectMeta := metav1.ObjectMeta{
Namespace: action.GetNamespace(),
Name: action.(core.GetAction).GetName(),
}
return true, createLockObject(objectType, objectMeta), nil
},
},
{
verb: "update",
reaction: func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, action.(core.CreateAction).GetObject(), nil
},
},
},
expectSuccess: true,
transitionLeader: true,
outHolder: "baz",
},
// acquire from led, unacked object
{
reactors: []struct {
verb string
reaction core.ReactionFunc
}{
{
verb: "get",
reaction: func(action core.Action) (handled bool, ret runtime.Object, err error) {
objectMeta := metav1.ObjectMeta{
Namespace: action.GetNamespace(),
Name: action.(core.GetAction).GetName(),
Annotations: map[string]string{
rl.LeaderElectionRecordAnnotationKey: `{"holderIdentity":"bing"}`,
},
}
return true, createLockObject(objectType, objectMeta), nil
},
},
{
verb: "update",
reaction: func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, action.(core.CreateAction).GetObject(), nil
},
},
},
observedRecord: rl.LeaderElectionRecord{HolderIdentity: "bing"},
observedTime: past,
expectSuccess: true,
transitionLeader: true,
outHolder: "baz",
},
// don't acquire from led, acked object
{
reactors: []struct {
verb string
reaction core.ReactionFunc
}{
{
verb: "get",
reaction: func(action core.Action) (handled bool, ret runtime.Object, err error) {
objectMeta := metav1.ObjectMeta{
Namespace: action.GetNamespace(),
Name: action.(core.GetAction).GetName(),
Annotations: map[string]string{
rl.LeaderElectionRecordAnnotationKey: `{"holderIdentity":"bing"}`,
},
}
return true, createLockObject(objectType, objectMeta), nil
},
},
},
observedTime: future,
expectSuccess: false,
outHolder: "bing",
},
// renew already acquired object
{
reactors: []struct {
verb string
reaction core.ReactionFunc
}{
{
verb: "get",
reaction: func(action core.Action) (handled bool, ret runtime.Object, err error) {
objectMeta := metav1.ObjectMeta{
Namespace: action.GetNamespace(),
Name: action.(core.GetAction).GetName(),
Annotations: map[string]string{
rl.LeaderElectionRecordAnnotationKey: `{"holderIdentity":"baz"}`,
},
}
return true, createLockObject(objectType, objectMeta), nil
},
},
{
verb: "update",
reaction: func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, action.(core.CreateAction).GetObject(), nil
},
},
},
observedTime: future,
observedRecord: rl.LeaderElectionRecord{HolderIdentity: "baz"},
expectSuccess: true,
outHolder: "baz",
},
}
for i, test := range tests {
// OnNewLeader is called async so we have to wait for it.
var wg sync.WaitGroup
wg.Add(1)
var reportedLeader string
var lock rl.Interface
objectMeta := metav1.ObjectMeta{Namespace: "foo", Name: "bar"}
resourceLockConfig := rl.ResourceLockConfig{
Identity: "baz",
EventRecorder: &record.FakeRecorder{},
}
c := &fakecorev1.FakeCoreV1{Fake: &core.Fake{}}
for _, reactor := range test.reactors {
c.AddReactor(reactor.verb, objectType, reactor.reaction)
}
c.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
t.Errorf("[%v] unreachable action. testclient called too many times: %+v", i, action)
return true, nil, fmt.Errorf("unreachable action")
})
switch objectType {
case "endpoints":
lock = &rl.EndpointsLock{
EndpointsMeta: objectMeta,
LockConfig: resourceLockConfig,
Client: c,
}
case "configmaps":
lock = &rl.ConfigMapLock{
ConfigMapMeta: objectMeta,
LockConfig: resourceLockConfig,
Client: c,
}
}
lec := LeaderElectionConfig{
Lock: lock,
LeaseDuration: 10 * time.Second,
Callbacks: LeaderCallbacks{
OnNewLeader: func(l string) {
defer wg.Done()
reportedLeader = l
},
},
}
le := &LeaderElector{
config: lec,
observedRecord: test.observedRecord,
observedTime: test.observedTime,
clock: clock.RealClock{},
}
if test.expectSuccess != le.tryAcquireOrRenew() {
t.Errorf("[%v]unexpected result of tryAcquireOrRenew: [succeeded=%v]", i, !test.expectSuccess)
}
le.observedRecord.AcquireTime = metav1.Time{}
le.observedRecord.RenewTime = metav1.Time{}
if le.observedRecord.HolderIdentity != test.outHolder {
t.Errorf("[%v]expected holder:\n\t%+v\ngot:\n\t%+v", i, test.outHolder, le.observedRecord.HolderIdentity)
}
if len(test.reactors) != len(c.Actions()) {
t.Errorf("[%v]wrong number of api interactions", i)
}
if test.transitionLeader && le.observedRecord.LeaderTransitions != 1 {
t.Errorf("[%v]leader should have transitioned but did not", i)
}
if !test.transitionLeader && le.observedRecord.LeaderTransitions != 0 {
t.Errorf("[%v]leader should not have transitioned but did", i)
}
le.maybeReportTransition()
wg.Wait()
if reportedLeader != test.outHolder {
t.Errorf("[%v]reported leader was not the new leader. expected %q, got %q", i, test.outHolder, reportedLeader)
}
}
}
// TestTryAcquireOrRenewConfigMaps tests leader election using a configmap as the resource
func TestTryAcquireOrRenewConfigMaps(t *testing.T) {
testTryAcquireOrRenew(t, "configmaps")
}

View File

@@ -1,109 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourcelock
import (
"encoding/json"
"errors"
"fmt"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)
// TODO: This is almost an exact replica of the Endpoints lock.
// Going forward, as we self-host more and more components
// and use ConfigMaps as the means to pass that configuration
// data, we will likely deprecate the Endpoints lock.
type ConfigMapLock struct {
// ConfigMapMeta should contain a Name and a Namespace of a
// ConfigMap object that the LeaderElector will attempt to lead.
ConfigMapMeta metav1.ObjectMeta
Client corev1client.ConfigMapsGetter
LockConfig ResourceLockConfig
cm *v1.ConfigMap
}
// Get returns the election record from a ConfigMap Annotation
func (cml *ConfigMapLock) Get() (*LeaderElectionRecord, error) {
var record LeaderElectionRecord
var err error
cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Get(cml.ConfigMapMeta.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if cml.cm.Annotations == nil {
cml.cm.Annotations = make(map[string]string)
}
if recordBytes, found := cml.cm.Annotations[LeaderElectionRecordAnnotationKey]; found {
if err := json.Unmarshal([]byte(recordBytes), &record); err != nil {
return nil, err
}
}
return &record, nil
}
// Create attempts to create a LeaderElectionRecord annotation
func (cml *ConfigMapLock) Create(ler LeaderElectionRecord) error {
recordBytes, err := json.Marshal(ler)
if err != nil {
return err
}
cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Create(&v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: cml.ConfigMapMeta.Name,
Namespace: cml.ConfigMapMeta.Namespace,
Annotations: map[string]string{
LeaderElectionRecordAnnotationKey: string(recordBytes),
},
},
})
return err
}
// Update will update an existing annotation on a given resource.
func (cml *ConfigMapLock) Update(ler LeaderElectionRecord) error {
if cml.cm == nil {
return errors.New("configmap not initialized, call get or create first")
}
recordBytes, err := json.Marshal(ler)
if err != nil {
return err
}
cml.cm.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(cml.cm)
return err
}
// RecordEvent records an event in leader election while adding metadata
func (cml *ConfigMapLock) RecordEvent(s string) {
events := fmt.Sprintf("%v %v", cml.LockConfig.Identity, s)
cml.LockConfig.EventRecorder.Eventf(&v1.ConfigMap{ObjectMeta: cml.cm.ObjectMeta}, v1.EventTypeNormal, "LeaderElection", events)
}
// Describe is used to convert details on current resource lock
// into a string
func (cml *ConfigMapLock) Describe() string {
return fmt.Sprintf("%v/%v", cml.ConfigMapMeta.Namespace, cml.ConfigMapMeta.Name)
}
// Identity returns the Identity of the lock
func (cml *ConfigMapLock) Identity() string {
return cml.LockConfig.Identity
}

View File

@@ -1,104 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourcelock
import (
"encoding/json"
"errors"
"fmt"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)
type EndpointsLock struct {
// EndpointsMeta should contain a Name and a Namespace of an
// Endpoints object that the LeaderElector will attempt to lead.
EndpointsMeta metav1.ObjectMeta
Client corev1client.EndpointsGetter
LockConfig ResourceLockConfig
e *v1.Endpoints
}
// Get returns the election record from an Endpoints Annotation
func (el *EndpointsLock) Get() (*LeaderElectionRecord, error) {
var record LeaderElectionRecord
var err error
el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Get(el.EndpointsMeta.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if el.e.Annotations == nil {
el.e.Annotations = make(map[string]string)
}
if recordBytes, found := el.e.Annotations[LeaderElectionRecordAnnotationKey]; found {
if err := json.Unmarshal([]byte(recordBytes), &record); err != nil {
return nil, err
}
}
return &record, nil
}
// Create attempts to create a LeaderElectionRecord annotation
func (el *EndpointsLock) Create(ler LeaderElectionRecord) error {
recordBytes, err := json.Marshal(ler)
if err != nil {
return err
}
el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Create(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: el.EndpointsMeta.Name,
Namespace: el.EndpointsMeta.Namespace,
Annotations: map[string]string{
LeaderElectionRecordAnnotationKey: string(recordBytes),
},
},
})
return err
}
// Update will update an existing annotation on a given resource.
func (el *EndpointsLock) Update(ler LeaderElectionRecord) error {
if el.e == nil {
return errors.New("endpoint not initialized, call get or create first")
}
recordBytes, err := json.Marshal(ler)
if err != nil {
return err
}
el.e.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(el.e)
return err
}
// RecordEvent records an event in leader election while adding metadata
func (el *EndpointsLock) RecordEvent(s string) {
events := fmt.Sprintf("%v %v", el.LockConfig.Identity, s)
el.LockConfig.EventRecorder.Eventf(&v1.Endpoints{ObjectMeta: el.e.ObjectMeta}, v1.EventTypeNormal, "LeaderElection", events)
}
// Describe is used to convert details on current resource lock
// into a string
func (el *EndpointsLock) Describe() string {
return fmt.Sprintf("%v/%v", el.EndpointsMeta.Namespace, el.EndpointsMeta.Name)
}
// returns the Identity of the lock
func (el *EndpointsLock) Identity() string {
return el.LockConfig.Identity
}

View File

@@ -1,102 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourcelock
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
)
const (
LeaderElectionRecordAnnotationKey = "control-plane.alpha.kubernetes.io/leader"
EndpointsResourceLock = "endpoints"
ConfigMapsResourceLock = "configmaps"
)
// LeaderElectionRecord is the record that is stored in the leader election annotation.
// This information should be used for observational purposes only and could be replaced
// with a random string (e.g. UUID) with only slight modification of this code.
// TODO(mikedanese): this should potentially be versioned
type LeaderElectionRecord struct {
HolderIdentity string `json:"holderIdentity"`
LeaseDurationSeconds int `json:"leaseDurationSeconds"`
AcquireTime metav1.Time `json:"acquireTime"`
RenewTime metav1.Time `json:"renewTime"`
LeaderTransitions int `json:"leaderTransitions"`
}
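// An illustrative sketch, not part of the original file: with the JSON tags
// above, a record serialized into the annotation value might look like the
// following (all field values here are assumptions):
//
//	{"holderIdentity":"holder-a","leaseDurationSeconds":15,
//	 "acquireTime":"2019-03-08T00:00:00Z","renewTime":"2019-03-08T00:00:10Z",
//	 "leaderTransitions":3}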
// ResourceLockConfig contains common data that exists across different
// resource locks
type ResourceLockConfig struct {
Identity string
EventRecorder record.EventRecorder
}
// Interface offers a common interface for locking on arbitrary
// resources used in leader election. The Interface is used
// to hide the details on specific implementations in order to allow
// them to change over time. This interface is strictly for use
// by the leaderelection code.
type Interface interface {
// Get returns the LeaderElectionRecord
Get() (*LeaderElectionRecord, error)
// Create attempts to create a LeaderElectionRecord
Create(ler LeaderElectionRecord) error
// Update will update an existing LeaderElectionRecord
Update(ler LeaderElectionRecord) error
// RecordEvent is used to record events
RecordEvent(string)
// Identity will return the lock's Identity
Identity() string
// Describe is used to convert details on current resource lock
// into a string
Describe() string
}
// New will create a lock of a given type according to the input parameters
func New(lockType string, ns string, name string, client corev1.CoreV1Interface, rlc ResourceLockConfig) (Interface, error) {
switch lockType {
case EndpointsResourceLock:
return &EndpointsLock{
EndpointsMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
},
Client: client,
LockConfig: rlc,
}, nil
case ConfigMapsResourceLock:
return &ConfigMapLock{
ConfigMapMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
},
Client: client,
LockConfig: rlc,
}, nil
default:
return nil, fmt.Errorf("Invalid lock-type %s", lockType)
}
}
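// The sketch below is illustrative and not part of the original file: it shows
// the factory in use; the namespace, name, and identity values are assumptions
// for the example.
func exampleNewLock(client corev1.CoreV1Interface, recorder record.EventRecorder) (Interface, error) {
	return New(EndpointsResourceLock, "kube-system", "example-lock", client, ResourceLockConfig{
		Identity:      "example-holder",
		EventRecorder: recorder,
	})
}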

View File

@@ -1,7 +0,0 @@
reviewers:
- wojtek-t
- eparis
- krousey
- jayunit100
- fgrzadkowski
- tmrts

View File

@@ -1,206 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pager
import (
"context"
"fmt"
"reflect"
"testing"
"k8s.io/apimachinery/pkg/api/errors"
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
)
func list(count int, rv string) *metainternalversion.List {
var list metainternalversion.List
for i := 0; i < count; i++ {
list.Items = append(list.Items, &metav1beta1.PartialObjectMetadata{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%d", i),
},
})
}
list.ResourceVersion = rv
return &list
}
type testPager struct {
t *testing.T
rv string
index int
remaining int
last int
continuing bool
done bool
expectPage int64
}
func (p *testPager) reset() {
p.continuing = false
p.remaining += p.index
p.index = 0
p.last = 0
p.done = false
}
func (p *testPager) PagedList(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
if p.done {
p.t.Errorf("did not expect additional call to paged list")
return nil, fmt.Errorf("unexpected list call")
}
expectedContinue := fmt.Sprintf("%s:%d", p.rv, p.last)
if options.Limit != p.expectPage || (p.continuing && options.Continue != expectedContinue) {
p.t.Errorf("invariant violated, expected limit %d and continue %s, got %#v", p.expectPage, expectedContinue, options)
return nil, fmt.Errorf("invariant violated")
}
var list metainternalversion.List
total := options.Limit
if total == 0 {
total = int64(p.remaining)
}
for i := int64(0); i < total; i++ {
if p.remaining <= 0 {
break
}
list.Items = append(list.Items, &metav1beta1.PartialObjectMetadata{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%d", p.index),
},
})
p.remaining--
p.index++
}
p.last = p.index
if p.remaining > 0 {
list.Continue = fmt.Sprintf("%s:%d", p.rv, p.last)
p.continuing = true
} else {
p.done = true
}
list.ResourceVersion = p.rv
return &list, nil
}
func (p *testPager) ExpiresOnSecondPage(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
if p.continuing {
p.done = true
return nil, errors.NewResourceExpired("this list has expired")
}
return p.PagedList(ctx, options)
}
func (p *testPager) ExpiresOnSecondPageThenFullList(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
if p.continuing {
p.reset()
p.expectPage = 0
return nil, errors.NewResourceExpired("this list has expired")
}
return p.PagedList(ctx, options)
}
func TestListPager_List(t *testing.T) {
type fields struct {
PageSize int64
PageFn ListPageFunc
FullListIfExpired bool
}
type args struct {
ctx context.Context
options metav1.ListOptions
}
tests := []struct {
name string
fields fields
args args
want runtime.Object
wantErr bool
isExpired bool
}{
{
name: "empty page",
fields: fields{PageSize: 10, PageFn: (&testPager{t: t, expectPage: 10, remaining: 0, rv: "rv:20"}).PagedList},
args: args{},
want: list(0, "rv:20"),
},
{
name: "one page",
fields: fields{PageSize: 10, PageFn: (&testPager{t: t, expectPage: 10, remaining: 9, rv: "rv:20"}).PagedList},
args: args{},
want: list(9, "rv:20"),
},
{
name: "one full page",
fields: fields{PageSize: 10, PageFn: (&testPager{t: t, expectPage: 10, remaining: 10, rv: "rv:20"}).PagedList},
args: args{},
want: list(10, "rv:20"),
},
{
name: "two pages",
fields: fields{PageSize: 10, PageFn: (&testPager{t: t, expectPage: 10, remaining: 11, rv: "rv:20"}).PagedList},
args: args{},
want: list(11, "rv:20"),
},
{
name: "three pages",
fields: fields{PageSize: 10, PageFn: (&testPager{t: t, expectPage: 10, remaining: 21, rv: "rv:20"}).PagedList},
args: args{},
want: list(21, "rv:20"),
},
{
name: "expires on second page",
fields: fields{PageSize: 10, PageFn: (&testPager{t: t, expectPage: 10, remaining: 21, rv: "rv:20"}).ExpiresOnSecondPage},
args: args{},
wantErr: true,
isExpired: true,
},
{
name: "expires on second page and then lists",
fields: fields{
FullListIfExpired: true,
PageSize: 10,
PageFn: (&testPager{t: t, expectPage: 10, remaining: 21, rv: "rv:20"}).ExpiresOnSecondPageThenFullList,
},
args: args{},
want: list(21, "rv:20"),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
p := &ListPager{
PageSize: tt.fields.PageSize,
PageFn: tt.fields.PageFn,
FullListIfExpired: tt.fields.FullListIfExpired,
}
got, err := p.List(tt.args.ctx, tt.args.options)
if (err != nil) != tt.wantErr {
t.Errorf("ListPager.List() error = %v, wantErr %v", err, tt.wantErr)
return
}
if tt.isExpired != errors.IsResourceExpired(err) {
t.Errorf("ListPager.List() error = %v, isExpired %v", err, tt.isExpired)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("ListPager.List() = %v, want %v", got, tt.want)
}
})
}
}
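// The sketch below is illustrative and not part of the original test: it shows
// ListPager driven directly by the fake pager above; the page size and item
// count are assumptions.
func examplePagerList(t *testing.T) (runtime.Object, error) {
	p := &ListPager{
		PageSize: 5,
		PageFn:   (&testPager{t: t, expectPage: 5, remaining: 12, rv: "rv:1"}).PagedList,
	}
	// Three page requests are made under the hood: 5 + 5 + 2 items.
	return p.List(context.TODO(), metav1.ListOptions{})
}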

View File

@@ -1,19 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package portforward adds support for SSH-like port forwarding from the client's
// local host to remote containers.
package portforward // import "k8s.io/client-go/tools/portforward"

View File

@@ -1,419 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package portforward
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"strconv"
"strings"
"sync"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/runtime"
)
// TODO move to API machinery and re-unify with kubelet/server/portforward
// The subprotocol "portforward.k8s.io" is used for port forwarding.
const PortForwardProtocolV1Name = "portforward.k8s.io"
// PortForwarder knows how to listen for local connections and forward them to
// a remote pod via an upgraded HTTP request.
type PortForwarder struct {
addresses []listenAddress
ports []ForwardedPort
stopChan <-chan struct{}
dialer httpstream.Dialer
streamConn httpstream.Connection
listeners []io.Closer
Ready chan struct{}
requestIDLock sync.Mutex
requestID int
out io.Writer
errOut io.Writer
}
// ForwardedPort contains a Local:Remote port pairing.
type ForwardedPort struct {
Local uint16
Remote uint16
}
/*
valid port specifications:
5000
- forwards from localhost:5000 to pod:5000
8888:5000
- forwards from localhost:8888 to pod:5000
0:5000
:5000
- selects a random available local port,
forwards from localhost:<random port> to pod:5000
*/
func parsePorts(ports []string) ([]ForwardedPort, error) {
var forwards []ForwardedPort
for _, portString := range ports {
parts := strings.Split(portString, ":")
var localString, remoteString string
if len(parts) == 1 {
localString = parts[0]
remoteString = parts[0]
} else if len(parts) == 2 {
localString = parts[0]
if localString == "" {
// support :5000
localString = "0"
}
remoteString = parts[1]
} else {
return nil, fmt.Errorf("Invalid port format '%s'", portString)
}
localPort, err := strconv.ParseUint(localString, 10, 16)
if err != nil {
return nil, fmt.Errorf("Error parsing local port '%s': %s", localString, err)
}
remotePort, err := strconv.ParseUint(remoteString, 10, 16)
if err != nil {
return nil, fmt.Errorf("Error parsing remote port '%s': %s", remoteString, err)
}
if remotePort == 0 {
return nil, fmt.Errorf("Remote port must be > 0")
}
forwards = append(forwards, ForwardedPort{uint16(localPort), uint16(remotePort)})
}
return forwards, nil
}
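// The sketch below is illustrative and not part of the original file: it
// exercises the specifications documented above; the literal port strings are
// assumptions chosen to mirror the comment.
func exampleParsePorts() ([]ForwardedPort, error) {
	// "5000" -> {5000, 5000}, "8888:5000" -> {8888, 5000}, ":5000" -> {0, 5000}
	// (a zero local port asks the listener for a random free port).
	return parsePorts([]string{"5000", "8888:5000", ":5000"})
}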
type listenAddress struct {
address string
protocol string
failureMode string
}
func parseAddresses(addressesToParse []string) ([]listenAddress, error) {
var addresses []listenAddress
parsed := make(map[string]listenAddress)
for _, address := range addressesToParse {
if address == "localhost" {
ip := listenAddress{address: "127.0.0.1", protocol: "tcp4", failureMode: "all"}
parsed[ip.address] = ip
ip = listenAddress{address: "::1", protocol: "tcp6", failureMode: "all"}
parsed[ip.address] = ip
} else if net.ParseIP(address).To4() != nil {
parsed[address] = listenAddress{address: address, protocol: "tcp4", failureMode: "any"}
} else if net.ParseIP(address) != nil {
parsed[address] = listenAddress{address: address, protocol: "tcp6", failureMode: "any"}
} else {
return nil, fmt.Errorf("%s is not a valid IP", address)
}
}
addresses = make([]listenAddress, len(parsed))
id := 0
for _, v := range parsed {
addresses[id] = v
id++
}
return addresses, nil
}
// New creates a new PortForwarder with localhost listen addresses.
func New(dialer httpstream.Dialer, ports []string, stopChan <-chan struct{}, readyChan chan struct{}, out, errOut io.Writer) (*PortForwarder, error) {
return NewOnAddresses(dialer, []string{"localhost"}, ports, stopChan, readyChan, out, errOut)
}
// NewOnAddresses creates a new PortForwarder with custom listen addresses.
func NewOnAddresses(dialer httpstream.Dialer, addresses []string, ports []string, stopChan <-chan struct{}, readyChan chan struct{}, out, errOut io.Writer) (*PortForwarder, error) {
if len(addresses) == 0 {
return nil, errors.New("You must specify at least 1 address")
}
parsedAddresses, err := parseAddresses(addresses)
if err != nil {
return nil, err
}
if len(ports) == 0 {
return nil, errors.New("You must specify at least 1 port")
}
parsedPorts, err := parsePorts(ports)
if err != nil {
return nil, err
}
return &PortForwarder{
dialer: dialer,
addresses: parsedAddresses,
ports: parsedPorts,
stopChan: stopChan,
Ready: readyChan,
out: out,
errOut: errOut,
}, nil
}
// ForwardPorts formats and executes a port forwarding request. The connection will remain
// open until stopChan is closed.
func (pf *PortForwarder) ForwardPorts() error {
defer pf.Close()
var err error
pf.streamConn, _, err = pf.dialer.Dial(PortForwardProtocolV1Name)
if err != nil {
return fmt.Errorf("error upgrading connection: %s", err)
}
defer pf.streamConn.Close()
return pf.forward()
}
// forward dials the remote host specified in req, upgrades the request, starts
// listeners for each port specified in ports, and forwards local connections
// to the remote host via streams.
func (pf *PortForwarder) forward() error {
var err error
listenSuccess := false
for _, port := range pf.ports {
err = pf.listenOnPort(&port)
switch {
case err == nil:
listenSuccess = true
default:
if pf.errOut != nil {
fmt.Fprintf(pf.errOut, "Unable to listen on port %d: %v\n", port.Local, err)
}
}
}
if !listenSuccess {
return fmt.Errorf("Unable to listen on any of the requested ports: %v", pf.ports)
}
if pf.Ready != nil {
close(pf.Ready)
}
// wait for interrupt or conn closure
select {
case <-pf.stopChan:
case <-pf.streamConn.CloseChan():
runtime.HandleError(errors.New("lost connection to pod"))
}
return nil
}
// listenOnPort delegates listener creation and waits for connections on requested bind addresses.
// An error is raised based on address groups (default and localhost) and their failure modes
func (pf *PortForwarder) listenOnPort(port *ForwardedPort) error {
var errors []error
failCounters := make(map[string]int, 2)
successCounters := make(map[string]int, 2)
for _, addr := range pf.addresses {
err := pf.listenOnPortAndAddress(port, addr.protocol, addr.address)
if err != nil {
errors = append(errors, err)
failCounters[addr.failureMode]++
} else {
successCounters[addr.failureMode]++
}
}
if successCounters["all"] == 0 && failCounters["all"] > 0 {
return fmt.Errorf("%s: %v", "Listeners failed to create with the following errors", errors)
}
if failCounters["any"] > 0 {
return fmt.Errorf("%s: %v", "Listeners failed to create with the following errors", errors)
}
return nil
}
// listenOnPortAndAddress delegates listener creation and waits for new connections
// in the background.
func (pf *PortForwarder) listenOnPortAndAddress(port *ForwardedPort, protocol string, address string) error {
listener, err := pf.getListener(protocol, address, port)
if err != nil {
return err
}
pf.listeners = append(pf.listeners, listener)
go pf.waitForConnection(listener, *port)
return nil
}
// getListener creates a listener on the interface targeted by the given hostname on the given port with
// the given protocol. protocol is in net.Listen style, which basically admits values like tcp, tcp4, tcp6
func (pf *PortForwarder) getListener(protocol string, hostname string, port *ForwardedPort) (net.Listener, error) {
listener, err := net.Listen(protocol, net.JoinHostPort(hostname, strconv.Itoa(int(port.Local))))
if err != nil {
return nil, fmt.Errorf("Unable to create listener: Error %s", err)
}
listenerAddress := listener.Addr().String()
host, localPort, _ := net.SplitHostPort(listenerAddress)
localPortUInt, err := strconv.ParseUint(localPort, 10, 16)
if err != nil {
fmt.Fprintf(pf.out, "Failed to forward from %s:%d -> %d\n", hostname, localPortUInt, port.Remote)
return nil, fmt.Errorf("Error parsing local port: %s from %s (%s)", err, listenerAddress, host)
}
port.Local = uint16(localPortUInt)
if pf.out != nil {
fmt.Fprintf(pf.out, "Forwarding from %s -> %d\n", net.JoinHostPort(hostname, strconv.Itoa(int(localPortUInt))), port.Remote)
}
return listener, nil
}
// waitForConnection waits for new connections to listener and handles them in
// the background.
func (pf *PortForwarder) waitForConnection(listener net.Listener, port ForwardedPort) {
for {
conn, err := listener.Accept()
if err != nil {
// TODO consider using something like https://github.com/hydrogen18/stoppableListener?
if !strings.Contains(strings.ToLower(err.Error()), "use of closed network connection") {
runtime.HandleError(fmt.Errorf("Error accepting connection on port %d: %v", port.Local, err))
}
return
}
go pf.handleConnection(conn, port)
}
}
func (pf *PortForwarder) nextRequestID() int {
pf.requestIDLock.Lock()
defer pf.requestIDLock.Unlock()
id := pf.requestID
pf.requestID++
return id
}
// handleConnection copies data between the local connection and the stream to
// the remote server.
func (pf *PortForwarder) handleConnection(conn net.Conn, port ForwardedPort) {
defer conn.Close()
if pf.out != nil {
fmt.Fprintf(pf.out, "Handling connection for %d\n", port.Local)
}
requestID := pf.nextRequestID()
// create error stream
headers := http.Header{}
headers.Set(v1.StreamType, v1.StreamTypeError)
headers.Set(v1.PortHeader, fmt.Sprintf("%d", port.Remote))
headers.Set(v1.PortForwardRequestIDHeader, strconv.Itoa(requestID))
errorStream, err := pf.streamConn.CreateStream(headers)
if err != nil {
runtime.HandleError(fmt.Errorf("error creating error stream for port %d -> %d: %v", port.Local, port.Remote, err))
return
}
// we're not writing to this stream
errorStream.Close()
errorChan := make(chan error)
go func() {
message, err := ioutil.ReadAll(errorStream)
switch {
case err != nil:
errorChan <- fmt.Errorf("error reading from error stream for port %d -> %d: %v", port.Local, port.Remote, err)
case len(message) > 0:
errorChan <- fmt.Errorf("an error occurred forwarding %d -> %d: %v", port.Local, port.Remote, string(message))
}
close(errorChan)
}()
// create data stream
headers.Set(v1.StreamType, v1.StreamTypeData)
dataStream, err := pf.streamConn.CreateStream(headers)
if err != nil {
runtime.HandleError(fmt.Errorf("error creating forwarding stream for port %d -> %d: %v", port.Local, port.Remote, err))
return
}
localError := make(chan struct{})
remoteDone := make(chan struct{})
go func() {
// Copy from the remote side to the local port.
if _, err := io.Copy(conn, dataStream); err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
runtime.HandleError(fmt.Errorf("error copying from remote stream to local connection: %v", err))
}
// inform the select below that the remote copy is done
close(remoteDone)
}()
go func() {
// inform server we're not sending any more data after copy unblocks
defer dataStream.Close()
// Copy from the local port to the remote side.
if _, err := io.Copy(dataStream, conn); err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
runtime.HandleError(fmt.Errorf("error copying from local connection to remote stream: %v", err))
// break out of the select below without waiting for the other copy to finish
close(localError)
}
}()
// wait for either a local->remote error or for copying from remote->local to finish
select {
case <-remoteDone:
case <-localError:
}
// always expect something on errorChan (it may be nil)
err = <-errorChan
if err != nil {
runtime.HandleError(err)
}
}
func (pf *PortForwarder) Close() {
// stop all listeners
for _, l := range pf.listeners {
if err := l.Close(); err != nil {
runtime.HandleError(fmt.Errorf("error closing listener: %v", err))
}
}
}
// GetPorts will return the ports that were forwarded; this can be used to
// retrieve the locally-bound port in cases where the input was port 0. This
// function will signal an error if the Ready channel is nil or if the
// listeners are not ready yet; this function will succeed after the Ready
// channel has been closed.
func (pf *PortForwarder) GetPorts() ([]ForwardedPort, error) {
if pf.Ready == nil {
return nil, fmt.Errorf("no Ready channel provided")
}
select {
case <-pf.Ready:
return pf.ports, nil
default:
return nil, fmt.Errorf("listeners not ready")
}
}
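// The sketch below is illustrative and not part of the original file: it shows
// typical construction and use; the dialer is assumed to come from an SPDY
// round-tripper built elsewhere, and the port pair is an assumption.
func exampleForwardPorts(dialer httpstream.Dialer, out, errOut io.Writer) error {
	stopChan := make(chan struct{})
	readyChan := make(chan struct{})
	pf, err := New(dialer, []string{"8080:80"}, stopChan, readyChan, out, errOut)
	if err != nil {
		return err
	}
	// ForwardPorts blocks until stopChan is closed or the connection is lost.
	return pf.ForwardPorts()
}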

View File

@@ -1,256 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package portforward
import (
"net"
"os"
"reflect"
"sort"
"strings"
"testing"
"k8s.io/apimachinery/pkg/util/httpstream"
)
type fakeDialer struct {
dialed bool
conn httpstream.Connection
err error
negotiatedProtocol string
}
func (d *fakeDialer) Dial(protocols ...string) (httpstream.Connection, string, error) {
d.dialed = true
return d.conn, d.negotiatedProtocol, d.err
}
func TestParsePortsAndNew(t *testing.T) {
tests := []struct {
input []string
addresses []string
expectedPorts []ForwardedPort
expectedAddresses []listenAddress
expectPortParseError bool
expectAddressParseError bool
expectNewError bool
}{
{input: []string{}, expectNewError: true},
{input: []string{"a"}, expectPortParseError: true, expectAddressParseError: false, expectNewError: true},
{input: []string{":a"}, expectPortParseError: true, expectAddressParseError: false, expectNewError: true},
{input: []string{"-1"}, expectPortParseError: true, expectAddressParseError: false, expectNewError: true},
{input: []string{"65536"}, expectPortParseError: true, expectAddressParseError: false, expectNewError: true},
{input: []string{"0"}, expectPortParseError: true, expectAddressParseError: false, expectNewError: true},
{input: []string{"0:0"}, expectPortParseError: true, expectAddressParseError: false, expectNewError: true},
{input: []string{"a:5000"}, expectPortParseError: true, expectAddressParseError: false, expectNewError: true},
{input: []string{"5000:a"}, expectPortParseError: true, expectAddressParseError: false, expectNewError: true},
{input: []string{"5000:5000"}, addresses: []string{"127.0.0.257"}, expectPortParseError: false, expectAddressParseError: true, expectNewError: true},
{input: []string{"5000:5000"}, addresses: []string{"::g"}, expectPortParseError: false, expectAddressParseError: true, expectNewError: true},
{input: []string{"5000:5000"}, addresses: []string{"domain.invalid"}, expectPortParseError: false, expectAddressParseError: true, expectNewError: true},
{
input: []string{"5000:5000"},
addresses: []string{"localhost"},
expectedPorts: []ForwardedPort{
{5000, 5000},
},
expectedAddresses: []listenAddress{
{protocol: "tcp4", address: "127.0.0.1", failureMode: "all"},
{protocol: "tcp6", address: "::1", failureMode: "all"},
},
},
{
input: []string{"5000:5000"},
addresses: []string{"localhost", "127.0.0.1"},
expectedPorts: []ForwardedPort{
{5000, 5000},
},
expectedAddresses: []listenAddress{
{protocol: "tcp4", address: "127.0.0.1", failureMode: "any"},
{protocol: "tcp6", address: "::1", failureMode: "all"},
},
},
{
input: []string{"5000", "5000:5000", "8888:5000", "5000:8888", ":5000", "0:5000"},
addresses: []string{"127.0.0.1", "::1"},
expectedPorts: []ForwardedPort{
{5000, 5000},
{5000, 5000},
{8888, 5000},
{5000, 8888},
{0, 5000},
{0, 5000},
},
expectedAddresses: []listenAddress{
{protocol: "tcp4", address: "127.0.0.1", failureMode: "any"},
{protocol: "tcp6", address: "::1", failureMode: "any"},
},
},
}
for i, test := range tests {
parsedPorts, err := parsePorts(test.input)
haveError := err != nil
if e, a := test.expectPortParseError, haveError; e != a {
t.Fatalf("%d: parsePorts: error expected=%t, got %t: %s", i, e, a, err)
}
// default to localhost
if len(test.addresses) == 0 && len(test.expectedAddresses) == 0 {
test.addresses = []string{"localhost"}
test.expectedAddresses = []listenAddress{{protocol: "tcp4", address: "127.0.0.1"}, {protocol: "tcp6", address: "::1"}}
}
// assert address parser
parsedAddresses, err := parseAddresses(test.addresses)
haveError = err != nil
if e, a := test.expectAddressParseError, haveError; e != a {
t.Fatalf("%d: parseAddresses: error expected=%t, got %t: %s", i, e, a, err)
}
dialer := &fakeDialer{}
expectedStopChan := make(chan struct{})
readyChan := make(chan struct{})
var pf *PortForwarder
if len(test.addresses) > 0 {
pf, err = NewOnAddresses(dialer, test.addresses, test.input, expectedStopChan, readyChan, os.Stdout, os.Stderr)
} else {
pf, err = New(dialer, test.input, expectedStopChan, readyChan, os.Stdout, os.Stderr)
}
haveError = err != nil
if e, a := test.expectNewError, haveError; e != a {
t.Fatalf("%d: New: error expected=%t, got %t: %s", i, e, a, err)
}
if test.expectPortParseError || test.expectAddressParseError || test.expectNewError {
continue
}
sort.Slice(test.expectedAddresses, func(i, j int) bool { return test.expectedAddresses[i].address < test.expectedAddresses[j].address })
sort.Slice(parsedAddresses, func(i, j int) bool { return parsedAddresses[i].address < parsedAddresses[j].address })
if !reflect.DeepEqual(test.expectedAddresses, parsedAddresses) {
t.Fatalf("%d: expectedAddresses: %v, got: %v", i, test.expectedAddresses, parsedAddresses)
}
for pi, expectedPort := range test.expectedPorts {
if e, a := expectedPort.Local, parsedPorts[pi].Local; e != a {
t.Fatalf("%d: local expected: %d, got: %d", i, e, a)
}
if e, a := expectedPort.Remote, parsedPorts[pi].Remote; e != a {
t.Fatalf("%d: remote expected: %d, got: %d", i, e, a)
}
}
if dialer.dialed {
t.Fatalf("%d: expected not dialed", i)
}
if _, portErr := pf.GetPorts(); portErr == nil {
t.Fatalf("%d: GetPorts: error expected but got nil", i)
}
// mock-signal the Ready channel
close(readyChan)
if ports, portErr := pf.GetPorts(); portErr != nil {
t.Fatalf("%d: GetPorts: unable to retrieve ports: %s", i, portErr)
} else if !reflect.DeepEqual(test.expectedPorts, ports) {
t.Fatalf("%d: ports: expected %#v, got %#v", i, test.expectedPorts, ports)
}
if e, a := expectedStopChan, pf.stopChan; e != a {
t.Fatalf("%d: stopChan: expected %#v, got %#v", i, e, a)
}
if pf.Ready == nil {
t.Fatalf("%d: Ready should be non-nil", i)
}
}
}
type GetListenerTestCase struct {
Hostname string
Protocol string
ShouldRaiseError bool
ExpectedListenerAddress string
}
func TestGetListener(t *testing.T) {
var pf PortForwarder
testCases := []GetListenerTestCase{
{
Hostname: "localhost",
Protocol: "tcp4",
ShouldRaiseError: false,
ExpectedListenerAddress: "127.0.0.1",
},
{
Hostname: "127.0.0.1",
Protocol: "tcp4",
ShouldRaiseError: false,
ExpectedListenerAddress: "127.0.0.1",
},
{
Hostname: "::1",
Protocol: "tcp6",
ShouldRaiseError: false,
ExpectedListenerAddress: "::1",
},
{
Hostname: "::1",
Protocol: "tcp4",
ShouldRaiseError: true,
},
{
Hostname: "127.0.0.1",
Protocol: "tcp6",
ShouldRaiseError: true,
},
}
for i, testCase := range testCases {
expectedListenerPort := "12345"
listener, err := pf.getListener(testCase.Protocol, testCase.Hostname, &ForwardedPort{12345, 12345})
if err != nil && strings.Contains(err.Error(), "cannot assign requested address") {
t.Logf("Can't test #%d: %v", i, err)
continue
}
errorRaised := err != nil
if testCase.ShouldRaiseError != errorRaised {
t.Errorf("Test case #%d failed: Data %v an error has been raised(%t) where it should not (or reciprocally): %v", i, testCase, testCase.ShouldRaiseError, err)
continue
}
if errorRaised {
continue
}
if listener == nil {
t.Errorf("Test case #%d did not raise an error but failed in initializing listener", i)
continue
}
host, port, _ := net.SplitHostPort(listener.Addr().String())
t.Logf("Asked a %s forward for: %s:%v, got listener %s:%s, expected: %s", testCase.Protocol, testCase.Hostname, 12345, host, port, expectedListenerPort)
if host != testCase.ExpectedListenerAddress {
t.Errorf("Test case #%d failed: Listener does not listen on expected address: asked '%v' got '%v'", i, testCase.ExpectedListenerAddress, host)
}
if port != expectedListenerPort {
t.Errorf("Test case #%d failed: Listener does not listen on expected port: asked %v got %v", i, expectedListenerPort, port)
}
listener.Close()
}
}
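// Usage sketch (exampleForward is a hypothetical helper; dialer, stop, and
// ready are supplied by the caller): the port specs accepted by
// New/NewOnAddresses mirror the table above: "5000" means 5000:5000,
// "8888:5000" maps local 8888 to remote 5000, and ":5000" or "0:5000" request
// a random local port that GetPorts reports once the ready channel closes.
func exampleForward(dialer httpstream.Dialer, stop, ready chan struct{}) error {
	pf, err := NewOnAddresses(dialer, []string{"localhost"}, []string{":5000"}, stop, ready, os.Stdout, os.Stderr)
	if err != nil {
		return err
	}
	// ForwardPorts blocks until stop is closed or an error occurs.
	return pf.ForwardPorts()
}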

View File

@@ -1,27 +0,0 @@
reviewers:
- lavalamp
- smarterclayton
- wojtek-t
- deads2k
- derekwaynecarr
- caesarxuchao
- vishh
- mikedanese
- liggitt
- nikhiljindal
- erictune
- pmorie
- dchen1107
- saad-ali
- luxas
- yifan-gu
- eparis
- mwielgus
- timothysc
- jsafrane
- dims
- krousey
- a-robinson
- aveshagarwal
- resouer
- cjcullen

View File

@@ -1,924 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package record
import (
"encoding/json"
"fmt"
"math/rand"
"net/http"
"strconv"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/kubernetes/scheme"
restclient "k8s.io/client-go/rest"
ref "k8s.io/client-go/tools/reference"
)
type testEventSink struct {
OnCreate func(e *v1.Event) (*v1.Event, error)
OnUpdate func(e *v1.Event) (*v1.Event, error)
OnPatch func(e *v1.Event, p []byte) (*v1.Event, error)
}
// CreateEvent records the event for testing.
func (t *testEventSink) Create(e *v1.Event) (*v1.Event, error) {
if t.OnCreate != nil {
return t.OnCreate(e)
}
return e, nil
}
// UpdateEvent records the event for testing.
func (t *testEventSink) Update(e *v1.Event) (*v1.Event, error) {
if t.OnUpdate != nil {
return t.OnUpdate(e)
}
return e, nil
}
// PatchEvent records the event for testing.
func (t *testEventSink) Patch(e *v1.Event, p []byte) (*v1.Event, error) {
if t.OnPatch != nil {
return t.OnPatch(e, p)
}
return e, nil
}
type OnCreateFunc func(*v1.Event) (*v1.Event, error)
func OnCreateFactory(testCache map[string]*v1.Event, createEvent chan<- *v1.Event) OnCreateFunc {
return func(event *v1.Event) (*v1.Event, error) {
testCache[getEventKey(event)] = event
createEvent <- event
return event, nil
}
}
type OnPatchFunc func(*v1.Event, []byte) (*v1.Event, error)
func OnPatchFactory(testCache map[string]*v1.Event, patchEvent chan<- *v1.Event) OnPatchFunc {
return func(event *v1.Event, patch []byte) (*v1.Event, error) {
cachedEvent, found := testCache[getEventKey(event)]
if !found {
return nil, fmt.Errorf("unexpected error: couldn't find Event in testCache.")
}
originalData, err := json.Marshal(cachedEvent)
if err != nil {
return nil, fmt.Errorf("unexpected error: %v", err)
}
patched, err := strategicpatch.StrategicMergePatch(originalData, patch, event)
if err != nil {
return nil, fmt.Errorf("unexpected error: %v", err)
}
patchedObj := &v1.Event{}
err = json.Unmarshal(patched, patchedObj)
if err != nil {
return nil, fmt.Errorf("unexpected error: %v", err)
}
patchEvent <- patchedObj
return patchedObj, nil
}
}
func TestEventf(t *testing.T) {
testPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
SelfLink: "/api/version/pods/foo",
Name: "foo",
Namespace: "baz",
UID: "bar",
},
}
testPod2 := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
SelfLink: "/api/version/pods/foo",
Name: "foo",
Namespace: "baz",
UID: "differentUid",
},
}
testRef, err := ref.GetPartialReference(scheme.Scheme, testPod, "spec.containers[2]")
if err != nil {
t.Fatal(err)
}
testRef2, err := ref.GetPartialReference(scheme.Scheme, testPod2, "spec.containers[3]")
if err != nil {
t.Fatal(err)
}
table := []struct {
obj k8sruntime.Object
eventtype string
reason string
messageFmt string
elements []interface{}
expect *v1.Event
expectLog string
expectUpdate bool
}{
{
obj: testRef,
eventtype: v1.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "bar",
APIVersion: "version",
FieldPath: "spec.containers[2]",
},
Reason: "Started",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 1,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: false,
},
{
obj: testPod,
eventtype: v1.EventTypeNormal,
reason: "Killed",
messageFmt: "some other verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "bar",
APIVersion: "version",
},
Reason: "Killed",
Message: "some other verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 1,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'Killed' some other verbose message: 1`,
expectUpdate: false,
},
{
obj: testRef,
eventtype: v1.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "bar",
APIVersion: "version",
FieldPath: "spec.containers[2]",
},
Reason: "Started",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 2,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: true,
},
{
obj: testRef2,
eventtype: v1.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "differentUid",
APIVersion: "version",
FieldPath: "spec.containers[3]",
},
Reason: "Started",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 1,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: false,
},
{
obj: testRef,
eventtype: v1.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "bar",
APIVersion: "version",
FieldPath: "spec.containers[2]",
},
Reason: "Started",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 3,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: true,
},
{
obj: testRef2,
eventtype: v1.EventTypeNormal,
reason: "Stopped",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "differentUid",
APIVersion: "version",
FieldPath: "spec.containers[3]",
},
Reason: "Stopped",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 1,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Stopped' some verbose message: 1`,
expectUpdate: false,
},
{
obj: testRef2,
eventtype: v1.EventTypeNormal,
reason: "Stopped",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "differentUid",
APIVersion: "version",
FieldPath: "spec.containers[3]",
},
Reason: "Stopped",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 2,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Stopped' some verbose message: 1`,
expectUpdate: true,
},
}
testCache := map[string]*v1.Event{}
logCalled := make(chan struct{})
createEvent := make(chan *v1.Event)
updateEvent := make(chan *v1.Event)
patchEvent := make(chan *v1.Event)
testEvents := testEventSink{
OnCreate: OnCreateFactory(testCache, createEvent),
OnUpdate: func(event *v1.Event) (*v1.Event, error) {
updateEvent <- event
return event, nil
},
OnPatch: OnPatchFactory(testCache, patchEvent),
}
eventBroadcaster := NewBroadcasterForTests(0)
sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents)
clock := clock.NewFakeClock(time.Now())
recorder := recorderWithFakeClock(v1.EventSource{Component: "eventTest"}, eventBroadcaster, clock)
for index, item := range table {
clock.Step(1 * time.Second)
logWatcher := eventBroadcaster.StartLogging(func(formatter string, args ...interface{}) {
if e, a := item.expectLog, fmt.Sprintf(formatter, args...); e != a {
t.Errorf("Expected '%v', got '%v'", e, a)
}
logCalled <- struct{}{}
})
recorder.Eventf(item.obj, item.eventtype, item.reason, item.messageFmt, item.elements...)
<-logCalled
// validate event
if item.expectUpdate {
actualEvent := <-patchEvent
validateEvent(strconv.Itoa(index), actualEvent, item.expect, t)
} else {
actualEvent := <-createEvent
validateEvent(strconv.Itoa(index), actualEvent, item.expect, t)
}
logWatcher.Stop()
}
sinkWatcher.Stop()
}
func recorderWithFakeClock(eventSource v1.EventSource, eventBroadcaster EventBroadcaster, clock clock.Clock) EventRecorder {
return &recorderImpl{scheme.Scheme, eventSource, eventBroadcaster.(*eventBroadcasterImpl).Broadcaster, clock}
}
func TestWriteEventError(t *testing.T) {
type entry struct {
timesToSendError int
attemptsWanted int
err error
}
table := map[string]*entry{
"giveUp1": {
timesToSendError: 1000,
attemptsWanted: 1,
err: &restclient.RequestConstructionError{},
},
"giveUp2": {
timesToSendError: 1000,
attemptsWanted: 1,
err: &errors.StatusError{},
},
"retry1": {
timesToSendError: 1000,
attemptsWanted: 12,
err: &errors.UnexpectedObjectError{},
},
"retry2": {
timesToSendError: 1000,
attemptsWanted: 12,
err: fmt.Errorf("A weird error"),
},
"succeedEventually": {
timesToSendError: 2,
attemptsWanted: 2,
err: fmt.Errorf("A weird error"),
},
}
clock := clock.IntervalClock{Time: time.Now(), Duration: time.Second}
eventCorrelator := NewEventCorrelator(&clock)
randGen := rand.New(rand.NewSource(time.Now().UnixNano()))
for caseName, ent := range table {
attempts := 0
sink := &testEventSink{
OnCreate: func(event *v1.Event) (*v1.Event, error) {
attempts++
if attempts < ent.timesToSendError {
return nil, ent.err
}
return event, nil
},
}
ev := &v1.Event{}
recordToSink(sink, ev, eventCorrelator, randGen, 0)
if attempts != ent.attemptsWanted {
t.Errorf("case %v: wanted %d, got %d attempts", caseName, ent.attemptsWanted, attempts)
}
}
}
func TestUpdateExpiredEvent(t *testing.T) {
clock := clock.IntervalClock{Time: time.Now(), Duration: time.Second}
eventCorrelator := NewEventCorrelator(&clock)
randGen := rand.New(rand.NewSource(time.Now().UnixNano()))
var createdEvent *v1.Event
sink := &testEventSink{
OnPatch: func(*v1.Event, []byte) (*v1.Event, error) {
return nil, &errors.StatusError{
ErrStatus: metav1.Status{
Code: http.StatusNotFound,
Reason: metav1.StatusReasonNotFound,
}}
},
OnCreate: func(event *v1.Event) (*v1.Event, error) {
createdEvent = event
return event, nil
},
}
ev := &v1.Event{}
ev.ResourceVersion = "updated-resource-version"
ev.Count = 2
recordToSink(sink, ev, eventCorrelator, randGen, 0)
if createdEvent == nil {
t.Error("Event did not get created after patch failed")
return
}
if createdEvent.ResourceVersion != "" {
t.Errorf("Event did not have its resource version cleared, was %s", createdEvent.ResourceVersion)
}
}
func TestLotsOfEvents(t *testing.T) {
recorderCalled := make(chan struct{})
loggerCalled := make(chan struct{})
// Fail each event a few times to ensure there's some load on the tested code.
var counts [1000]int
testEvents := testEventSink{
OnCreate: func(event *v1.Event) (*v1.Event, error) {
num, err := strconv.Atoi(event.Message)
if err != nil {
t.Error(err)
return event, nil
}
counts[num]++
if counts[num] < 5 {
return nil, fmt.Errorf("fake error")
}
recorderCalled <- struct{}{}
return event, nil
},
}
eventBroadcaster := NewBroadcasterForTests(0)
sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents)
logWatcher := eventBroadcaster.StartLogging(func(formatter string, args ...interface{}) {
loggerCalled <- struct{}{}
})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "eventTest"})
for i := 0; i < maxQueuedEvents; i++ {
// we want a unique object to stop spam filtering
ref := &v1.ObjectReference{
Kind: "Pod",
Name: fmt.Sprintf("foo-%v", i),
Namespace: "baz",
UID: "bar",
APIVersion: "version",
}
// we need to vary the reason to prevent aggregation
go recorder.Eventf(ref, v1.EventTypeNormal, "Reason-"+strconv.Itoa(i), strconv.Itoa(i))
}
// Make sure no events were dropped by either of the listeners.
for i := 0; i < maxQueuedEvents; i++ {
<-recorderCalled
<-loggerCalled
}
// Make sure that every event was attempted 5 times
for i := 0; i < maxQueuedEvents; i++ {
if counts[i] < 5 {
t.Errorf("Only attempted to record event '%d' %d times.", i, counts[i])
}
}
sinkWatcher.Stop()
logWatcher.Stop()
}
func TestEventfNoNamespace(t *testing.T) {
testPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
SelfLink: "/api/version/pods/foo",
Name: "foo",
UID: "bar",
},
}
testRef, err := ref.GetPartialReference(scheme.Scheme, testPod, "spec.containers[2]")
if err != nil {
t.Fatal(err)
}
table := []struct {
obj k8sruntime.Object
eventtype string
reason string
messageFmt string
elements []interface{}
expect *v1.Event
expectLog string
expectUpdate bool
}{
{
obj: testRef,
eventtype: v1.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "",
UID: "bar",
APIVersion: "version",
FieldPath: "spec.containers[2]",
},
Reason: "Started",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 1,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: false,
},
}
testCache := map[string]*v1.Event{}
logCalled := make(chan struct{})
createEvent := make(chan *v1.Event)
updateEvent := make(chan *v1.Event)
patchEvent := make(chan *v1.Event)
testEvents := testEventSink{
OnCreate: OnCreateFactory(testCache, createEvent),
OnUpdate: func(event *v1.Event) (*v1.Event, error) {
updateEvent <- event
return event, nil
},
OnPatch: OnPatchFactory(testCache, patchEvent),
}
eventBroadcaster := NewBroadcasterForTests(0)
sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents)
clock := clock.NewFakeClock(time.Now())
recorder := recorderWithFakeClock(v1.EventSource{Component: "eventTest"}, eventBroadcaster, clock)
for index, item := range table {
clock.Step(1 * time.Second)
logWatcher := eventBroadcaster.StartLogging(func(formatter string, args ...interface{}) {
if e, a := item.expectLog, fmt.Sprintf(formatter, args...); e != a {
t.Errorf("Expected '%v', got '%v'", e, a)
}
logCalled <- struct{}{}
})
recorder.Eventf(item.obj, item.eventtype, item.reason, item.messageFmt, item.elements...)
<-logCalled
// validate event
if item.expectUpdate {
actualEvent := <-patchEvent
validateEvent(strconv.Itoa(index), actualEvent, item.expect, t)
} else {
actualEvent := <-createEvent
validateEvent(strconv.Itoa(index), actualEvent, item.expect, t)
}
logWatcher.Stop()
}
sinkWatcher.Stop()
}
func TestMultiSinkCache(t *testing.T) {
testPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
SelfLink: "/api/version/pods/foo",
Name: "foo",
Namespace: "baz",
UID: "bar",
},
}
testPod2 := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
SelfLink: "/api/version/pods/foo",
Name: "foo",
Namespace: "baz",
UID: "differentUid",
},
}
testRef, err := ref.GetPartialReference(scheme.Scheme, testPod, "spec.containers[2]")
if err != nil {
t.Fatal(err)
}
testRef2, err := ref.GetPartialReference(scheme.Scheme, testPod2, "spec.containers[3]")
if err != nil {
t.Fatal(err)
}
table := []struct {
obj k8sruntime.Object
eventtype string
reason string
messageFmt string
elements []interface{}
expect *v1.Event
expectLog string
expectUpdate bool
}{
{
obj: testRef,
eventtype: v1.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "bar",
APIVersion: "version",
FieldPath: "spec.containers[2]",
},
Reason: "Started",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 1,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: false,
},
{
obj: testPod,
eventtype: v1.EventTypeNormal,
reason: "Killed",
messageFmt: "some other verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "bar",
APIVersion: "version",
},
Reason: "Killed",
Message: "some other verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 1,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'Killed' some other verbose message: 1`,
expectUpdate: false,
},
{
obj: testRef,
eventtype: v1.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "bar",
APIVersion: "version",
FieldPath: "spec.containers[2]",
},
Reason: "Started",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 2,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: true,
},
{
obj: testRef2,
eventtype: v1.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "differentUid",
APIVersion: "version",
FieldPath: "spec.containers[3]",
},
Reason: "Started",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 1,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: false,
},
{
obj: testRef,
eventtype: v1.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "bar",
APIVersion: "version",
FieldPath: "spec.containers[2]",
},
Reason: "Started",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 3,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: true,
},
{
obj: testRef2,
eventtype: v1.EventTypeNormal,
reason: "Stopped",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "differentUid",
APIVersion: "version",
FieldPath: "spec.containers[3]",
},
Reason: "Stopped",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 1,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Stopped' some verbose message: 1`,
expectUpdate: false,
},
{
obj: testRef2,
eventtype: v1.EventTypeNormal,
reason: "Stopped",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "differentUid",
APIVersion: "version",
FieldPath: "spec.containers[3]",
},
Reason: "Stopped",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 2,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Stopped' some verbose message: 1`,
expectUpdate: true,
},
}
testCache := map[string]*v1.Event{}
createEvent := make(chan *v1.Event)
updateEvent := make(chan *v1.Event)
patchEvent := make(chan *v1.Event)
testEvents := testEventSink{
OnCreate: OnCreateFactory(testCache, createEvent),
OnUpdate: func(event *v1.Event) (*v1.Event, error) {
updateEvent <- event
return event, nil
},
OnPatch: OnPatchFactory(testCache, patchEvent),
}
testCache2 := map[string]*v1.Event{}
createEvent2 := make(chan *v1.Event)
updateEvent2 := make(chan *v1.Event)
patchEvent2 := make(chan *v1.Event)
testEvents2 := testEventSink{
OnCreate: OnCreateFactory(testCache2, createEvent2),
OnUpdate: func(event *v1.Event) (*v1.Event, error) {
updateEvent2 <- event
return event, nil
},
OnPatch: OnPatchFactory(testCache2, patchEvent2),
}
eventBroadcaster := NewBroadcasterForTests(0)
clock := clock.NewFakeClock(time.Now())
recorder := recorderWithFakeClock(v1.EventSource{Component: "eventTest"}, eventBroadcaster, clock)
sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents)
for index, item := range table {
clock.Step(1 * time.Second)
recorder.Eventf(item.obj, item.eventtype, item.reason, item.messageFmt, item.elements...)
// validate event
if item.expectUpdate {
actualEvent := <-patchEvent
validateEvent(strconv.Itoa(index), actualEvent, item.expect, t)
} else {
actualEvent := <-createEvent
validateEvent(strconv.Itoa(index), actualEvent, item.expect, t)
}
}
// Another StartRecordingToSink call should start recording events with a new, clean cache.
sinkWatcher2 := eventBroadcaster.StartRecordingToSink(&testEvents2)
for index, item := range table {
clock.Step(1 * time.Second)
recorder.Eventf(item.obj, item.eventtype, item.reason, item.messageFmt, item.elements...)
// validate event
if item.expectUpdate {
actualEvent := <-patchEvent2
validateEvent(strconv.Itoa(index), actualEvent, item.expect, t)
} else {
actualEvent := <-createEvent2
validateEvent(strconv.Itoa(index), actualEvent, item.expect, t)
}
}
sinkWatcher.Stop()
sinkWatcher2.Stop()
}

View File

@@ -1,279 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package record
import (
"reflect"
"strings"
"testing"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/diff"
)
func makeObjectReference(kind, name, namespace string) v1.ObjectReference {
return v1.ObjectReference{
Kind: kind,
Name: name,
Namespace: namespace,
UID: "C934D34AFB20242",
APIVersion: "version",
FieldPath: "spec.containers{mycontainer}",
}
}
func makeEvent(reason, message string, involvedObject v1.ObjectReference) v1.Event {
eventTime := metav1.Now()
event := v1.Event{
Reason: reason,
Message: message,
InvolvedObject: involvedObject,
Source: v1.EventSource{
Component: "kubelet",
Host: "kublet.node1",
},
Count: 1,
FirstTimestamp: eventTime,
LastTimestamp: eventTime,
Type: v1.EventTypeNormal,
}
return event
}
func makeEvents(num int, template v1.Event) []v1.Event {
events := []v1.Event{}
for i := 0; i < num; i++ {
events = append(events, template)
}
return events
}
func makeUniqueEvents(num int) []v1.Event {
events := []v1.Event{}
kind := "Pod"
for i := 0; i < num; i++ {
reason := strings.Join([]string{"reason", strconv.Itoa(i)}, "-")
message := strings.Join([]string{"message", strconv.Itoa(i)}, "-")
name := strings.Join([]string{"pod", strconv.Itoa(i)}, "-")
namespace := strings.Join([]string{"ns", strconv.Itoa(i)}, "-")
involvedObject := makeObjectReference(kind, name, namespace)
events = append(events, makeEvent(reason, message, involvedObject))
}
return events
}
func makeSimilarEvents(num int, template v1.Event, messagePrefix string) []v1.Event {
events := makeEvents(num, template)
for i := range events {
events[i].Message = strings.Join([]string{messagePrefix, strconv.Itoa(i), events[i].Message}, "-")
}
return events
}
func setCount(event v1.Event, count int) v1.Event {
event.Count = int32(count)
return event
}
func validateEvent(messagePrefix string, actualEvent *v1.Event, expectedEvent *v1.Event, t *testing.T) (*v1.Event, error) {
recvEvent := *actualEvent
expectCompression := expectedEvent.Count > 1
t.Logf("%v - expectedEvent.Count is %d\n", messagePrefix, expectedEvent.Count)
// Just check that the timestamp was set.
if recvEvent.FirstTimestamp.IsZero() || recvEvent.LastTimestamp.IsZero() {
t.Errorf("%v - timestamp wasn't set: %#v", messagePrefix, recvEvent)
}
actualFirstTimestamp := recvEvent.FirstTimestamp
actualLastTimestamp := recvEvent.LastTimestamp
if actualFirstTimestamp.Equal(&actualLastTimestamp) {
if expectCompression {
t.Errorf("%v - FirstTimestamp (%q) and LastTimestamp (%q) must be different to indicate event compression happened, but were the same. Actual Event: %#v", messagePrefix, actualFirstTimestamp, actualLastTimestamp, recvEvent)
}
} else {
if expectedEvent.Count == 1 {
t.Errorf("%v - FirstTimestamp (%q) and LastTimestamp (%q) must be equal to indicate only one occurrence of the event, but were different. Actual Event: %#v", messagePrefix, actualFirstTimestamp, actualLastTimestamp, recvEvent)
}
}
// Temporarily clear timestamps for comparison; their actual values don't matter.
recvEvent.FirstTimestamp = expectedEvent.FirstTimestamp
recvEvent.LastTimestamp = expectedEvent.LastTimestamp
// Check that name has the right prefix.
if n, en := recvEvent.Name, expectedEvent.Name; !strings.HasPrefix(n, en) {
t.Errorf("%v - Name '%v' does not contain prefix '%v'", messagePrefix, n, en)
}
recvEvent.Name = expectedEvent.Name
if e, a := expectedEvent, &recvEvent; !reflect.DeepEqual(e, a) {
t.Errorf("%v - diff: %s", messagePrefix, diff.ObjectGoPrintDiff(e, a))
}
recvEvent.FirstTimestamp = actualFirstTimestamp
recvEvent.LastTimestamp = actualLastTimestamp
return actualEvent, nil
}
// TestEventAggregatorByReasonFunc ensures that two events are aggregated if they vary only by event.message
func TestEventAggregatorByReasonFunc(t *testing.T) {
event1 := makeEvent("end-of-world", "it was fun", makeObjectReference("Pod", "pod1", "other"))
event2 := makeEvent("end-of-world", "it was awful", makeObjectReference("Pod", "pod1", "other"))
event3 := makeEvent("nevermind", "it was a bug", makeObjectReference("Pod", "pod1", "other"))
aggKey1, localKey1 := EventAggregatorByReasonFunc(&event1)
aggKey2, localKey2 := EventAggregatorByReasonFunc(&event2)
aggKey3, _ := EventAggregatorByReasonFunc(&event3)
if aggKey1 != aggKey2 {
t.Errorf("Expected %v equal %v", aggKey1, aggKey2)
}
if localKey1 == localKey2 {
t.Errorf("Expected %v to not equal %v", localKey1, localKey2)
}
if aggKey1 == aggKey3 {
t.Errorf("Expected %v to not equal %v", aggKey1, aggKey3)
}
}
// TestEventAggregatorByReasonMessageFunc validates the proper output for an aggregate message
func TestEventAggregatorByReasonMessageFunc(t *testing.T) {
expectedPrefix := "(combined from similar events): "
event1 := makeEvent("end-of-world", "it was fun", makeObjectReference("Pod", "pod1", "other"))
actual := EventAggregatorByReasonMessageFunc(&event1)
if !strings.HasPrefix(actual, expectedPrefix) {
t.Errorf("Expected %v to begin with prefix %v", actual, expectedPrefix)
}
}
// TestEventCorrelator validates proper counting, aggregation of events
func TestEventCorrelator(t *testing.T) {
firstEvent := makeEvent("first", "i am first", makeObjectReference("Pod", "my-pod", "my-ns"))
duplicateEvent := makeEvent("duplicate", "me again", makeObjectReference("Pod", "my-pod", "my-ns"))
uniqueEvent := makeEvent("unique", "snowflake", makeObjectReference("Pod", "my-pod", "my-ns"))
similarEvent := makeEvent("similar", "similar message", makeObjectReference("Pod", "my-pod", "my-ns"))
similarEvent.InvolvedObject.FieldPath = "spec.containers{container1}"
aggregateEvent := makeEvent(similarEvent.Reason, EventAggregatorByReasonMessageFunc(&similarEvent), similarEvent.InvolvedObject)
similarButDifferentContainerEvent := similarEvent
similarButDifferentContainerEvent.InvolvedObject.FieldPath = "spec.containers{container2}"
scenario := map[string]struct {
previousEvents []v1.Event
newEvent v1.Event
expectedEvent v1.Event
intervalSeconds int
expectedSkip bool
}{
"create-a-single-event": {
previousEvents: []v1.Event{},
newEvent: firstEvent,
expectedEvent: setCount(firstEvent, 1),
intervalSeconds: 5,
},
"the-same-event-should-just-count": {
previousEvents: makeEvents(1, duplicateEvent),
newEvent: duplicateEvent,
expectedEvent: setCount(duplicateEvent, 2),
intervalSeconds: 5,
},
"the-same-event-should-just-count-even-if-more-than-aggregate": {
previousEvents: makeEvents(defaultAggregateMaxEvents, duplicateEvent),
newEvent: duplicateEvent,
expectedEvent: setCount(duplicateEvent, defaultAggregateMaxEvents+1),
intervalSeconds: 30, // larger interval induces aggregation but not spam.
},
"the-same-event-is-spam-if-happens-too-frequently": {
previousEvents: makeEvents(defaultSpamBurst+1, duplicateEvent),
newEvent: duplicateEvent,
expectedSkip: true,
intervalSeconds: 1,
},
"create-many-unique-events": {
previousEvents: makeUniqueEvents(30),
newEvent: uniqueEvent,
expectedEvent: setCount(uniqueEvent, 1),
intervalSeconds: 5,
},
"similar-events-should-aggregate-event": {
previousEvents: makeSimilarEvents(defaultAggregateMaxEvents-1, similarEvent, similarEvent.Message),
newEvent: similarEvent,
expectedEvent: setCount(aggregateEvent, 1),
intervalSeconds: 5,
},
"similar-events-many-times-should-count-the-aggregate": {
previousEvents: makeSimilarEvents(defaultAggregateMaxEvents, similarEvent, similarEvent.Message),
newEvent: similarEvent,
expectedEvent: setCount(aggregateEvent, 2),
intervalSeconds: 5,
},
"events-from-different-containers-do-not-aggregate": {
previousEvents: makeEvents(1, similarButDifferentContainerEvent),
newEvent: similarEvent,
expectedEvent: setCount(similarEvent, 1),
intervalSeconds: 5,
},
"similar-events-whose-interval-is-greater-than-aggregate-interval-do-not-aggregate": {
previousEvents: makeSimilarEvents(defaultAggregateMaxEvents-1, similarEvent, similarEvent.Message),
newEvent: similarEvent,
expectedEvent: setCount(similarEvent, 1),
intervalSeconds: defaultAggregateIntervalInSeconds,
},
}
for testScenario, testInput := range scenario {
eventInterval := time.Duration(testInput.intervalSeconds) * time.Second
clock := clock.IntervalClock{Time: time.Now(), Duration: eventInterval}
correlator := NewEventCorrelator(&clock)
for i := range testInput.previousEvents {
event := testInput.previousEvents[i]
now := metav1.NewTime(clock.Now())
event.FirstTimestamp = now
event.LastTimestamp = now
result, err := correlator.EventCorrelate(&event)
if err != nil {
t.Errorf("scenario %v: unexpected error playing back prevEvents %v", testScenario, err)
}
// if we are skipping the event, we can avoid updating state
if !result.Skip {
correlator.UpdateState(result.Event)
}
}
// update the input to current clock value
now := metav1.NewTime(clock.Now())
testInput.newEvent.FirstTimestamp = now
testInput.newEvent.LastTimestamp = now
result, err := correlator.EventCorrelate(&testInput.newEvent)
if err != nil {
t.Errorf("scenario %v: unexpected error correlating input event %v", testScenario, err)
}
// verify the spam filter did not skip the event unexpectedly
if result.Skip != testInput.expectedSkip {
t.Errorf("scenario %v: expected skip %v, but got %v", testScenario, testInput.expectedSkip, result.Skip)
continue
}
// we wanted to actually skip, so no event is needed to validate
if testInput.expectedSkip {
continue
}
// validate event
_, err = validateEvent(testScenario, result.Event, &testInput.expectedEvent, t)
if err != nil {
t.Errorf("scenario %v: unexpected error validating result %v", testScenario, err)
}
}
}
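// Usage sketch (exampleCorrelate is a hypothetical helper): the
// correlate / skip-check / UpdateState sequence the scenarios above
// exercise. Skipped (spam-filtered) events are dropped; accepted events
// must be fed back through UpdateState so later duplicates aggregate.
func exampleCorrelate(c *EventCorrelator, ev *v1.Event) (*v1.Event, bool) {
	result, err := c.EventCorrelate(ev)
	if err != nil || result.Skip {
		return nil, false
	}
	// Remember the correlated event so future similar ones are counted.
	c.UpdateState(result.Event)
	return result.Event, true
}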

View File

@@ -1,72 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reference
import (
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
type TestRuntimeObj struct {
metav1.TypeMeta
metav1.ObjectMeta
}
func (o *TestRuntimeObj) DeepCopyObject() runtime.Object {
panic("die")
}
func TestGetReferenceRefVersion(t *testing.T) {
tests := []struct {
name string
input *TestRuntimeObj
expectedRefVersion string
}{
{
name: "api from selflink",
input: &TestRuntimeObj{
ObjectMeta: metav1.ObjectMeta{SelfLink: "/api/v1/namespaces"},
},
expectedRefVersion: "v1",
},
{
name: "foo.group/v3 from selflink",
input: &TestRuntimeObj{
ObjectMeta: metav1.ObjectMeta{SelfLink: "/apis/foo.group/v3/namespaces"},
},
expectedRefVersion: "foo.group/v3",
},
}
scheme := runtime.NewScheme()
scheme.AddKnownTypes(schema.GroupVersion{Group: "this", Version: "is ignored"}, &TestRuntimeObj{})
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ref, err := GetReference(scheme, test.input)
if err != nil {
t.Fatal(err)
}
if test.expectedRefVersion != ref.APIVersion {
t.Errorf("expected %q, got %q", test.expectedRefVersion, ref.APIVersion)
}
})
}
}

View File

@@ -1,20 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package remotecommand adds support for executing commands in containers,
// with support for separate stdin, stdout, and stderr streams, as well as
// TTY.
package remotecommand // import "k8s.io/client-go/tools/remotecommand"

View File

@@ -1,55 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
import (
"fmt"
"io"
"io/ioutil"
"k8s.io/apimachinery/pkg/util/runtime"
)
// errorStreamDecoder interprets the data on the error channel and creates a go error object from it.
type errorStreamDecoder interface {
decode(message []byte) error
}
// watchErrorStream watches the errorStream for remote command error data,
// decodes it with the given errorStreamDecoder, sends the decoded error (or nil if the remote
// command exited successfully) to the returned error channel, and closes it.
// This function returns immediately.
func watchErrorStream(errorStream io.Reader, d errorStreamDecoder) chan error {
errorChan := make(chan error)
go func() {
defer runtime.HandleCrash()
message, err := ioutil.ReadAll(errorStream)
switch {
case err != nil && err != io.EOF:
errorChan <- fmt.Errorf("error reading from error stream: %s", err)
case len(message) > 0:
errorChan <- d.decode(message)
default:
errorChan <- nil
}
close(errorChan)
}()
return errorChan
}
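// Illustrative sketch (exampleDecoder and exampleWait are hypothetical,
// not part of this file): a trivial errorStreamDecoder plus a blocking
// consumer. Receiving from the returned channel waits for the remote
// command to finish; a nil value means it exited successfully.
type exampleDecoder struct{}

func (exampleDecoder) decode(message []byte) error {
	return fmt.Errorf("remote command failed: %s", message)
}

func exampleWait(errorStream io.Reader) error {
	return <-watchErrorStream(errorStream, exampleDecoder{})
}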

View File

@@ -1,41 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
import (
"io"
)
// readerWrapper delegates to an io.Reader so that only the io.Reader interface is implemented,
// to keep io.Copy from doing things we don't want when copying from the reader to the data stream.
//
// If the Stdin io.Reader provided to remotecommand implements a WriteTo function (like bytes.Buffer does[1]),
// io.Copy calls that method[2] to attempt to write the entire buffer to the stream in one call.
// That results in an oversized call to spdystream.Stream#Write [3],
// which results in a single oversized data frame[4] that is too large.
//
// [1] https://golang.org/pkg/bytes/#Buffer.WriteTo
// [2] https://golang.org/pkg/io/#Copy
// [3] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/stream.go#L66-L73
// [4] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/spdy/write.go#L302-L304
type readerWrapper struct {
reader io.Reader
}
func (r readerWrapper) Read(p []byte) (int, error) {
return r.reader.Read(p)
}
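// Illustrative sketch (copyChunked is a hypothetical helper): wrapping src
// hides any WriteTo fast path, so io.Copy falls back to its bounded
// Read/Write loop and the stream sees frames no larger than the copy
// buffer instead of one oversized write.
func copyChunked(dst io.Writer, src io.Reader) (int64, error) {
	return io.Copy(dst, readerWrapper{src})
}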

View File

@@ -1,142 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
import (
"fmt"
"io"
"net/http"
"net/url"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/remotecommand"
restclient "k8s.io/client-go/rest"
spdy "k8s.io/client-go/transport/spdy"
)
// StreamOptions holds information pertaining to the current streaming session:
// input/output streams, if the client is requesting a TTY, and a terminal size queue to
// support terminal resizing.
type StreamOptions struct {
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
Tty bool
TerminalSizeQueue TerminalSizeQueue
}
// Executor is an interface for transporting shell-style streams.
type Executor interface {
// Stream initiates the transport of the standard shell streams. It will transport any
// non-nil stream to a remote system, and return an error if a problem occurs. If tty
// is set, the stderr stream is not used (raw TTY manages stdout and stderr over the
// stdout stream).
Stream(options StreamOptions) error
}
type streamCreator interface {
CreateStream(headers http.Header) (httpstream.Stream, error)
}
type streamProtocolHandler interface {
stream(conn streamCreator) error
}
// streamExecutor handles transporting standard shell streams over an httpstream connection.
type streamExecutor struct {
upgrader spdy.Upgrader
transport http.RoundTripper
method string
url *url.URL
protocols []string
}
// NewSPDYExecutor connects to the provided server and upgrades the connection to
// multiplexed bidirectional streams.
func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) {
wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config)
if err != nil {
return nil, err
}
return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url)
}
// NewSPDYExecutorForTransports connects to the provided server using the given transport,
// upgrades the response using the given upgrader to multiplexed bidirectional streams.
func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) {
return NewSPDYExecutorForProtocols(
transport, upgrader, method, url,
remotecommand.StreamProtocolV4Name,
remotecommand.StreamProtocolV3Name,
remotecommand.StreamProtocolV2Name,
remotecommand.StreamProtocolV1Name,
)
}
// NewSPDYExecutorForProtocols connects to the provided server and upgrades the connection to
// multiplexed bidirectional streams using only the provided protocols. Exposed for testing, most
// callers should use NewSPDYExecutor or NewSPDYExecutorForTransports.
func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL, protocols ...string) (Executor, error) {
return &streamExecutor{
upgrader: upgrader,
transport: transport,
method: method,
url: url,
protocols: protocols,
}, nil
}
// Stream opens a protocol streamer to the server and streams until a client closes
// the connection or the server disconnects.
func (e *streamExecutor) Stream(options StreamOptions) error {
req, err := http.NewRequest(e.method, e.url.String(), nil)
if err != nil {
return fmt.Errorf("error creating request: %v", err)
}
conn, protocol, err := spdy.Negotiate(
e.upgrader,
&http.Client{Transport: e.transport},
req,
e.protocols...,
)
if err != nil {
return err
}
defer conn.Close()
var streamer streamProtocolHandler
switch protocol {
case remotecommand.StreamProtocolV4Name:
streamer = newStreamProtocolV4(options)
case remotecommand.StreamProtocolV3Name:
streamer = newStreamProtocolV3(options)
case remotecommand.StreamProtocolV2Name:
streamer = newStreamProtocolV2(options)
case "":
klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name)
fallthrough
case remotecommand.StreamProtocolV1Name:
streamer = newStreamProtocolV1(options)
}
return streamer.stream(conn)
}
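// Usage sketch (exampleStream is hypothetical; config and execURL, e.g. a
// pods/exec subresource URL, are supplied by the caller): build an Executor,
// then Stream blocks until the remote process exits or the connection drops.
func exampleStream(config *restclient.Config, execURL *url.URL, in io.Reader, out, errOut io.Writer) error {
	exec, err := NewSPDYExecutor(config, "POST", execURL)
	if err != nil {
		return err
	}
	return exec.Stream(StreamOptions{
		Stdin:  in,
		Stdout: out,
		Stderr: errOut,
	})
}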

View File

@@ -1,33 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
// TerminalSize and TerminalSizeQueue were part of k8s.io/kubernetes/pkg/util/term
// and were moved here to decouple the client from other term dependencies.
// TerminalSize represents the width and height of a terminal.
type TerminalSize struct {
Width uint16
Height uint16
}
// TerminalSizeQueue is capable of returning terminal resize events as they occur.
type TerminalSizeQueue interface {
// Next returns the new terminal size after the terminal has been resized. It returns nil when
// monitoring has been stopped.
Next() *TerminalSize
}
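// Illustrative sketch (sizeQueue is a hypothetical implementation): a
// channel-backed TerminalSizeQueue. Resize events are sent on the channel;
// closing it makes Next return nil, signalling that monitoring has stopped.
type sizeQueue chan TerminalSize

func (q sizeQueue) Next() *TerminalSize {
	size, ok := <-q
	if !ok {
		return nil
	}
	return &size
}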

View File

@@ -1,160 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/klog"
)
// streamProtocolV1 implements the first version of the streaming exec & attach
// protocol. This version has some bugs, such as not being able to detect when
// non-interactive stdin data has ended. See http://issues.k8s.io/13394 and
// http://issues.k8s.io/13395 for more details.
type streamProtocolV1 struct {
StreamOptions
errorStream httpstream.Stream
remoteStdin httpstream.Stream
remoteStdout httpstream.Stream
remoteStderr httpstream.Stream
}
var _ streamProtocolHandler = &streamProtocolV1{}
func newStreamProtocolV1(options StreamOptions) streamProtocolHandler {
return &streamProtocolV1{
StreamOptions: options,
}
}
func (p *streamProtocolV1) stream(conn streamCreator) error {
doneChan := make(chan struct{}, 2)
errorChan := make(chan error)
cp := func(s string, dst io.Writer, src io.Reader) {
klog.V(6).Infof("Copying %s", s)
defer klog.V(6).Infof("Done copying %s", s)
if _, err := io.Copy(dst, src); err != nil && err != io.EOF {
klog.Errorf("Error copying %s: %v", s, err)
}
if s == v1.StreamTypeStdout || s == v1.StreamTypeStderr {
doneChan <- struct{}{}
}
}
// set up all the streams first
var err error
headers := http.Header{}
headers.Set(v1.StreamType, v1.StreamTypeError)
p.errorStream, err = conn.CreateStream(headers)
if err != nil {
return err
}
defer p.errorStream.Reset()
// Create all the streams first, then start the copy goroutines. The server doesn't start its copy
// goroutines until it's received all of the streams. If the client creates the stdin stream and
// immediately begins copying stdin data to the server, it's possible to overwhelm and wedge the
// spdy frame handler in the server so that it is full of unprocessed frames. The frames aren't
// getting processed because the server hasn't started its copying, and it won't do that until it
// gets all the streams. By creating all the streams first, we ensure that the server is ready to
// process data before the client starts sending any. See https://issues.k8s.io/16373 for more info.
if p.Stdin != nil {
headers.Set(v1.StreamType, v1.StreamTypeStdin)
p.remoteStdin, err = conn.CreateStream(headers)
if err != nil {
return err
}
defer p.remoteStdin.Reset()
}
if p.Stdout != nil {
headers.Set(v1.StreamType, v1.StreamTypeStdout)
p.remoteStdout, err = conn.CreateStream(headers)
if err != nil {
return err
}
defer p.remoteStdout.Reset()
}
if p.Stderr != nil && !p.Tty {
headers.Set(v1.StreamType, v1.StreamTypeStderr)
p.remoteStderr, err = conn.CreateStream(headers)
if err != nil {
return err
}
defer p.remoteStderr.Reset()
}
// now that all the streams have been created, proceed with reading & copying
// always read from errorStream
go func() {
message, err := ioutil.ReadAll(p.errorStream)
if err != nil && err != io.EOF {
errorChan <- fmt.Errorf("Error reading from error stream: %s", err)
return
}
if len(message) > 0 {
errorChan <- fmt.Errorf("Error executing remote command: %s", message)
return
}
}()
if p.Stdin != nil {
// TODO this goroutine will never exit cleanly (the io.Copy never unblocks)
// because stdin is not closed until the process exits. If we try to call
// stdin.Close(), it returns no error but doesn't unblock the copy. It will
// exit when the process exits, instead.
go cp(v1.StreamTypeStdin, p.remoteStdin, readerWrapper{p.Stdin})
}
waitCount := 0
completedStreams := 0
if p.Stdout != nil {
waitCount++
go cp(v1.StreamTypeStdout, p.Stdout, p.remoteStdout)
}
if p.Stderr != nil && !p.Tty {
waitCount++
go cp(v1.StreamTypeStderr, p.Stderr, p.remoteStderr)
}
Loop:
for {
select {
case <-doneChan:
completedStreams++
if completedStreams == waitCount {
break Loop
}
case err := <-errorChan:
return err
}
}
return nil
}

View File

@@ -1,195 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"sync"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/runtime"
)
// streamProtocolV2 implements version 2 of the streaming protocol for attach
// and exec. The original streaming protocol was not versioned. As a result, this
// version is referred to as version 2, even though it is the first actual
// numbered version.
type streamProtocolV2 struct {
StreamOptions
errorStream io.Reader
remoteStdin io.ReadWriteCloser
remoteStdout io.Reader
remoteStderr io.Reader
}
var _ streamProtocolHandler = &streamProtocolV2{}
func newStreamProtocolV2(options StreamOptions) streamProtocolHandler {
return &streamProtocolV2{
StreamOptions: options,
}
}
func (p *streamProtocolV2) createStreams(conn streamCreator) error {
var err error
headers := http.Header{}
// set up error stream
headers.Set(v1.StreamType, v1.StreamTypeError)
p.errorStream, err = conn.CreateStream(headers)
if err != nil {
return err
}
// set up stdin stream
if p.Stdin != nil {
headers.Set(v1.StreamType, v1.StreamTypeStdin)
p.remoteStdin, err = conn.CreateStream(headers)
if err != nil {
return err
}
}
// set up stdout stream
if p.Stdout != nil {
headers.Set(v1.StreamType, v1.StreamTypeStdout)
p.remoteStdout, err = conn.CreateStream(headers)
if err != nil {
return err
}
}
// set up stderr stream
if p.Stderr != nil && !p.Tty {
headers.Set(v1.StreamType, v1.StreamTypeStderr)
p.remoteStderr, err = conn.CreateStream(headers)
if err != nil {
return err
}
}
return nil
}
func (p *streamProtocolV2) copyStdin() {
if p.Stdin != nil {
var once sync.Once
// copy from client's stdin to container's stdin
go func() {
defer runtime.HandleCrash()
// if p.Stdin is noninteractive, e.g. `echo abc | kubectl exec -i <pod> -- cat`, make sure
// we close remoteStdin as soon as the copy from p.Stdin to remoteStdin finishes. Otherwise
// the executed command will remain running.
defer once.Do(func() { p.remoteStdin.Close() })
if _, err := io.Copy(p.remoteStdin, readerWrapper{p.Stdin}); err != nil {
runtime.HandleError(err)
}
}()
// read from remoteStdin until the stream is closed. this is essential to
// be able to exit interactive sessions cleanly and not leak goroutines or
// hang the client's terminal.
//
// TODO we aren't using go-dockerclient any more; revisit this to determine if it's still
// required by engine-api.
//
// go-dockerclient's current hijack implementation
// (https://github.com/fsouza/go-dockerclient/blob/89f3d56d93788dfe85f864a44f85d9738fca0670/client.go#L564)
// waits for all three streams (stdin/stdout/stderr) to finish copying
// before returning. When hijack finishes copying stdout/stderr, it calls
// Close() on its side of remoteStdin, which allows this copy to complete.
// When that happens, we must Close() on our side of remoteStdin, to
// allow the copy in hijack to complete, and hijack to return.
go func() {
defer runtime.HandleCrash()
defer once.Do(func() { p.remoteStdin.Close() })
// this "copy" doesn't actually read anything - it's just here to wait for
// the server to close remoteStdin.
if _, err := io.Copy(ioutil.Discard, p.remoteStdin); err != nil {
runtime.HandleError(err)
}
}()
}
}
func (p *streamProtocolV2) copyStdout(wg *sync.WaitGroup) {
if p.Stdout == nil {
return
}
wg.Add(1)
go func() {
defer runtime.HandleCrash()
defer wg.Done()
if _, err := io.Copy(p.Stdout, p.remoteStdout); err != nil {
runtime.HandleError(err)
}
}()
}
func (p *streamProtocolV2) copyStderr(wg *sync.WaitGroup) {
if p.Stderr == nil || p.Tty {
return
}
wg.Add(1)
go func() {
defer runtime.HandleCrash()
defer wg.Done()
if _, err := io.Copy(p.Stderr, p.remoteStderr); err != nil {
runtime.HandleError(err)
}
}()
}
func (p *streamProtocolV2) stream(conn streamCreator) error {
if err := p.createStreams(conn); err != nil {
return err
}
// now that all the streams have been created, proceed with reading & copying
errorChan := watchErrorStream(p.errorStream, &errorDecoderV2{})
p.copyStdin()
var wg sync.WaitGroup
p.copyStdout(&wg)
p.copyStderr(&wg)
// we're waiting for stdout/stderr to finish copying
wg.Wait()
// waits for errorStream to finish reading with an error or nil
return <-errorChan
}
// errorDecoderV2 interprets the error channel data as plain text.
type errorDecoderV2 struct{}
func (d *errorDecoderV2) decode(message []byte) error {
return fmt.Errorf("error executing remote command: %s", message)
}
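// For example (editor's illustration), a raw error-channel payload of
// "command not found" decodes to:
//
//	error executing remote command: command not found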

View File

@@ -1,228 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
import (
"errors"
"io"
"net/http"
"strings"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/wait"
)
type fakeReader struct {
err error
}
func (r *fakeReader) Read([]byte) (int, error) { return 0, r.err }
type fakeWriter struct{}
func (*fakeWriter) Write([]byte) (int, error) { return 0, nil }
type fakeStreamCreator struct {
created map[string]bool
errors map[string]error
}
var _ streamCreator = &fakeStreamCreator{}
func (f *fakeStreamCreator) CreateStream(headers http.Header) (httpstream.Stream, error) {
streamType := headers.Get(v1.StreamType)
f.created[streamType] = true
return nil, f.errors[streamType]
}
func TestV2CreateStreams(t *testing.T) {
tests := []struct {
name string
stdin bool
stdinError error
stdout bool
stdoutError error
stderr bool
stderrError error
errorError error
tty bool
expectError bool
}{
{
name: "stdin error",
stdin: true,
stdinError: errors.New("stdin error"),
expectError: true,
},
{
name: "stdout error",
stdout: true,
stdoutError: errors.New("stdout error"),
expectError: true,
},
{
name: "stderr error",
stderr: true,
stderrError: errors.New("stderr error"),
expectError: true,
},
{
name: "error stream error",
stdin: true,
stdout: true,
stderr: true,
errorError: errors.New("error stream error"),
expectError: true,
},
{
name: "no errors",
stdin: true,
stdout: true,
stderr: true,
expectError: false,
},
{
name: "no errors, stderr & tty set, don't expect stderr",
stdin: true,
stdout: true,
stderr: true,
tty: true,
expectError: false,
},
}
for _, test := range tests {
conn := &fakeStreamCreator{
created: make(map[string]bool),
errors: map[string]error{
v1.StreamTypeStdin: test.stdinError,
v1.StreamTypeStdout: test.stdoutError,
v1.StreamTypeStderr: test.stderrError,
v1.StreamTypeError: test.errorError,
},
}
opts := StreamOptions{Tty: test.tty}
if test.stdin {
opts.Stdin = &fakeReader{}
}
if test.stdout {
opts.Stdout = &fakeWriter{}
}
if test.stderr {
opts.Stderr = &fakeWriter{}
}
h := newStreamProtocolV2(opts).(*streamProtocolV2)
err := h.createStreams(conn)
if test.expectError {
if err == nil {
t.Errorf("%s: expected error", test.name)
continue
}
if e, a := test.stdinError, err; test.stdinError != nil && e != a {
t.Errorf("%s: expected %v, got %v", test.name, e, a)
}
if e, a := test.stdoutError, err; test.stdoutError != nil && e != a {
t.Errorf("%s: expected %v, got %v", test.name, e, a)
}
if e, a := test.stderrError, err; test.stderrError != nil && e != a {
t.Errorf("%s: expected %v, got %v", test.name, e, a)
}
if e, a := test.errorError, err; test.errorError != nil && e != a {
t.Errorf("%s: expected %v, got %v", test.name, e, a)
}
continue
}
if err != nil {
t.Errorf("%s: unexpected error: %v", test.name, err)
continue
}
if test.stdin && !conn.created[v1.StreamTypeStdin] {
t.Errorf("%s: expected stdin stream", test.name)
}
if test.stdout && !conn.created[v1.StreamTypeStdout] {
t.Errorf("%s: expected stdout stream", test.name)
}
if test.stderr {
if test.tty && conn.created[v1.StreamTypeStderr] {
t.Errorf("%s: unexpected stderr stream because tty is set", test.name)
} else if !test.tty && !conn.created[v1.StreamTypeStderr] {
t.Errorf("%s: expected stderr stream", test.name)
}
}
if !conn.created[v1.StreamTypeError] {
t.Errorf("%s: expected error stream", test.name)
}
}
}
func TestV2ErrorStreamReading(t *testing.T) {
tests := []struct {
name string
stream io.Reader
expectedError error
}{
{
name: "error reading from stream",
stream: &fakeReader{errors.New("foo")},
expectedError: errors.New("error reading from error stream: foo"),
},
{
name: "stream returns an error",
stream: strings.NewReader("some error"),
expectedError: errors.New("error executing remote command: some error"),
},
}
for _, test := range tests {
h := newStreamProtocolV2(StreamOptions{}).(*streamProtocolV2)
h.errorStream = test.stream
ch := watchErrorStream(h.errorStream, &errorDecoderV2{})
if ch == nil {
t.Fatalf("%s: unexpected nil channel", test.name)
}
var err error
select {
case err = <-ch:
case <-time.After(wait.ForeverTestTimeout):
t.Fatalf("%s: timed out", test.name)
}
if test.expectedError != nil {
if err == nil {
t.Errorf("%s: expected an error", test.name)
} else if e, a := test.expectedError, err; e.Error() != a.Error() {
t.Errorf("%s: expected %q, got %q", test.name, e, a)
}
continue
}
if err != nil {
t.Errorf("%s: unexpected error: %v", test.name, err)
continue
}
}
}

View File

@@ -1,111 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
import (
"encoding/json"
"io"
"net/http"
"sync"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/runtime"
)
// streamProtocolV3 implements version 3 of the streaming protocol for attach
// and exec. This version adds support for resizing the container's terminal.
type streamProtocolV3 struct {
*streamProtocolV2
resizeStream io.Writer
}
var _ streamProtocolHandler = &streamProtocolV3{}
func newStreamProtocolV3(options StreamOptions) streamProtocolHandler {
return &streamProtocolV3{
streamProtocolV2: newStreamProtocolV2(options).(*streamProtocolV2),
}
}
func (p *streamProtocolV3) createStreams(conn streamCreator) error {
// set up the streams from v2
if err := p.streamProtocolV2.createStreams(conn); err != nil {
return err
}
// set up resize stream
if p.Tty {
headers := http.Header{}
headers.Set(v1.StreamType, v1.StreamTypeResize)
var err error
p.resizeStream, err = conn.CreateStream(headers)
if err != nil {
return err
}
}
return nil
}
func (p *streamProtocolV3) handleResizes() {
if p.resizeStream == nil || p.TerminalSizeQueue == nil {
return
}
go func() {
defer runtime.HandleCrash()
encoder := json.NewEncoder(p.resizeStream)
for {
size := p.TerminalSizeQueue.Next()
if size == nil {
return
}
if err := encoder.Encode(&size); err != nil {
runtime.HandleError(err)
}
}
}()
}
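// Editor's note: each resize event is JSON-encoded onto the resize stream;
// assuming the TerminalSize type from this package (Width and Height uint16
// fields, no JSON tags), an 80x24 terminal would be sent as:
//
//	{"Width":80,"Height":24}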
func (p *streamProtocolV3) stream(conn streamCreator) error {
if err := p.createStreams(conn); err != nil {
return err
}
// now that all the streams have been created, proceed with reading & copying
errorChan := watchErrorStream(p.errorStream, &errorDecoderV3{})
p.handleResizes()
p.copyStdin()
var wg sync.WaitGroup
p.copyStdout(&wg)
p.copyStderr(&wg)
// we're waiting for stdout/stderr to finish copying
wg.Wait()
// waits for errorStream to finish reading with an error or nil
return <-errorChan
}
// errorDecoderV3 interprets the error channel data the same way as
// errorDecoderV2: as plain text.
type errorDecoderV3 struct {
errorDecoderV2
}

View File

@@ -1,119 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
import (
"encoding/json"
"errors"
"fmt"
"strconv"
"sync"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/remotecommand"
"k8s.io/client-go/util/exec"
)
// streamProtocolV4 implements version 4 of the streaming protocol for attach
// and exec. This version adds support for exit codes on the error stream through
// the use of metav1.Status instead of plain text messages.
type streamProtocolV4 struct {
*streamProtocolV3
}
var _ streamProtocolHandler = &streamProtocolV4{}
func newStreamProtocolV4(options StreamOptions) streamProtocolHandler {
return &streamProtocolV4{
streamProtocolV3: newStreamProtocolV3(options).(*streamProtocolV3),
}
}
func (p *streamProtocolV4) createStreams(conn streamCreator) error {
return p.streamProtocolV3.createStreams(conn)
}
func (p *streamProtocolV4) handleResizes() {
p.streamProtocolV3.handleResizes()
}
func (p *streamProtocolV4) stream(conn streamCreator) error {
if err := p.createStreams(conn); err != nil {
return err
}
// now that all the streams have been created, proceed with reading & copying
errorChan := watchErrorStream(p.errorStream, &errorDecoderV4{})
p.handleResizes()
p.copyStdin()
var wg sync.WaitGroup
p.copyStdout(&wg)
p.copyStderr(&wg)
// we're waiting for stdout/stderr to finish copying
wg.Wait()
// waits for errorStream to finish reading with an error or nil
return <-errorChan
}
// errorDecoderV4 interprets the json-marshaled metav1.Status on the error channel
// and creates an exec.ExitError from it.
type errorDecoderV4 struct{}
func (d *errorDecoderV4) decode(message []byte) error {
status := metav1.Status{}
err := json.Unmarshal(message, &status)
if err != nil {
return fmt.Errorf("error stream protocol error: %v in %q", err, string(message))
}
switch status.Status {
case metav1.StatusSuccess:
return nil
case metav1.StatusFailure:
if status.Reason == remotecommand.NonZeroExitCodeReason {
if status.Details == nil {
return errors.New("error stream protocol error: details must be set")
}
for i := range status.Details.Causes {
c := &status.Details.Causes[i]
if c.Type != remotecommand.ExitCodeCauseType {
continue
}
rc, err := strconv.ParseUint(c.Message, 10, 8)
if err != nil {
return fmt.Errorf("error stream protocol error: invalid exit code value %q", c.Message)
}
return exec.CodeExitError{
Err: fmt.Errorf("command terminated with exit code %d", rc),
Code: int(rc),
}
}
return fmt.Errorf("error stream protocol error: no %s cause given", remotecommand.ExitCodeCauseType)
}
default:
return errors.New("error stream protocol error: unknown error")
}
return errors.New(status.Message)
}
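// For example (editor's illustration, mirroring the V4 decoder test below), the
// payload
//
//	{"status":"Failure","reason":"NonZeroExitCode","details":{"causes":[{"reason":"ExitCode","message":"42"}]}}
//
// decodes to an exec.CodeExitError with Code 42 and the error message
// "command terminated with exit code 42".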

View File

@@ -1,71 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
import (
"fmt"
"testing"
)
func TestV4ErrorDecoder(t *testing.T) {
dec := errorDecoderV4{}
type Test struct {
message string
err string
}
for _, test := range []Test{
{
message: "{}",
err: "error stream protocol error: unknown error",
},
{
message: "{",
err: "error stream protocol error: unexpected end of JSON input in \"{\"",
},
{
message: `{"status": "Success" }`,
err: "",
},
{
message: `{"status": "Failure", "message": "foobar" }`,
err: "foobar",
},
{
message: `{"status": "Failure", "message": "foobar", "reason": "NonZeroExitCode", "details": {"causes": [{"reason": "foo"}] } }`,
err: "error stream protocol error: no ExitCode cause given",
},
{
message: `{"status": "Failure", "message": "foobar", "reason": "NonZeroExitCode", "details": {"causes": [{"reason": "ExitCode"}] } }`,
err: "error stream protocol error: invalid exit code value \"\"",
},
{
message: `{"status": "Failure", "message": "foobar", "reason": "NonZeroExitCode", "details": {"causes": [{"reason": "ExitCode", "message": "42"}] } }`,
err: "command terminated with exit code 42",
},
} {
err := dec.decode([]byte(test.message))
want := test.err
if want == "" {
want = "<nil>"
}
if got := fmt.Sprintf("%v", err); got != want {
t.Errorf("wrong error for message %q: want=%q, got=%q", test.message, want, got)
}
}
}

View File

@@ -1,114 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package watch
import (
"sync"
"sync/atomic"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
)
func newTicketer() *ticketer {
return &ticketer{
cond: sync.NewCond(&sync.Mutex{}),
}
}
// ticketer serializes concurrent work: GetTicket hands out monotonically
// increasing tickets and WaitForTicket runs the given functions in ticket order.
type ticketer struct {
counter uint64
cond *sync.Cond
current uint64
}
func (t *ticketer) GetTicket() uint64 {
// -1 to start from 0
return atomic.AddUint64(&t.counter, 1) - 1
}
func (t *ticketer) WaitForTicket(ticket uint64, f func()) {
t.cond.L.Lock()
defer t.cond.L.Unlock()
for ticket != t.current {
t.cond.Wait()
}
f()
t.current++
t.cond.Broadcast()
}
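// Editor's usage sketch (not part of the original file): take the ticket
// synchronously, in arrival order, then hand the ordered work to a goroutine:
//
//	t := newTicketer()
//	ticket := t.GetTicket() // must happen on the calling goroutine
//	go t.WaitForTicket(ticket, func() {
//		// runs only after all earlier tickets have completed
//	})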
// NewIndexerInformerWatcher will create an IndexerInformer and wrap it into watch.Interface
// so you can use it anywhere you'd have used a regular Watcher returned from a Watch method.
func NewIndexerInformerWatcher(lw cache.ListerWatcher, objType runtime.Object) (cache.Indexer, cache.Controller, watch.Interface) {
ch := make(chan watch.Event)
w := watch.NewProxyWatcher(ch)
t := newTicketer()
indexer, informer := cache.NewIndexerInformer(lw, objType, 0, cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
go t.WaitForTicket(t.GetTicket(), func() {
select {
case ch <- watch.Event{
Type: watch.Added,
Object: obj.(runtime.Object),
}:
case <-w.StopChan():
}
})
},
UpdateFunc: func(old, new interface{}) {
go t.WaitForTicket(t.GetTicket(), func() {
select {
case ch <- watch.Event{
Type: watch.Modified,
Object: new.(runtime.Object),
}:
case <-w.StopChan():
}
})
},
DeleteFunc: func(obj interface{}) {
go t.WaitForTicket(t.GetTicket(), func() {
staleObj, stale := obj.(cache.DeletedFinalStateUnknown)
if stale {
// We have no means of passing the additional staleness information down through the
// watch API's watch.Event, but the caller can filter such objects by checking whether
// metadata.deletionTimestamp is set.
obj = staleObj.Obj
}
select {
case ch <- watch.Event{
Type: watch.Deleted,
Object: obj.(runtime.Object),
}:
case <-w.StopChan():
}
})
},
}, cache.Indexers{})
go func() {
informer.Run(w.StopChan())
}()
return indexer, informer, w
}
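// Editor's usage sketch (assumes a clientset named client; illustrative only,
// mirroring the tests below):
//
//	lw := &cache.ListWatch{
//		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
//			return client.CoreV1().Secrets("").List(options)
//		},
//		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
//			return client.CoreV1().Secrets("").Watch(options)
//		},
//	}
//	_, _, w := NewIndexerInformerWatcher(lw, &corev1.Secret{})
//	defer w.Stop()
//	for event := range w.ResultChan() {
//		// events arrive in the informer's processing order
//	}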

View File

@@ -1,236 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package watch
import (
"math/rand"
"reflect"
"sort"
"testing"
"time"
"github.com/davecgh/go-spew/spew"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/watch"
fakeclientset "k8s.io/client-go/kubernetes/fake"
testcore "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
)
type byEventTypeAndName []watch.Event
func (a byEventTypeAndName) Len() int { return len(a) }
func (a byEventTypeAndName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byEventTypeAndName) Less(i, j int) bool {
if a[i].Type < a[j].Type {
return true
}
if a[i].Type > a[j].Type {
return false
}
return a[i].Object.(*corev1.Secret).Name < a[j].Object.(*corev1.Secret).Name
}
func TestTicketer(t *testing.T) {
tg := newTicketer()
const numTickets = 100 // current golang limit for race detector is 8192 simultaneously alive goroutines
var tickets []uint64
for i := 0; i < numTickets; i++ {
ticket := tg.GetTicket()
tickets = append(tickets, ticket)
exp, got := uint64(i), ticket
if got != exp {
t.Fatalf("expected ticket %d, got %d", exp, got)
}
}
// shuffle tickets
rand.Shuffle(len(tickets), func(i, j int) {
tickets[i], tickets[j] = tickets[j], tickets[i]
})
res := make(chan uint64, len(tickets))
for _, ticket := range tickets {
go func(ticket uint64) {
time.Sleep(time.Duration(rand.Intn(50)) * time.Millisecond)
tg.WaitForTicket(ticket, func() {
res <- ticket
})
}(ticket)
}
for i := 0; i < numTickets; i++ {
exp, got := uint64(i), <-res
if got != exp {
t.Fatalf("expected ticket %d, got %d", exp, got)
}
}
}
func TestNewInformerWatcher(t *testing.T) {
// Make sure there are never two events of the same type for a secret with the same name, otherwise the test might be flaky.
tt := []struct {
name string
objects []runtime.Object
events []watch.Event
}{
{
name: "basic test",
objects: []runtime.Object{
&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-1",
},
StringData: map[string]string{
"foo-1": "initial",
},
},
&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-2",
},
StringData: map[string]string{
"foo-2": "initial",
},
},
&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-3",
},
StringData: map[string]string{
"foo-3": "initial",
},
},
},
events: []watch.Event{
{
Type: watch.Added,
Object: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-4",
},
StringData: map[string]string{
"foo-4": "initial",
},
},
},
{
Type: watch.Modified,
Object: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-2",
},
StringData: map[string]string{
"foo-2": "new",
},
},
},
{
Type: watch.Deleted,
Object: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-3",
},
},
},
},
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
var expected []watch.Event
for _, o := range tc.objects {
expected = append(expected, watch.Event{
Type: watch.Added,
Object: o.DeepCopyObject(),
})
}
for _, e := range tc.events {
expected = append(expected, *e.DeepCopy())
}
fake := fakeclientset.NewSimpleClientset(tc.objects...)
fakeWatch := watch.NewFakeWithChanSize(len(tc.events), false)
fake.PrependWatchReactor("secrets", testcore.DefaultWatchReactor(fakeWatch, nil))
for _, e := range tc.events {
fakeWatch.Action(e.Type, e.Object)
}
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return fake.CoreV1().Secrets("").List(options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return fake.CoreV1().Secrets("").Watch(options)
},
}
_, _, w := NewIndexerInformerWatcher(lw, &corev1.Secret{})
var result []watch.Event
loop:
for {
var event watch.Event
var ok bool
select {
case event, ok = <-w.ResultChan():
if !ok {
t.Errorf("Failed to read event: channel is already closed!")
return
}
result = append(result, *event.DeepCopy())
case <-time.After(time.Second * 1):
// All the events are buffered -> receiving nothing for a second means we are done.
// The one-second wait also makes sure we would detect the watcher misbehaving after the last event.
break loop
}
}
// Informers don't guarantee event order so we need to sort these arrays to compare them
sort.Sort(byEventTypeAndName(expected))
sort.Sort(byEventTypeAndName(result))
if !reflect.DeepEqual(expected, result) {
t.Error(spew.Errorf("\nexpected: %#v,\ngot: %#v,\ndiff: %s", expected, result, diff.ObjectReflectDiff(expected, result)))
return
}
// Fill in some data to test watch closing while there are some events to be read
for _, e := range tc.events {
fakeWatch.Action(e.Type, e.Object)
}
// Stop before reading all the data to make sure the informer can deal with closed channel
w.Stop()
// Wait a bit to see if the informer won't panic
// TODO: Try to figure out a more reliable mechanism than time.Sleep (https://github.com/kubernetes/kubernetes/pull/50102/files#r184716591)
time.Sleep(1 * time.Second)
})
}
}

View File

@@ -1,225 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package watch
import (
"context"
"errors"
"fmt"
"time"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
"k8s.io/klog"
)
// PreconditionFunc returns true if the condition has been reached, false if it has not been reached yet,
// or an error if the condition failed or detected an error state.
type PreconditionFunc func(store cache.Store) (bool, error)
// ConditionFunc returns true if the condition has been reached, false if it has not been reached yet,
// or an error if the condition cannot be checked and should terminate. In general, it is better to define
// level driven conditions over edge driven conditions (pod has ready=true, vs pod modified and ready changed
// from false to true).
type ConditionFunc func(event watch.Event) (bool, error)
// ErrWatchClosed is returned when the watch channel is closed before timeout in UntilWithoutRetry.
var ErrWatchClosed = errors.New("watch closed before UntilWithoutRetry timeout")
// UntilWithoutRetry reads items from the watch until each provided condition succeeds, and then returns the last event
// encountered. The first condition that returns an error terminates the watch (and that event is also returned).
// If no event has been received, the returned event will be nil.
// Conditions are satisfied sequentially so as to provide a useful primitive for higher level composition.
// Waits until context deadline or until context is canceled.
//
// Warning: Unless you have a very specific use case (probably a special Watcher), don't use this function!!!
// Warning: This will fail e.g. on API timeouts and/or 'too old resource version' errors.
// Warning: You are most probably looking for a function *Until* or *UntilWithSync* below,
// Warning: which solve such issues.
// TODO: Consider making this function private to prevent misuse when the other occurrences in our codebase are gone.
func UntilWithoutRetry(ctx context.Context, watcher watch.Interface, conditions ...ConditionFunc) (*watch.Event, error) {
ch := watcher.ResultChan()
defer watcher.Stop()
var lastEvent *watch.Event
for _, condition := range conditions {
// check the next condition against the previous event and short circuit waiting for the next watch
if lastEvent != nil {
done, err := condition(*lastEvent)
if err != nil {
return lastEvent, err
}
if done {
continue
}
}
ConditionSucceeded:
for {
select {
case event, ok := <-ch:
if !ok {
return lastEvent, ErrWatchClosed
}
lastEvent = &event
done, err := condition(event)
if err != nil {
return lastEvent, err
}
if done {
break ConditionSucceeded
}
case <-ctx.Done():
return lastEvent, wait.ErrWaitTimeout
}
}
}
return lastEvent, nil
}
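// Editor's usage sketch (illustrative; see the tests below): wait up to a
// minute for an ADDED event on an existing watcher w:
//
//	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
//	defer cancel()
//	lastEvent, err := UntilWithoutRetry(ctx, w, func(e watch.Event) (bool, error) {
//		return e.Type == watch.Added, nil
//	})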
// UntilWithSync creates an informer from lw, optionally checks precondition when the store is synced,
// and watches the output until each provided condition succeeds, in a way that is identical
// to function UntilWithoutRetry. (See above.)
// UntilWithSync can deal with all errors like API timeout, lost connections and 'Resource version too old'.
// It is the only function that can recover from 'Resource version too old', Until and UntilWithoutRetry will
// just fail in that case. On the other hand, it can't provide guarantees as strong as using the simple
// Watch method with Until: it can skip some intermediate events if the watch function fails, but it will
// re-list to recover, and after recovery you always get an event if there has been a change.
// Also, with the current implementation based on DeltaFIFO, the order of events is guaranteed only for a
// particular object, not across objects, even if they belong to the same resource.
// The most frequent usage is a command that needs to watch the "state of the world" and shouldn't fail, like
// waiting for an object to reach a state, "small" controllers, ...
func UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime.Object, precondition PreconditionFunc, conditions ...ConditionFunc) (*watch.Event, error) {
indexer, informer, watcher := NewIndexerInformerWatcher(lw, objType)
// The proxy watcher can be stopped multiple times, so it's fine to use defer here to cover alternative branches and
// to let UntilWithoutRetry stop it as well.
defer watcher.Stop()
if precondition != nil {
if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
return nil, fmt.Errorf("UntilWithSync: unable to sync caches: %v", ctx.Err())
}
done, err := precondition(indexer)
if err != nil {
return nil, err
}
if done {
return nil, nil
}
}
return UntilWithoutRetry(ctx, watcher, conditions...)
}
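// Editor's note: an illustrative precondition (mirroring the tests below) that
// short-circuits when the object already exists in the synced store:
//
//	precondition := func(store cache.Store) (bool, error) {
//		_, exists, err := store.Get(&metav1.ObjectMeta{Namespace: "ns", Name: "name"})
//		return exists, err
//	}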
// ContextWithOptionalTimeout wraps context.WithTimeout and handles infinite timeouts expressed as 0 duration.
func ContextWithOptionalTimeout(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
if timeout < 0 {
// This should be handled in validation
klog.Errorf("Timeout for context shall not be negative!")
timeout = 0
}
if timeout == 0 {
return context.WithCancel(parent)
}
return context.WithTimeout(parent, timeout)
}
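// For example (editor's illustration), a zero timeout yields a plain cancelable
// context with no deadline:
//
//	ctx, cancel := ContextWithOptionalTimeout(context.Background(), 0)
//	defer cancel()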
// ListWatchUntil checks the provided conditions against the items returned by the list watcher, returning wait.ErrWaitTimeout
// if timeout is exceeded without all conditions returning true, or an error if an error occurs.
// TODO: check for watch expired error and retry watch from latest point? Same issue exists for Until.
// TODO: remove when no longer used
//
// Deprecated: Use UntilWithSync instead.
func ListWatchUntil(timeout time.Duration, lw cache.ListerWatcher, conditions ...ConditionFunc) (*watch.Event, error) {
if len(conditions) == 0 {
return nil, nil
}
list, err := lw.List(metav1.ListOptions{})
if err != nil {
return nil, err
}
initialItems, err := meta.ExtractList(list)
if err != nil {
return nil, err
}
// use the initial items as simulated "adds"
var lastEvent *watch.Event
currIndex := 0
passedConditions := 0
for _, condition := range conditions {
// check the next condition against the previous event and short circuit waiting for the next watch
if lastEvent != nil {
done, err := condition(*lastEvent)
if err != nil {
return lastEvent, err
}
if done {
passedConditions = passedConditions + 1
continue
}
}
ConditionSucceeded:
for currIndex < len(initialItems) {
lastEvent = &watch.Event{Type: watch.Added, Object: initialItems[currIndex]}
currIndex++
done, err := condition(*lastEvent)
if err != nil {
return lastEvent, err
}
if done {
passedConditions = passedConditions + 1
break ConditionSucceeded
}
}
}
if passedConditions == len(conditions) {
return lastEvent, nil
}
remainingConditions := conditions[passedConditions:]
metaObj, err := meta.ListAccessor(list)
if err != nil {
return nil, err
}
currResourceVersion := metaObj.GetResourceVersion()
watchInterface, err := lw.Watch(metav1.ListOptions{ResourceVersion: currResourceVersion})
if err != nil {
return nil, err
}
ctx, cancel := ContextWithOptionalTimeout(context.Background(), timeout)
defer cancel()
evt, err := UntilWithoutRetry(ctx, watchInterface, remainingConditions...)
if err == ErrWatchClosed {
// present a consistent error interface to callers
err = wait.ErrWaitTimeout
}
return evt, err
}

View File

@@ -1,303 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package watch
import (
"context"
"errors"
"reflect"
"strings"
"testing"
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
fakeclient "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/cache"
)
type fakePod struct {
name string
}
func (obj *fakePod) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind }
func (obj *fakePod) DeepCopyObject() runtime.Object { panic("DeepCopyObject not supported by fakePod") }
func TestUntil(t *testing.T) {
fw := watch.NewFake()
go func() {
var obj *fakePod
fw.Add(obj)
fw.Modify(obj)
}()
conditions := []ConditionFunc{
func(event watch.Event) (bool, error) { return event.Type == watch.Added, nil },
func(event watch.Event) (bool, error) { return event.Type == watch.Modified, nil },
}
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
lastEvent, err := UntilWithoutRetry(ctx, fw, conditions...)
if err != nil {
t.Fatalf("expected nil error, got %#v", err)
}
if lastEvent == nil {
t.Fatal("expected an event")
}
if lastEvent.Type != watch.Modified {
t.Fatalf("expected MODIFIED event type, got %v", lastEvent.Type)
}
if got, isPod := lastEvent.Object.(*fakePod); !isPod {
t.Fatalf("expected a pod event, got %#v", got)
}
}
func TestUntilMultipleConditions(t *testing.T) {
fw := watch.NewFake()
go func() {
var obj *fakePod
fw.Add(obj)
}()
conditions := []ConditionFunc{
func(event watch.Event) (bool, error) { return event.Type == watch.Added, nil },
func(event watch.Event) (bool, error) { return event.Type == watch.Added, nil },
}
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
lastEvent, err := UntilWithoutRetry(ctx, fw, conditions...)
if err != nil {
t.Fatalf("expected nil error, got %#v", err)
}
if lastEvent == nil {
t.Fatal("expected an event")
}
if lastEvent.Type != watch.Added {
t.Fatalf("expected MODIFIED event type, got %v", lastEvent.Type)
}
if got, isPod := lastEvent.Object.(*fakePod); !isPod {
t.Fatalf("expected a pod event, got %#v", got)
}
}
func TestUntilMultipleConditionsFail(t *testing.T) {
fw := watch.NewFake()
go func() {
var obj *fakePod
fw.Add(obj)
}()
conditions := []ConditionFunc{
func(event watch.Event) (bool, error) { return event.Type == watch.Added, nil },
func(event watch.Event) (bool, error) { return event.Type == watch.Added, nil },
func(event watch.Event) (bool, error) { return event.Type == watch.Deleted, nil },
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
lastEvent, err := UntilWithoutRetry(ctx, fw, conditions...)
if err != wait.ErrWaitTimeout {
t.Fatalf("expected ErrWaitTimeout error, got %#v", err)
}
if lastEvent == nil {
t.Fatal("expected an event")
}
if lastEvent.Type != watch.Added {
t.Fatalf("expected ADDED event type, got %v", lastEvent.Type)
}
if got, isPod := lastEvent.Object.(*fakePod); !isPod {
t.Fatalf("expected a pod event, got %#v", got)
}
}
func TestUntilTimeout(t *testing.T) {
fw := watch.NewFake()
go func() {
var obj *fakePod
fw.Add(obj)
fw.Modify(obj)
}()
conditions := []ConditionFunc{
func(event watch.Event) (bool, error) {
return event.Type == watch.Added, nil
},
func(event watch.Event) (bool, error) {
return event.Type == watch.Modified, nil
},
}
lastEvent, err := UntilWithoutRetry(context.Background(), fw, conditions...)
if err != nil {
t.Fatalf("expected nil error, got %#v", err)
}
if lastEvent == nil {
t.Fatal("expected an event")
}
if lastEvent.Type != watch.Modified {
t.Fatalf("expected MODIFIED event type, got %v", lastEvent.Type)
}
if got, isPod := lastEvent.Object.(*fakePod); !isPod {
t.Fatalf("expected a pod event, got %#v", got)
}
}
func TestUntilErrorCondition(t *testing.T) {
fw := watch.NewFake()
go func() {
var obj *fakePod
fw.Add(obj)
}()
expected := "something bad"
conditions := []ConditionFunc{
func(event watch.Event) (bool, error) { return event.Type == watch.Added, nil },
func(event watch.Event) (bool, error) { return false, errors.New(expected) },
}
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
_, err := UntilWithoutRetry(ctx, fw, conditions...)
if err == nil {
t.Fatal("expected an error")
}
if !strings.Contains(err.Error(), expected) {
t.Fatalf("expected %q in error string, got %q", expected, err.Error())
}
}
func TestUntilWithSync(t *testing.T) {
// FIXME: test preconditions
tt := []struct {
name string
lw *cache.ListWatch
preconditionFunc PreconditionFunc
conditionFunc ConditionFunc
expectedErr error
expectedEvent *watch.Event
}{
{
name: "doesn't wait for sync with no precondition",
lw: &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
select {}
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
select {}
},
},
preconditionFunc: nil,
conditionFunc: func(e watch.Event) (bool, error) {
return true, nil
},
expectedErr: errors.New("timed out waiting for the condition"),
expectedEvent: nil,
},
{
name: "waits indefinitely with precondition if it can't sync",
lw: &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
select {}
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
select {}
},
},
preconditionFunc: func(store cache.Store) (bool, error) {
return true, nil
},
conditionFunc: func(e watch.Event) (bool, error) {
return true, nil
},
expectedErr: errors.New("UntilWithSync: unable to sync caches: context deadline exceeded"),
expectedEvent: nil,
},
{
name: "precondition can stop the loop",
lw: func() *cache.ListWatch {
fakeclient := fakeclient.NewSimpleClientset(&corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "first"}})
return &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return fakeclient.CoreV1().Secrets("").List(options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return fakeclient.CoreV1().Secrets("").Watch(options)
},
}
}(),
preconditionFunc: func(store cache.Store) (bool, error) {
_, exists, err := store.Get(&metav1.ObjectMeta{Namespace: "", Name: "first"})
if err != nil {
return true, err
}
if exists {
return true, nil
}
return false, nil
},
conditionFunc: func(e watch.Event) (bool, error) {
return true, errors.New("should never reach this")
},
expectedErr: nil,
expectedEvent: nil,
},
{
name: "precondition lets it proceed to regular condition",
lw: func() *cache.ListWatch {
fakeclient := fakeclient.NewSimpleClientset(&corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "first"}})
return &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return fakeclient.CoreV1().Secrets("").List(options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return fakeclient.CoreV1().Secrets("").Watch(options)
},
}
}(),
preconditionFunc: func(store cache.Store) (bool, error) {
return false, nil
},
conditionFunc: func(e watch.Event) (bool, error) {
if e.Type == watch.Added {
return true, nil
}
panic("no other events are expected")
},
expectedErr: nil,
expectedEvent: &watch.Event{Type: watch.Added, Object: &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "first"}}},
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
// The informer waits for caches to sync by polling in 100ms intervals,
// so the timeout needs to be reasonably higher than that.
ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
defer cancel()
event, err := UntilWithSync(ctx, tc.lw, &corev1.Secret{}, tc.preconditionFunc, tc.conditionFunc)
if !reflect.DeepEqual(err, tc.expectedErr) {
t.Errorf("expected error %#v, got %#v", tc.expectedErr, err)
}
if !reflect.DeepEqual(event, tc.expectedEvent) {
t.Errorf("expected event %#v, got %#v", tc.expectedEvent, event)
}
})
}
}