Bumping k8s dependencies to 1.13
vendor/k8s.io/kubernetes/test/integration/BUILD (generated, vendored): 15 changes
@@ -13,14 +13,14 @@ go_library(
     ],
     importpath = "k8s.io/kubernetes/test/integration",
     deps = [
+        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
         "//vendor/github.com/coreos/etcd/clientv3:go_default_library",
         "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
     ],
 )
 
@@ -35,14 +35,17 @@ filegroup(
     name = "all-srcs",
     srcs = [
         ":package-srcs",
+        "//test/integration/apimachinery:all-srcs",
         "//test/integration/apiserver:all-srcs",
         "//test/integration/auth:all-srcs",
         "//test/integration/benchmark/jsonify:all-srcs",
         "//test/integration/client:all-srcs",
         "//test/integration/configmap:all-srcs",
+        "//test/integration/controllermanager:all-srcs",
         "//test/integration/daemonset:all-srcs",
         "//test/integration/defaulttolerationseconds:all-srcs",
         "//test/integration/deployment:all-srcs",
+        "//test/integration/dryrun:all-srcs",
         "//test/integration/etcd:all-srcs",
         "//test/integration/evictions:all-srcs",
         "//test/integration/examples:all-srcs",
vendor/k8s.io/kubernetes/test/integration/apimachinery/BUILD (generated, vendored, new file): 38 lines
@@ -0,0 +1,38 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_test")
+
+go_test(
+    name = "go_default_test",
+    srcs = [
+        "main_test.go",
+        "watch_restart_test.go",
+    ],
+    deps = [
+        "//pkg/api/testapi:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/rest:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
+        "//test/integration/framework:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)
vendor/k8s.io/kubernetes/test/integration/apimachinery/main_test.go (generated, vendored, new file): 27 lines
@@ -0,0 +1,27 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apimachinery
+
+import (
+	"testing"
+
+	"k8s.io/kubernetes/test/integration/framework"
+)
+
+func TestMain(m *testing.M) {
+	framework.EtcdMain(m.Run)
+}
vendor/k8s.io/kubernetes/test/integration/apimachinery/watch_restart_test.go (generated, vendored, new file): 258 lines
@@ -0,0 +1,258 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apimachinery
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"testing"
+	"time"
+
+	"k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/client-go/kubernetes"
+	restclient "k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/cache"
+	watchtools "k8s.io/client-go/tools/watch"
+	"k8s.io/kubernetes/pkg/api/testapi"
+	"k8s.io/kubernetes/test/integration/framework"
+)
+
+func noopNormalization(output []string) []string {
+	return output
+}
+
+func normalizeInformerOutputFunc(initialVal string) func(output []string) []string {
+	return func(output []string) []string {
+		result := make([]string, 0, len(output))
+
+		// Removes initial value and all of its direct repetitions
+		lastVal := initialVal
+		for _, v := range output {
+			// Make values unique as informer(List+Watch) duplicates some events
+			if v == lastVal {
+				continue
+			}
+			result = append(result, v)
+			lastVal = v
+		}
+
+		return result
+	}
+}
+
+func TestWatchRestartsIfTimeoutNotReached(t *testing.T) {
+	// Has to be longer than 5 seconds
+	timeout := 2 * time.Minute
+
+	// Set up a master
+	masterConfig := framework.NewIntegrationTestMasterConfig()
+	// Timeout is set random between MinRequestTimeout and 2x
+	masterConfig.GenericConfig.MinRequestTimeout = int(timeout.Seconds()) / 4
+	_, s, closeFn := framework.RunAMaster(masterConfig)
+	defer closeFn()
+
+	config := &restclient.Config{
+		Host:          s.URL,
+		ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[corev1.GroupName].GroupVersion()},
+	}
+
+	namespaceObject := framework.CreateTestingNamespace("retry-watch", s, t)
+	defer framework.DeleteTestingNamespace(namespaceObject, s, t)
+
+	getListFunc := func(c *kubernetes.Clientset, secret *v1.Secret) func(options metav1.ListOptions) *v1.SecretList {
+		return func(options metav1.ListOptions) *v1.SecretList {
+			options.FieldSelector = fields.OneTermEqualSelector("metadata.name", secret.Name).String()
+			res, err := c.CoreV1().Secrets(secret.Namespace).List(options)
+			if err != nil {
+				t.Fatalf("Failed to list Secrets: %v", err)
+			}
+			return res
+		}
+	}
+
+	getWatchFunc := func(c *kubernetes.Clientset, secret *v1.Secret) func(options metav1.ListOptions) (watch.Interface, error) {
+		return func(options metav1.ListOptions) (watch.Interface, error) {
+			options.FieldSelector = fields.OneTermEqualSelector("metadata.name", secret.Name).String()
+			res, err := c.CoreV1().Secrets(secret.Namespace).Watch(options)
+			if err != nil {
+				t.Fatalf("Failed to create a watcher on Secrets: %v", err)
+			}
+			return res, err
+		}
+	}
+
+	generateEvents := func(t *testing.T, c *kubernetes.Clientset, secret *v1.Secret, referenceOutput *[]string, stopChan chan struct{}, stoppedChan chan struct{}) {
+		defer close(stoppedChan)
+		counter := 0
+
+		// These 5 seconds are here to protect against a race at the end when we could write something there at the same time as watch.Until ends
+		softTimeout := timeout - 5*time.Second
+		if softTimeout < 0 {
+			panic("Timeout has to be grater than 5 seconds!")
+		}
+		endChannel := time.After(softTimeout)
+		for {
+			select {
+			// TODO: get this lower once we figure out how to extend ETCD cache
+			case <-time.After(1000 * time.Millisecond):
+				counter = counter + 1
+
+				patch := fmt.Sprintf(`{"metadata": {"annotations": {"count": "%d"}}}`, counter)
+				_, err := c.CoreV1().Secrets(secret.Namespace).Patch(secret.Name, types.StrategicMergePatchType, []byte(patch))
+				if err != nil {
+					t.Fatalf("Failed to patch secret: %v", err)
+				}
+
+				*referenceOutput = append(*referenceOutput, fmt.Sprintf("%d", counter))
+			case <-endChannel:
+				return
+			case <-stopChan:
+				return
+			}
+		}
+	}
+
+	initialCount := "0"
+	newTestSecret := func(name string) *v1.Secret {
+		return &v1.Secret{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      name,
+				Namespace: namespaceObject.Name,
+				Annotations: map[string]string{
+					"count": initialCount,
+				},
+			},
+			Data: map[string][]byte{
+				"data": []byte("value1\n"),
+			},
+		}
+	}
+
+	tt := []struct {
+		name                string
+		succeed             bool
+		secret              *v1.Secret
+		getWatcher          func(c *kubernetes.Clientset, secret *v1.Secret) (watch.Interface, error)
+		normalizeOutputFunc func(referenceOutput []string) []string
+	}{
+		{
+			name:    "regular watcher should fail",
+			succeed: false,
+			secret:  newTestSecret("secret-01"),
+			getWatcher: func(c *kubernetes.Clientset, secret *v1.Secret) (watch.Interface, error) {
+				options := metav1.ListOptions{
+					ResourceVersion: secret.ResourceVersion,
+				}
+				return getWatchFunc(c, secret)(options)
+			}, // regular watcher; unfortunately destined to fail
+			normalizeOutputFunc: noopNormalization,
+		},
+		{
+			name:    "InformerWatcher survives closed watches",
+			succeed: true,
+			secret:  newTestSecret("secret-03"),
+			getWatcher: func(c *kubernetes.Clientset, secret *v1.Secret) (watch.Interface, error) {
+				lw := &cache.ListWatch{
+					ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+						return getListFunc(c, secret)(options), nil
+					},
+					WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+						return getWatchFunc(c, secret)(options)
+					},
+				}
+				_, _, w := watchtools.NewIndexerInformerWatcher(lw, &v1.Secret{})
+				return w, nil
+			},
+			normalizeOutputFunc: normalizeInformerOutputFunc(initialCount),
+		},
+	}
+
+	for _, tmptc := range tt {
+		tc := tmptc // we need to copy it for parallel runs
+		t.Run(tc.name, func(t *testing.T) {
+			c, err := kubernetes.NewForConfig(config)
+			if err != nil {
+				t.Fatalf("Failed to create clientset: %v", err)
+			}
+
+			secret, err := c.CoreV1().Secrets(tc.secret.Namespace).Create(tc.secret)
+			if err != nil {
+				t.Fatalf("Failed to create testing secret %s/%s: %v", tc.secret.Namespace, tc.secret.Name, err)
+			}
+
+			watcher, err := tc.getWatcher(c, secret)
+			if err != nil {
+				t.Fatalf("Failed to create watcher: %v", err)
+			}
+
+			var referenceOutput []string
+			var output []string
+			stopChan := make(chan struct{})
+			stoppedChan := make(chan struct{})
+			go generateEvents(t, c, secret, &referenceOutput, stopChan, stoppedChan)
+
+			// Record current time to be able to asses if the timeout has been reached
+			startTime := time.Now()
+			ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
+			defer cancel()
+			_, err = watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) {
+				s, ok := event.Object.(*v1.Secret)
+				if !ok {
+					t.Fatalf("Received an object that is not a Secret: %#v", event.Object)
+				}
+				output = append(output, s.Annotations["count"])
+				// Watch will never end voluntarily
+				return false, nil
+			})
+			watchDuration := time.Since(startTime)
+			close(stopChan)
+			<-stoppedChan
+
+			output = tc.normalizeOutputFunc(output)
+
+			t.Logf("Watch duration: %v; timeout: %v", watchDuration, timeout)
+
+			if err == nil && !tc.succeed {
+				t.Fatalf("Watch should have timed out but it exited without an error!")
+			}
+
+			if err != wait.ErrWaitTimeout && tc.succeed {
+				t.Fatalf("Watch exited with error: %v!", err)
+			}
+
+			if watchDuration < timeout && tc.succeed {
+				t.Fatalf("Watch should have timed out after %v but it timed out prematurely after %v!", timeout, watchDuration)
+			}
+
+			if watchDuration >= timeout && !tc.succeed {
+				t.Fatalf("Watch should have timed out but it succeeded!")
+			}
+
+			if tc.succeed && !reflect.DeepEqual(referenceOutput, output) {
+				t.Fatalf("Reference and real output differ! We must have lost some events or read some multiple times!\nRef: %#v\nReal: %#v", referenceOutput, output)
+			}
+		})
+	}
+}
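The new test above exercises client-go's informer-backed watcher, which transparently re-lists and re-watches when the apiserver closes an individual watch connection. The following is a condensed sketch of that pattern, not part of the commit; it assumes the 1.13-era client-go APIs shown in the diff (NewIndexerInformerWatcher, UntilWithoutRetry), and watchSecrets is an illustrative name.

package apimachinery

import (
	"context"
	"fmt"
	"time"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// watchSecrets observes Secret events for up to `timeout`, surviving any
// individual watch connection the apiserver closes along the way.
func watchSecrets(c kubernetes.Interface, ns string, timeout time.Duration) error {
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return c.CoreV1().Secrets(ns).List(options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return c.CoreV1().Secrets(ns).Watch(options)
		},
	}
	// The informer re-lists and re-watches under the hood, so the returned
	// watch.Interface outlives server-side connection resets.
	_, _, w := watchtools.NewIndexerInformerWatcher(lw, &v1.Secret{})
	defer w.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	_, err := watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
		fmt.Printf("observed %s\n", event.Type)
		return false, nil // never stop voluntarily; run until the timeout
	})
	return err
}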
vendor/k8s.io/kubernetes/test/integration/apiserver/BUILD (generated, vendored): 46 changes
@@ -23,35 +23,35 @@ go_test(
         "//pkg/api/testapi:go_default_library",
         "//pkg/apis/core:go_default_library",
         "//pkg/kubectl/cmd/util:go_default_library",
-        "//pkg/kubectl/genericclioptions:go_default_library",
         "//pkg/master:go_default_library",
         "//pkg/printers:go_default_library",
         "//pkg/printers/internalversion:go_default_library",
+        "//staging/src/k8s.io/api/batch/v2alpha1:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
+        "//staging/src/k8s.io/api/rbac/v1alpha1:go_default_library",
+        "//staging/src/k8s.io/api/scheduling/v1beta1:go_default_library",
+        "//staging/src/k8s.io/api/settings/v1alpha1:go_default_library",
+        "//staging/src/k8s.io/api/storage/v1alpha1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/features:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library",
+        "//staging/src/k8s.io/client-go/discovery:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/rest:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/pager:go_default_library",
         "//test/integration/framework:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/pborman/uuid:go_default_library",
-        "//vendor/k8s.io/api/batch/v2alpha1:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
-        "//vendor/k8s.io/api/rbac/v1alpha1:go_default_library",
-        "//vendor/k8s.io/api/scheduling/v1beta1:go_default_library",
-        "//vendor/k8s.io/api/settings/v1alpha1:go_default_library",
-        "//vendor/k8s.io/api/storage/v1alpha1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/features:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
-        "//vendor/k8s.io/client-go/discovery:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/rest:go_default_library",
-        "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
-        "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
-        "//vendor/k8s.io/client-go/tools/pager:go_default_library",
         "//vendor/k8s.io/gengo/examples/set-gen/sets:go_default_library",
     ],
 )
vendor/k8s.io/kubernetes/test/integration/apiserver/print_test.go (generated, vendored): 8 changes
@@ -34,13 +34,13 @@ import (
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
 	"k8s.io/client-go/discovery"
 	"k8s.io/client-go/tools/clientcmd"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
 	"k8s.io/gengo/examples/set-gen/sets"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	"k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/genericclioptions"
 	"k8s.io/kubernetes/pkg/printers"
 	printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
 	"k8s.io/kubernetes/test/integration/framework"
@@ -55,6 +55,8 @@ var kindWhiteList = sets.NewString(
 	"ExportOptions",
 	"GetOptions",
 	"ListOptions",
+	"CreateOptions",
+	"UpdateOptions",
 	"NodeProxyOptions",
 	"PodAttachOptions",
 	"PodExecOptions",
@@ -111,10 +113,6 @@ var kindWhiteList = sets.NewString(
 	"Eviction",
 	// --
 
-	// k8s.io/kubernetes/pkg/apis/componentconfig
-	"KubeSchedulerConfiguration",
-	// --
-
 	// k8s.io/apimachinery/pkg/apis/meta
 	"WatchEvent",
 	"Status",
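The print_test.go change tracks the 1.13 move of the genericclioptions package out of the kubectl tree into the k8s.io/cli-runtime staging repo. For callers the migration is typically just the import path; a hedged sketch, assuming NewTestIOStreams is available under the new path as it is used in this era's tests:

package apiserver

import (
	// Pre-1.13 path (removed above): "k8s.io/kubernetes/pkg/kubectl/genericclioptions"
	"k8s.io/cli-runtime/pkg/genericclioptions"
)

// newIOStreams shows the package used from its new home; the call site is
// unchanged from the old import path.
func newIOStreams() genericclioptions.IOStreams {
	streams, _, _, _ := genericclioptions.NewTestIOStreams()
	return streams
}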
vendor/k8s.io/kubernetes/test/integration/auth/BUILD (generated, vendored): 68 changes
@@ -23,6 +23,7 @@ go_test(
         "//pkg/api/testapi:go_default_library",
         "//pkg/apis/authorization:go_default_library",
         "//pkg/apis/autoscaling:go_default_library",
+        "//pkg/apis/coordination:go_default_library",
         "//pkg/apis/core:go_default_library",
         "//pkg/apis/extensions:go_default_library",
         "//pkg/apis/policy:go_default_library",
@@ -30,7 +31,6 @@ go_test(
         "//pkg/auth/authorizer/abac:go_default_library",
         "//pkg/auth/nodeidentifier:go_default_library",
         "//pkg/client/clientset_generated/internalclientset:go_default_library",
-        "//pkg/client/informers/informers_generated/internalversion:go_default_library",
         "//pkg/controller/serviceaccount:go_default_library",
         "//pkg/features:go_default_library",
         "//pkg/kubeapiserver/authorizer:go_default_library",
@@ -47,43 +47,45 @@ go_test(
         "//plugin/pkg/admission/noderestriction:go_default_library",
         "//plugin/pkg/auth/authenticator/token/bootstrap:go_default_library",
         "//plugin/pkg/auth/authorizer/rbac:go_default_library",
+        "//staging/src/k8s.io/api/authentication/v1:go_default_library",
+        "//staging/src/k8s.io/api/authentication/v1beta1:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/authentication/group:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/authentication/request/bearertoken:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/authentication/token/tokenfile:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/registry/generic:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
+        "//staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/tokentest:go_default_library",
+        "//staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook:go_default_library",
+        "//staging/src/k8s.io/client-go/informers:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/rest:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/bootstrap/token/api:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/clientcmd/api/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
+        "//staging/src/k8s.io/client-go/transport:go_default_library",
+        "//staging/src/k8s.io/client-go/util/cert:go_default_library",
         "//test/e2e/lifecycle/bootstrap:go_default_library",
         "//test/integration:go_default_library",
         "//test/integration/framework:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/gopkg.in/square/go-jose.v2/jwt:go_default_library",
-        "//vendor/k8s.io/api/authentication/v1:go_default_library",
-        "//vendor/k8s.io/api/authentication/v1beta1:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/api/storage/v1beta1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/authentication/group:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
-        "//vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/tokentest:go_default_library",
-        "//vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook:go_default_library",
-        "//vendor/k8s.io/client-go/informers:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/rest:go_default_library",
-        "//vendor/k8s.io/client-go/tools/bootstrap/token/api:go_default_library",
-        "//vendor/k8s.io/client-go/tools/clientcmd/api/v1:go_default_library",
-        "//vendor/k8s.io/client-go/transport:go_default_library",
-        "//vendor/k8s.io/client-go/util/cert:go_default_library",
+        "//vendor/k8s.io/utils/pointer:go_default_library",
     ],
 )
 
vendor/k8s.io/kubernetes/test/integration/auth/node_test.go (generated, vendored): 74 changes
@@ -38,15 +38,16 @@ import (
 	externalclientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
+	"k8s.io/kubernetes/pkg/apis/coordination"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/apis/policy"
 	"k8s.io/kubernetes/pkg/auth/nodeidentifier"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/kubeapiserver/authorizer"
 	"k8s.io/kubernetes/plugin/pkg/admission/noderestriction"
 	"k8s.io/kubernetes/test/integration/framework"
+	"k8s.io/utils/pointer"
 )
 
 func TestNodeAuthorizer(t *testing.T) {
@@ -75,7 +76,6 @@ func TestNodeAuthorizer(t *testing.T) {
 	// Build client config, clientset, and informers
 	clientConfig := &restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}}
 	superuserClient, superuserClientExternal := clientsetForToken(tokenMaster, clientConfig)
-	informerFactory := informers.NewSharedInformerFactory(superuserClient, time.Minute)
 	versionedInformerFactory := versionedinformers.NewSharedInformerFactory(superuserClientExternal, time.Minute)
 
 	// Enabled CSIPersistentVolume feature at startup so volumeattachments get watched
@@ -84,10 +84,12 @@ func TestNodeAuthorizer(t *testing.T) {
 	// Enable DynamicKubeletConfig feature so that Node.Spec.ConfigSource can be set
 	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DynamicKubeletConfig, true)()
 
+	// Enable NodeLease feature so that nodes can create leases
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeLease, true)()
+
 	// Set up Node+RBAC authorizer
 	authorizerConfig := &authorizer.AuthorizationConfig{
 		AuthorizationModes:       []string{"Node", "RBAC"},
-		InformerFactory:          informerFactory,
 		VersionedInformerFactory: versionedInformerFactory,
 	}
 	nodeRBACAuthorizer, _, err := authorizerConfig.New()
@@ -97,7 +99,7 @@ func TestNodeAuthorizer(t *testing.T) {
 
 	// Set up NodeRestriction admission
 	nodeRestrictionAdmission := noderestriction.NewPlugin(nodeidentifier.NewDefaultNodeIdentifier())
-	nodeRestrictionAdmission.SetInternalKubeInformerFactory(informerFactory)
+	nodeRestrictionAdmission.SetExternalKubeInformerFactory(versionedInformerFactory)
 	if err := nodeRestrictionAdmission.ValidateInitialization(); err != nil {
 		t.Fatal(err)
 	}
@@ -114,7 +116,6 @@ func TestNodeAuthorizer(t *testing.T) {
 	// Start the informers
 	stopCh := make(chan struct{})
 	defer close(stopCh)
-	informerFactory.Start(stopCh)
 	versionedInformerFactory.Start(stopCh)
 
 	// Wait for a healthy server
@@ -369,6 +370,54 @@ func TestNodeAuthorizer(t *testing.T) {
 		}
 	}
 
+	getNode1Lease := func(client clientset.Interface) func() error {
+		return func() error {
+			_, err := client.Coordination().Leases(api.NamespaceNodeLease).Get("node1", metav1.GetOptions{})
+			return err
+		}
+	}
+	node1LeaseDurationSeconds := int32(40)
+	createNode1Lease := func(client clientset.Interface) func() error {
+		return func() error {
+			lease := &coordination.Lease{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "node1",
+				},
+				Spec: coordination.LeaseSpec{
+					HolderIdentity:       pointer.StringPtr("node1"),
+					LeaseDurationSeconds: pointer.Int32Ptr(node1LeaseDurationSeconds),
+					RenewTime:            &metav1.MicroTime{Time: time.Now()},
+				},
+			}
+			_, err := client.Coordination().Leases(api.NamespaceNodeLease).Create(lease)
+			return err
+		}
+	}
+	updateNode1Lease := func(client clientset.Interface) func() error {
+		return func() error {
+			lease, err := client.Coordination().Leases(api.NamespaceNodeLease).Get("node1", metav1.GetOptions{})
+			if err != nil {
+				return err
+			}
+			lease.Spec.RenewTime = &metav1.MicroTime{Time: time.Now()}
+			_, err = client.Coordination().Leases(api.NamespaceNodeLease).Update(lease)
+			return err
+		}
+	}
+	patchNode1Lease := func(client clientset.Interface) func() error {
+		return func() error {
+			node1LeaseDurationSeconds++
+			bs := []byte(fmt.Sprintf(`{"spec": {"leaseDurationSeconds": %d}}`, node1LeaseDurationSeconds))
+			_, err := client.Coordination().Leases(api.NamespaceNodeLease).Patch("node1", types.StrategicMergePatchType, bs)
+			return err
+		}
+	}
+	deleteNode1Lease := func(client clientset.Interface) func() error {
+		return func() error {
+			return client.Coordination().Leases(api.NamespaceNodeLease).Delete("node1", &metav1.DeleteOptions{})
+		}
+	}
+
 	nodeanonClient, _ := clientsetForToken(tokenNodeUnknown, clientConfig)
 	node1Client, node1ClientExternal := clientsetForToken(tokenNode1, clientConfig)
 	node2Client, node2ClientExternal := clientsetForToken(tokenNode2, clientConfig)
@@ -510,6 +559,21 @@ func TestNodeAuthorizer(t *testing.T) {
 	expectAllowed(t, deleteNode2(node2Client))
 
 	//TODO(mikedanese): integration test node restriction of TokenRequest
+
+	// node1 allowed to operate on its own lease
+	expectAllowed(t, createNode1Lease(node1Client))
+	expectAllowed(t, getNode1Lease(node1Client))
+	expectAllowed(t, updateNode1Lease(node1Client))
+	expectAllowed(t, patchNode1Lease(node1Client))
+	expectAllowed(t, deleteNode1Lease(node1Client))
+	// node2 not allowed to operate on another node's lease
+	expectForbidden(t, createNode1Lease(node2Client))
+	expectForbidden(t, getNode1Lease(node2Client))
+	expectForbidden(t, updateNode1Lease(node2Client))
+	expectForbidden(t, patchNode1Lease(node2Client))
+	expectForbidden(t, deleteNode1Lease(node2Client))
+
+	// TODO (verult) CSINodeInfo tests (issue #68254)
 }
 
 // expect executes a function a set number of times until it either returns the
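node_test.go gains coverage for the 1.13 NodeLease feature: each node owns a coordination.k8s.io Lease object in the dedicated node-lease namespace, and the node authorizer restricts a node to its own lease. A minimal sketch (not from the commit) of the renew step that the test's updateNode1Lease helper performs, using the same internal clientset types the test imports; renewNodeLease is an illustrative name:

package auth

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	api "k8s.io/kubernetes/pkg/apis/core"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// renewNodeLease re-reads a node's Lease and bumps RenewTime, which is the
// periodic heartbeat a kubelet performs on the NodeLease code path.
func renewNodeLease(c clientset.Interface, nodeName string) error {
	lease, err := c.Coordination().Leases(api.NamespaceNodeLease).Get(nodeName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	lease.Spec.RenewTime = &metav1.MicroTime{Time: time.Now()}
	_, err = c.Coordination().Leases(api.NamespaceNodeLease).Update(lease)
	return err
}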
vendor/k8s.io/kubernetes/test/integration/auth/rbac_test.go (generated, vendored): 59 changes
@@ -17,6 +17,7 @@ limitations under the License.
 package auth
 
 import (
+	"context"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -38,6 +39,7 @@ import (
 	"k8s.io/apiserver/pkg/registry/generic"
 	externalclientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
+	watchtools "k8s.io/client-go/tools/watch"
 	"k8s.io/client-go/transport"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	"k8s.io/kubernetes/pkg/api/testapi"
@@ -219,6 +221,15 @@ var (
 		}
 	}
 }
 `
+	aLimitRange = `
+{
+  "apiVersion": "v1",
+  "kind": "LimitRange",
+  "metadata": {
+    "name": "a"%s
+  }
+}
+`
 	podNamespace = `
 {
@@ -246,6 +257,15 @@ var (
     "name": "forbidden-namespace"%s
   }
 }
 `
+	limitRangeNamespace = `
+{
+  "apiVersion": "` + testapi.Groups[api.GroupName].GroupVersion().String() + `",
+  "kind": "Namespace",
+  "metadata": {
+    "name": "limitrange-namespace"%s
+  }
+}
+`
 )
 
@@ -409,6 +429,40 @@ func TestRBAC(t *testing.T) {
 				{superUser, "DELETE", "rbac.authorization.k8s.io", "rolebindings", "job-namespace", "pi", "", http.StatusOK},
 			},
 		},
+		{
+			bootstrapRoles: bootstrapRoles{
+				clusterRoles: []rbacapi.ClusterRole{
+					{
+						ObjectMeta: metav1.ObjectMeta{Name: "allow-all"},
+						Rules:      []rbacapi.PolicyRule{ruleAllowAll},
+					},
+					{
+						ObjectMeta: metav1.ObjectMeta{Name: "update-limitranges"},
+						Rules: []rbacapi.PolicyRule{
+							rbacapi.NewRule("update").Groups("").Resources("limitranges").RuleOrDie(),
+						},
+					},
+				},
+				clusterRoleBindings: []rbacapi.ClusterRoleBinding{
+					{
+						ObjectMeta: metav1.ObjectMeta{Name: "update-limitranges"},
+						Subjects: []rbacapi.Subject{
+							{Kind: "User", Name: "limitrange-updater"},
+						},
+						RoleRef: rbacapi.RoleRef{Kind: "ClusterRole", Name: "update-limitranges"},
+					},
+				},
+			},
+			requests: []request{
+				// Create the namespace used later in the test
+				{superUser, "POST", "", "namespaces", "", "", limitRangeNamespace, http.StatusCreated},
+
+				{"limitrange-updater", "PUT", "", "limitranges", "limitrange-namespace", "a", aLimitRange, http.StatusForbidden},
+				{superUser, "PUT", "", "limitranges", "limitrange-namespace", "a", aLimitRange, http.StatusCreated},
+				{superUser, "PUT", "", "limitranges", "limitrange-namespace", "a", aLimitRange, http.StatusOK},
+				{"limitrange-updater", "PUT", "", "limitranges", "limitrange-namespace", "a", aLimitRange, http.StatusOK},
+			},
+		},
 	}
 
 	for i, tc := range tests {
@@ -424,6 +478,7 @@ func TestRBAC(t *testing.T) {
 			"job-writer-namespace":             {Name: "job-writer-namespace"},
 			"nonescalating-rolebinding-writer": {Name: "nonescalating-rolebinding-writer"},
 			"pod-reader":                       {Name: "pod-reader"},
+			"limitrange-updater":               {Name: "limitrange-updater"},
 			"user-with-no-permissions":         {Name: "user-with-no-permissions"},
 		}))
 		_, s, closeFn := framework.RunAMaster(masterConfig)
@@ -530,7 +585,9 @@ func TestBootstrapping(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
-	_, err = watch.Until(30*time.Second, watcher, func(event watch.Event) (bool, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+	_, err = watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) {
 		if event.Type != watch.Added {
 			return false, nil
 		}
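The TestBootstrapping change is part of the 1.13-wide migration off the deprecated watch.Until to watchtools.UntilWithoutRetry, which takes a context instead of a bare duration so timeouts and cancellation compose with the caller. A hedged sketch of the new call shape; waitForAdd is an illustrative wrapper, and the comparison against wait.ErrWaitTimeout follows the usage seen elsewhere in this commit:

package auth

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/watch"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForAdd blocks until the watcher delivers an Added event or the
// 30-second context expires.
func waitForAdd(watcher watch.Interface) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	_, err := watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) {
		// Returning true stops the watch; false keeps waiting.
		return event.Type == watch.Added, nil
	})
	if err == wait.ErrWaitTimeout {
		return fmt.Errorf("no Added event arrived before the timeout")
	}
	return err
}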
vendor/k8s.io/kubernetes/test/integration/auth/svcaccttoken_test.go (generated, vendored): 133 changes
@@ -20,6 +20,8 @@ import (
 	"crypto/ecdsa"
 	"encoding/base64"
 	"encoding/json"
 	"fmt"
+	"reflect"
+	"strings"
 	"testing"
 	"time"
@@ -35,7 +37,6 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	clientset "k8s.io/client-go/kubernetes"
-	externalclientset "k8s.io/client-go/kubernetes"
 	certutil "k8s.io/client-go/util/cert"
 	"k8s.io/kubernetes/pkg/apis/core"
 	serviceaccountgetter "k8s.io/kubernetes/pkg/controller/serviceaccount"
@@ -63,6 +64,12 @@ func TestServiceAccountTokenCreate(t *testing.T) {
 	const iss = "https://foo.bar.example.com"
 	aud := []string{"api"}
 
+	maxExpirationSeconds := int64(60 * 60)
+	maxExpirationDuration, err := time.ParseDuration(fmt.Sprintf("%ds", maxExpirationSeconds))
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
 	gcs := &clientset.Clientset{}
 
 	// Start the server
@@ -75,8 +82,13 @@ func TestServiceAccountTokenCreate(t *testing.T) {
 			serviceaccount.NewValidator(aud, serviceaccountgetter.NewGetterFromClient(gcs)),
 		),
 	)
-	masterConfig.ExtraConfig.ServiceAccountIssuer = serviceaccount.JWTTokenGenerator(iss, sk)
+	tokenGenerator, err := serviceaccount.JWTTokenGenerator(iss, sk)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	masterConfig.ExtraConfig.ServiceAccountIssuer = tokenGenerator
 	masterConfig.ExtraConfig.ServiceAccountAPIAudiences = aud
+	masterConfig.ExtraConfig.ServiceAccountMaxExpiration = maxExpirationDuration
 
 	master, _, closeFn := framework.RunAMaster(masterConfig)
 	defer closeFn()
@@ -150,7 +162,10 @@ func TestServiceAccountTokenCreate(t *testing.T) {
 		checkPayload(t, treq.Status.Token, `"myns"`, "kubernetes.io", "namespace")
 		checkPayload(t, treq.Status.Token, `"test-svcacct"`, "kubernetes.io", "serviceaccount", "name")
 
-		doTokenReview(t, cs, treq, false)
+		info := doTokenReview(t, cs, treq, false)
+		if info.Extra != nil {
+			t.Fatalf("expected Extra to be nil but got: %#v", info.Extra)
+		}
 		delSvcAcct()
 		doTokenReview(t, cs, treq, true)
 	})
@@ -203,7 +218,16 @@ func TestServiceAccountTokenCreate(t *testing.T) {
 		checkPayload(t, treq.Status.Token, `"myns"`, "kubernetes.io", "namespace")
 		checkPayload(t, treq.Status.Token, `"test-svcacct"`, "kubernetes.io", "serviceaccount", "name")
 
-		doTokenReview(t, cs, treq, false)
+		info := doTokenReview(t, cs, treq, false)
+		if len(info.Extra) != 2 {
+			t.Fatalf("expected Extra have length of 2 but was length %d: %#v", len(info.Extra), info.Extra)
+		}
+		if expected := map[string]authenticationv1.ExtraValue{
+			"authentication.kubernetes.io/pod-name": {pod.ObjectMeta.Name},
+			"authentication.kubernetes.io/pod-uid":  {string(pod.ObjectMeta.UID)},
+		}; !reflect.DeepEqual(info.Extra, expected) {
+			t.Fatalf("unexpected Extra:\ngot:\t%#v\nwant:\t%#v", info.Extra, expected)
+		}
 		delPod()
 		doTokenReview(t, cs, treq, true)
 	})
@@ -438,9 +462,97 @@ func TestServiceAccountTokenCreate(t *testing.T) {
 
 		doTokenReview(t, cs, treq, true)
 	})
+
+	t.Run("a token request within expiration time", func(t *testing.T) {
+		normalExpirationTime := maxExpirationSeconds - 10*60
+		treq := &authenticationv1.TokenRequest{
+			Spec: authenticationv1.TokenRequestSpec{
+				Audiences:         []string{"api"},
+				ExpirationSeconds: &normalExpirationTime,
+				BoundObjectRef: &authenticationv1.BoundObjectReference{
+					Kind:       "Secret",
+					APIVersion: "v1",
+					Name:       secret.Name,
+					UID:        secret.UID,
+				},
+			},
+		}
+
+		sa, del := createDeleteSvcAcct(t, cs, sa)
+		defer del()
+
+		originalSecret, originalDelSecret := createDeleteSecret(t, cs, secret)
+		defer originalDelSecret()
+
+		treq.Spec.BoundObjectRef.UID = originalSecret.UID
+		if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err != nil {
+			t.Fatalf("err: %v", err)
+		}
+
+		checkPayload(t, treq.Status.Token, `"system:serviceaccount:myns:test-svcacct"`, "sub")
+		checkPayload(t, treq.Status.Token, `["api"]`, "aud")
+		checkPayload(t, treq.Status.Token, `null`, "kubernetes.io", "pod")
+		checkPayload(t, treq.Status.Token, `"test-secret"`, "kubernetes.io", "secret", "name")
+		checkPayload(t, treq.Status.Token, `"myns"`, "kubernetes.io", "namespace")
+		checkPayload(t, treq.Status.Token, `"test-svcacct"`, "kubernetes.io", "serviceaccount", "name")
+		checkExpiration(t, treq, normalExpirationTime)
+
+		doTokenReview(t, cs, treq, false)
+		originalDelSecret()
+		doTokenReview(t, cs, treq, true)
+
+		_, recreateDelSecret := createDeleteSecret(t, cs, secret)
+		defer recreateDelSecret()
+
+		doTokenReview(t, cs, treq, true)
+	})
+
+	t.Run("a token request with out-of-range expiration", func(t *testing.T) {
+		tooLongExpirationTime := maxExpirationSeconds + 10*60
+		treq := &authenticationv1.TokenRequest{
+			Spec: authenticationv1.TokenRequestSpec{
+				Audiences:         []string{"api"},
+				ExpirationSeconds: &tooLongExpirationTime,
+				BoundObjectRef: &authenticationv1.BoundObjectReference{
+					Kind:       "Secret",
+					APIVersion: "v1",
+					Name:       secret.Name,
+					UID:        secret.UID,
+				},
+			},
+		}
+
+		sa, del := createDeleteSvcAcct(t, cs, sa)
+		defer del()
+
+		originalSecret, originalDelSecret := createDeleteSecret(t, cs, secret)
+		defer originalDelSecret()
+
+		treq.Spec.BoundObjectRef.UID = originalSecret.UID
+		if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err != nil {
+			t.Fatalf("err: %v", err)
+		}
+
+		checkPayload(t, treq.Status.Token, `"system:serviceaccount:myns:test-svcacct"`, "sub")
+		checkPayload(t, treq.Status.Token, `["api"]`, "aud")
+		checkPayload(t, treq.Status.Token, `null`, "kubernetes.io", "pod")
+		checkPayload(t, treq.Status.Token, `"test-secret"`, "kubernetes.io", "secret", "name")
+		checkPayload(t, treq.Status.Token, `"myns"`, "kubernetes.io", "namespace")
+		checkPayload(t, treq.Status.Token, `"test-svcacct"`, "kubernetes.io", "serviceaccount", "name")
+		checkExpiration(t, treq, maxExpirationSeconds)
+
+		doTokenReview(t, cs, treq, false)
+		originalDelSecret()
+		doTokenReview(t, cs, treq, true)
+
+		_, recreateDelSecret := createDeleteSecret(t, cs, secret)
+		defer recreateDelSecret()
+
+		doTokenReview(t, cs, treq, true)
+	})
 }
 
-func doTokenReview(t *testing.T, cs externalclientset.Interface, treq *authenticationv1.TokenRequest, expectErr bool) {
+func doTokenReview(t *testing.T, cs clientset.Interface, treq *authenticationv1.TokenRequest, expectErr bool) authenticationv1.UserInfo {
 	t.Helper()
 	trev, err := cs.AuthenticationV1().TokenReviews().Create(&authenticationv1.TokenReview{
 		Spec: authenticationv1.TokenReviewSpec{
@@ -460,6 +572,7 @@ func doTokenReview(t *testing.T, cs externalclientset.Interface, treq *authentic
 	if !trev.Status.Authenticated && !expectErr {
 		t.Fatal("expected token to be authenticated but it wasn't")
 	}
+	return trev.Status.User
 }
 
 func checkPayload(t *testing.T, tok string, want string, parts ...string) {
@@ -470,6 +583,16 @@ func checkPayload(t *testing.T, tok string, want string, parts ...string) {
 	}
 }
 
+func checkExpiration(t *testing.T, treq *authenticationv1.TokenRequest, expectedExpiration int64) {
+	t.Helper()
+	if treq.Spec.ExpirationSeconds == nil {
+		t.Errorf("unexpected nil expiration seconds.")
+	}
+	if *treq.Spec.ExpirationSeconds != expectedExpiration {
+		t.Errorf("unexpected expiration seconds.\nsaw:\t%d\nwant:\t%d", treq.Spec.ExpirationSeconds, expectedExpiration)
+	}
+}
+
 func getSubObject(t *testing.T, b string, parts ...string) string {
 	t.Helper()
 	var obj interface{}
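Two behaviors drive the svcaccttoken_test.go changes: serviceaccount.JWTTokenGenerator now returns an error that callers must check, and the new ServiceAccountMaxExpiration setting caps token lifetimes, truncating (not rejecting) over-long TokenRequests. A hedged sketch of the truncation behavior the new subtests assert, assuming a one-hour cap is configured server-side; requestCappedToken and the namespace/account names are illustrative:

package auth

import (
	"fmt"

	authenticationv1 "k8s.io/api/authentication/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// requestCappedToken asks for a two-hour token; with
// ServiceAccountMaxExpiration set to one hour the apiserver grants the
// request but truncates Spec.ExpirationSeconds to 3600.
func requestCappedToken(cs clientset.Interface) error {
	requested := int64(2 * 60 * 60)
	treq := &authenticationv1.TokenRequest{
		Spec: authenticationv1.TokenRequestSpec{
			Audiences:         []string{"api"},
			ExpirationSeconds: &requested,
		},
	}
	treq, err := cs.CoreV1().ServiceAccounts("myns").CreateToken("test-svcacct", treq)
	if err != nil {
		return err
	}
	fmt.Printf("granted %ds\n", *treq.Spec.ExpirationSeconds) // 3600, not 7200
	return nil
}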
vendor/k8s.io/kubernetes/test/integration/client/BUILD (generated, vendored): 28 changes
@@ -15,25 +15,25 @@ go_test(
     ],
     tags = ["integration"],
     deps = [
+        "//cmd/kube-apiserver/app/testing:go_default_library",
         "//pkg/api/legacyscheme:go_default_library",
         "//pkg/api/testapi:go_default_library",
         "//pkg/version:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
+        "//staging/src/k8s.io/client-go/dynamic:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/integration/framework:go_default_library",
         "//test/utils/image:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
-        "//vendor/k8s.io/client-go/dynamic:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/rest:go_default_library",
     ],
 )
 
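The client tests below drop framework.RunAMaster in favor of the kube-apiserver test server, which boots a real apiserver against a shared etcd and hands back a ready-to-use rest.Config. A sketch of the new setup shape; newTestClient is an illustrative helper, but the call signature and flags follow the diff:

package client

import (
	"testing"

	clientset "k8s.io/client-go/kubernetes"
	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
	"k8s.io/kubernetes/test/integration/framework"
)

// newTestClient boots a kube-apiserver for the duration of a test; callers
// must invoke the returned teardown func (the tests below defer it).
func newTestClient(t *testing.T) (clientset.Interface, func()) {
	result := kubeapiservertesting.StartTestServerOrDie(
		t,
		nil, // default instance options
		[]string{"--disable-admission-plugins", "ServiceAccount"},
		framework.SharedEtcd(),
	)
	return clientset.NewForConfigOrDie(result.ClientConfig), result.TearDownFn
}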
vendor/k8s.io/kubernetes/test/integration/client/client_test.go (generated, vendored): 135 changes (diff truncated below)
@@ -36,7 +36,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
 	clientset "k8s.io/client-go/kubernetes"
-	restclient "k8s.io/client-go/rest"
+	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	"k8s.io/kubernetes/pkg/version"
 	"k8s.io/kubernetes/test/integration/framework"
@@ -44,13 +44,10 @@
 )
 
 func TestClient(t *testing.T) {
-	_, s, closeFn := framework.RunAMaster(nil)
-	defer closeFn()
+	result := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins", "ServiceAccount"}, framework.SharedEtcd())
+	defer result.TearDownFn()
 
-	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-
-	ns := framework.CreateTestingNamespace("client", s, t)
-	defer framework.DeleteTestingNamespace(ns, s, t)
+	client := clientset.NewForConfigOrDie(result.ClientConfig)
 
 	info, err := client.Discovery().ServerVersion()
 	if err != nil {
@@ -60,7 +57,7 @@ func TestClient(t *testing.T) {
 		t.Errorf("expected %#v, got %#v", e, a)
 	}
 
-	pods, err := client.Core().Pods(ns.Name).List(metav1.ListOptions{})
+	pods, err := client.CoreV1().Pods("default").List(metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
@@ -72,7 +69,7 @@ func TestClient(t *testing.T) {
 	pod := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: "test",
-			Namespace:    ns.Name,
+			Namespace:    "default",
 		},
 		Spec: v1.PodSpec{
 			Containers: []v1.Container{
@@ -83,14 +80,14 @@ func TestClient(t *testing.T) {
 		},
 	}
 
-	got, err := client.Core().Pods(ns.Name).Create(pod)
+	got, err := client.CoreV1().Pods("default").Create(pod)
 	if err == nil {
 		t.Fatalf("unexpected non-error: %v", got)
 	}
 
 	// get a created pod
 	pod.Spec.Containers[0].Image = "an-image"
-	got, err = client.Core().Pods(ns.Name).Create(pod)
+	got, err = client.CoreV1().Pods("default").Create(pod)
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
@@ -99,7 +96,7 @@ func TestClient(t *testing.T) {
 	}
 
 	// pod is shown, but not scheduled
-	pods, err = client.Core().Pods(ns.Name).List(metav1.ListOptions{})
+	pods, err = client.CoreV1().Pods("default").List(metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
@@ -116,21 +113,18 @@ func TestClient(t *testing.T) {
 }
 
 func TestAtomicPut(t *testing.T) {
-	_, s, closeFn := framework.RunAMaster(nil)
-	defer closeFn()
+	result := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins", "ServiceAccount"}, framework.SharedEtcd())
+	defer result.TearDownFn()
 
-	c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-
-	ns := framework.CreateTestingNamespace("atomic-put", s, t)
-	defer framework.DeleteTestingNamespace(ns, s, t)
+	c := clientset.NewForConfigOrDie(result.ClientConfig)
 
 	rcBody := v1.ReplicationController{
 		TypeMeta: metav1.TypeMeta{
-			APIVersion: c.Core().RESTClient().APIVersion().String(),
+			APIVersion: c.CoreV1().RESTClient().APIVersion().String(),
 		},
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "atomicrc",
-			Namespace: ns.Name,
+			Namespace: "default",
 			Labels: map[string]string{
 				"name": "atomicrc",
 			},
@@ -154,7 +148,7 @@ func TestAtomicPut(t *testing.T) {
 			},
 		},
 	}
-	rcs := c.Core().ReplicationControllers(ns.Name)
+	rcs := c.CoreV1().ReplicationControllers("default")
 	rc, err := rcs.Create(&rcBody)
 	if err != nil {
 		t.Fatalf("Failed creating atomicRC: %v", err)
@@ -208,23 +202,20 @@ func TestAtomicPut(t *testing.T) {
 }
 
 func TestPatch(t *testing.T) {
-	_, s, closeFn := framework.RunAMaster(nil)
-	defer closeFn()
+	result := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins", "ServiceAccount"}, framework.SharedEtcd())
+	defer result.TearDownFn()
 
-	c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-
-	ns := framework.CreateTestingNamespace("patch", s, t)
-	defer framework.DeleteTestingNamespace(ns, s, t)
+	c := clientset.NewForConfigOrDie(result.ClientConfig)
 
 	name := "patchpod"
 	resource := "pods"
 	podBody := v1.Pod{
 		TypeMeta: metav1.TypeMeta{
-			APIVersion: c.Core().RESTClient().APIVersion().String(),
+			APIVersion: c.CoreV1().RESTClient().APIVersion().String(),
 		},
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      name,
-			Namespace: ns.Name,
+			Namespace: "default",
 			Labels:    map[string]string{},
 		},
 		Spec: v1.PodSpec{
@@ -233,7 +224,7 @@ func TestPatch(t *testing.T) {
 		},
 	}
-	pods := c.Core().Pods(ns.Name)
+	pods := c.CoreV1().Pods("default")
 	pod, err := pods.Create(&podBody)
 	if err != nil {
 		t.Fatalf("Failed creating patchpods: %v", err)
@@ -263,12 +254,12 @@ func TestPatch(t *testing.T) {
 		},
 	}
 
-	pb := patchBodies[c.Core().RESTClient().APIVersion()]
+	pb := patchBodies[c.CoreV1().RESTClient().APIVersion()]
 
 	execPatch := func(pt types.PatchType, body []byte) error {
-		result := c.Core().RESTClient().Patch(pt).
+		result := c.CoreV1().RESTClient().Patch(pt).
 			Resource(resource).
-			Namespace(ns.Name).
+			Namespace("default").
 			Name(name).
 			Body(body).
 			Do()
@@ -330,18 +321,15 @@ func TestPatch(t *testing.T) {
 }
 
 func TestPatchWithCreateOnUpdate(t *testing.T) {
-	_, s, closeFn := framework.RunAMaster(nil)
-	defer closeFn()
+	result := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
+	defer result.TearDownFn()
 
-	c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-
-	ns := framework.CreateTestingNamespace("patch-with-create", s, t)
-	defer framework.DeleteTestingNamespace(ns, s, t)
+	c := clientset.NewForConfigOrDie(result.ClientConfig)
 
 	endpointTemplate := &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "patchendpoint",
-			Namespace: ns.Name,
+			Namespace: "default",
 		},
 		Subsets: []v1.EndpointSubset{
 			{
@@ -352,7 +340,7 @@ func TestPatchWithCreateOnUpdate(t *testing.T) {
 	}
 
 	patchEndpoint := func(json []byte) (runtime.Object, error) {
-		return c.Core().RESTClient().Patch(types.MergePatchType).Resource("endpoints").Namespace(ns.Name).Name("patchendpoint").Body(json).Do().Get()
+		return c.CoreV1().RESTClient().Patch(types.MergePatchType).Resource("endpoints").Namespace("default").Name("patchendpoint").Body(json).Do().Get()
 	}
 
 	// Make sure patch doesn't get to CreateOnUpdate
@@ -367,7 +355,7 @@ func TestPatchWithCreateOnUpdate(t *testing.T) {
 	}
 
 	// Create the endpoint (endpoints set AllowCreateOnUpdate=true) to get a UID and resource version
-	createdEndpoint, err := c.Core().Endpoints(ns.Name).Update(endpointTemplate)
+	createdEndpoint, err := c.CoreV1().Endpoints("default").Update(endpointTemplate)
 	if err != nil {
 		t.Fatalf("Failed creating endpoint: %v", err)
 	}
@@ -441,12 +429,12 @@ func TestPatchWithCreateOnUpdate(t *testing.T) {
 }
 
 func TestAPIVersions(t *testing.T) {
-	_, s, closeFn := framework.RunAMaster(nil)
-	defer closeFn()
+	result := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
+	defer result.TearDownFn()
 
-	c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
+	c := clientset.NewForConfigOrDie(result.ClientConfig)
 
-	clientVersion := c.Core().RESTClient().APIVersion().String()
+	clientVersion := c.CoreV1().RESTClient().APIVersion().String()
 	g, err := c.Discovery().ServerGroups()
 	if err != nil {
 		t.Fatalf("Failed to get api versions: %v", err)
@@ -463,23 +451,20 @@ func TestAPIVersions(t *testing.T) {
 }
 
 func TestSingleWatch(t *testing.T) {
-	_, s, closeFn := framework.RunAMaster(nil)
-	defer closeFn()
+	result := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
+	defer result.TearDownFn()
 
-	ns := framework.CreateTestingNamespace("single-watch", s, t)
-	defer framework.DeleteTestingNamespace(ns, s, t)
-
-	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
+	client := clientset.NewForConfigOrDie(result.ClientConfig)
 
 	mkEvent := func(i int) *v1.Event {
 		name := fmt.Sprintf("event-%v", i)
 		return &v1.Event{
 			ObjectMeta: metav1.ObjectMeta{
-				Namespace: ns.Name,
+				Namespace: "default",
 				Name:      name,
 			},
 			InvolvedObject: v1.ObjectReference{
-				Namespace: ns.Name,
+				Namespace: "default",
 				Name:      name,
 			},
 			Reason: fmt.Sprintf("event %v", i),
@@ -489,7 +474,7 @@ func TestSingleWatch(t *testing.T) {
 	rv1 := ""
 	for i := 0; i < 10; i++ {
 		event := mkEvent(i)
-		got, err := client.Core().Events(ns.Name).Create(event)
+		got, err := client.CoreV1().Events("default").Create(event)
 		if err != nil {
 			t.Fatalf("Failed creating event %#q: %v", event, err)
 		}
@@ -502,8 +487,8 @@ func TestSingleWatch(t *testing.T) {
 		t.Logf("Created event %#v", got.ObjectMeta)
 	}
 
-	w, err := client.Core().RESTClient().Get().
-		Namespace(ns.Name).
+	w, err := client.CoreV1().RESTClient().Get().
+		Namespace("default").
 		Resource("events").
 		VersionedParams(&metav1.ListOptions{
 			ResourceVersion: rv1,
@@ -550,24 +535,21 @@ func TestMultiWatch(t *testing.T) {
 	const watcherCount = 50
 	rt.GOMAXPROCS(watcherCount)
 
-	_, s, closeFn := framework.RunAMaster(nil)
-	defer closeFn()
+	result := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
+	defer result.TearDownFn()
 
-	ns := framework.CreateTestingNamespace("multi-watch", s, t)
-	defer framework.DeleteTestingNamespace(ns, s, t)
-
-	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
+	client := clientset.NewForConfigOrDie(result.ClientConfig)
 
 	dummyEvent := func(i int) *v1.Event {
|
||||
name := fmt.Sprintf("unrelated-%v", i)
|
||||
return &v1.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("%v.%x", name, time.Now().UnixNano()),
|
||||
Namespace: ns.Name,
|
||||
Namespace: "default",
|
||||
},
|
||||
InvolvedObject: v1.ObjectReference{
|
||||
Name: name,
|
||||
Namespace: ns.Name,
|
||||
Namespace: "default",
|
||||
},
|
||||
Reason: fmt.Sprintf("unrelated change %v", i),
|
||||
}
|
||||
@@ -585,7 +567,7 @@ func TestMultiWatch(t *testing.T) {
|
||||
for i := 0; i < watcherCount; i++ {
|
||||
watchesStarted.Add(1)
|
||||
name := fmt.Sprintf("multi-watch-%v", i)
|
||||
got, err := client.Core().Pods(ns.Name).Create(&v1.Pod{
|
||||
got, err := client.CoreV1().Pods("default").Create(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: labels.Set{"watchlabel": name},
|
||||
@@ -606,7 +588,7 @@ func TestMultiWatch(t *testing.T) {
|
||||
LabelSelector: labels.Set{"watchlabel": name}.AsSelector().String(),
|
||||
ResourceVersion: rv,
|
||||
}
|
||||
w, err := client.Core().Pods(ns.Name).Watch(options)
|
||||
w, err := client.CoreV1().Pods("default").Watch(options)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("watch error for %v: %v", name, err))
|
||||
}
|
||||
@@ -655,7 +637,7 @@ func TestMultiWatch(t *testing.T) {
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if _, err := client.Core().Events(ns.Name).Create(dummyEvent(i)); err != nil {
|
||||
if _, err := client.CoreV1().Events("default").Create(dummyEvent(i)); err != nil {
|
||||
panic(fmt.Sprintf("couldn't make an event: %v", err))
|
||||
}
|
||||
changeMade <- i
|
||||
@@ -692,7 +674,7 @@ func TestMultiWatch(t *testing.T) {
|
||||
return
|
||||
}
|
||||
name := fmt.Sprintf("unrelated-%v", i)
|
||||
_, err := client.Core().Pods(ns.Name).Create(&v1.Pod{
|
||||
_, err := client.CoreV1().Pods("default").Create(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
@@ -726,13 +708,13 @@ func TestMultiWatch(t *testing.T) {
|
||||
for i := 0; i < watcherCount; i++ {
|
||||
go func(i int) {
|
||||
name := fmt.Sprintf("multi-watch-%v", i)
|
||||
pod, err := client.Core().Pods(ns.Name).Get(name, metav1.GetOptions{})
|
||||
pod, err := client.CoreV1().Pods("default").Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Couldn't get %v: %v", name, err))
|
||||
}
|
||||
pod.Spec.Containers[0].Image = imageutils.GetPauseImageName()
|
||||
sentTimes <- timePair{time.Now(), name}
|
||||
if _, err := client.Core().Pods(ns.Name).Update(pod); err != nil {
|
||||
if _, err := client.CoreV1().Pods("default").Update(pod); err != nil {
|
||||
panic(fmt.Sprintf("Couldn't make %v: %v", name, err))
|
||||
}
|
||||
}(i)
|
||||
@@ -806,13 +788,10 @@ func runSelfLinkTestOnNamespace(t *testing.T, c clientset.Interface, namespace s
|
||||
}
|
||||
|
||||
func TestSelfLinkOnNamespace(t *testing.T) {
|
||||
_, s, closeFn := framework.RunAMaster(nil)
|
||||
defer closeFn()
|
||||
result := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins", "ServiceAccount"}, framework.SharedEtcd())
|
||||
defer result.TearDownFn()
|
||||
|
||||
ns := framework.CreateTestingNamespace("selflink", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
c := clientset.NewForConfigOrDie(result.ClientConfig)
|
||||
|
||||
c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
|
||||
runSelfLinkTestOnNamespace(t, c, ns.Name)
|
||||
runSelfLinkTestOnNamespace(t, c, "default")
|
||||
}
|
||||
|
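The recurring change in this file swaps the old framework.RunAMaster master and hand-built restclient.Config for the kube-apiserver test server, whose ClientConfig is ready to use. Below is a minimal sketch of the new-style setup, assuming it lives in a test package with the same imports as the file above; the test name is illustrative, the API calls are the ones shown in the diff.

func TestNewStyleSetupSketch(t *testing.T) {
	// Start a real kube-apiserver backed by the shared etcd fixture.
	result := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
	defer result.TearDownFn()

	// result.ClientConfig replaces the hand-built restclient.Config;
	// group/version content negotiation is already configured.
	c := clientset.NewForConfigOrDie(result.ClientConfig)

	// Typed clients hang off the versioned accessors, e.g. CoreV1:
	if _, err := c.CoreV1().Pods("default").List(metav1.ListOptions{}); err != nil {
		t.Fatalf("unexpected error listing pods: %v", err)
	}
}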
127
vendor/k8s.io/kubernetes/test/integration/client/dynamic_client_test.go
generated
vendored
@@ -17,36 +17,32 @@ limitations under the License.
package client

import (
"fmt"
"reflect"
"testing"
"time"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/test/integration/framework"
)

func TestDynamicClient(t *testing.T) {
_, s, closeFn := framework.RunAMaster(nil)
defer closeFn()
result := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins", "ServiceAccount"}, framework.SharedEtcd())
defer result.TearDownFn()

ns := framework.CreateTestingNamespace("dynamic-client", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)

gv := &schema.GroupVersion{Group: "", Version: "v1"}
config := &restclient.Config{
Host: s.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: gv},
}

client := clientset.NewForConfigOrDie(config)
dynamicClient, err := dynamic.NewForConfig(config)
client := clientset.NewForConfigOrDie(result.ClientConfig)
dynamicClient, err := dynamic.NewForConfig(result.ClientConfig)
if err != nil {
t.Fatalf("unexpected error creating dynamic client: %v", err)
}
@@ -68,13 +64,13 @@ func TestDynamicClient(t *testing.T) {
},
}

actual, err := client.Core().Pods(ns.Name).Create(pod)
actual, err := client.CoreV1().Pods("default").Create(pod)
if err != nil {
t.Fatalf("unexpected error when creating pod: %v", err)
}

// check dynamic list
unstructuredList, err := dynamicClient.Resource(resource).Namespace(ns.Name).List(metav1.ListOptions{})
unstructuredList, err := dynamicClient.Resource(resource).Namespace("default").List(metav1.ListOptions{})
if err != nil {
t.Fatalf("unexpected error when listing pods: %v", err)
}
@@ -93,7 +89,7 @@ func TestDynamicClient(t *testing.T) {
}

// check dynamic get
unstruct, err := dynamicClient.Resource(resource).Namespace(ns.Name).Get(actual.Name, metav1.GetOptions{})
unstruct, err := dynamicClient.Resource(resource).Namespace("default").Get(actual.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("unexpected error when getting pod %q: %v", actual.Name, err)
}
@@ -108,12 +104,12 @@ func TestDynamicClient(t *testing.T) {
}

// delete the pod dynamically
err = dynamicClient.Resource(resource).Namespace(ns.Name).Delete(actual.Name, nil)
err = dynamicClient.Resource(resource).Namespace("default").Delete(actual.Name, nil)
if err != nil {
t.Fatalf("unexpected error when deleting pod: %v", err)
}

list, err := client.Core().Pods(ns.Name).List(metav1.ListOptions{})
list, err := client.CoreV1().Pods("default").List(metav1.ListOptions{})
if err != nil {
t.Fatalf("unexpected error when listing pods: %v", err)
}
@@ -123,6 +119,89 @@ func TestDynamicClient(t *testing.T) {
}
}

func TestDynamicClientWatch(t *testing.T) {
result := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
defer result.TearDownFn()

client := clientset.NewForConfigOrDie(result.ClientConfig)
dynamicClient, err := dynamic.NewForConfig(result.ClientConfig)
if err != nil {
t.Fatalf("unexpected error creating dynamic client: %v", err)
}

resource := v1.SchemeGroupVersion.WithResource("events")

mkEvent := func(i int) *v1.Event {
name := fmt.Sprintf("event-%v", i)
return &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: name,
},
InvolvedObject: v1.ObjectReference{
Namespace: "default",
Name: name,
},
Reason: fmt.Sprintf("event %v", i),
}
}

rv1 := ""
for i := 0; i < 10; i++ {
event := mkEvent(i)
got, err := client.CoreV1().Events("default").Create(event)
if err != nil {
t.Fatalf("Failed creating event %#q: %v", event, err)
}
if rv1 == "" {
rv1 = got.ResourceVersion
if rv1 == "" {
t.Fatal("did not get a resource version.")
}
}
t.Logf("Created event %#v", got.ObjectMeta)
}

w, err := dynamicClient.Resource(resource).Namespace("default").Watch(metav1.ListOptions{
ResourceVersion: rv1,
Watch: true,
FieldSelector: fields.OneTermEqualSelector("metadata.name", "event-9").String(),
})

if err != nil {
t.Fatalf("Failed watch: %v", err)
}
defer w.Stop()

select {
case <-time.After(wait.ForeverTestTimeout):
t.Fatalf("watch took longer than %s", wait.ForeverTestTimeout.String())
case got, ok := <-w.ResultChan():
if !ok {
t.Fatal("Watch channel closed unexpectedly.")
}

// We expect to see an ADD of event-9 and only event-9. (This
// catches a bug where all the events would have been sent down
// the channel.)
if e, a := watch.Added, got.Type; e != a {
t.Errorf("Wanted %v, got %v", e, a)
}

unstructured, ok := got.Object.(*unstructured.Unstructured)
if !ok {
t.Fatalf("Unexpected watch event containing object %#q", got.Object)
}
event, err := unstructuredToEvent(unstructured)
if err != nil {
t.Fatalf("unexpected error converting Unstructured to v1.Event: %v", err)
}
if e, a := "event-9", event.Name; e != a {
t.Errorf("Wanted %v, got %v", e, a)
}
}
}

func unstructuredToPod(obj *unstructured.Unstructured) (*v1.Pod, error) {
json, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
if err != nil {
@@ -134,3 +213,13 @@ func unstructuredToPod(obj *unstructured.Unstructured) (*v1.Pod, error) {
pod.APIVersion = ""
return pod, err
}

func unstructuredToEvent(obj *unstructured.Unstructured) (*v1.Event, error) {
json, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
if err != nil {
return nil, err
}
event := new(v1.Event)
err = runtime.DecodeInto(testapi.Default.Codec(), json, event)
return event, err
}
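A condensed sketch of the dynamic-client round trip exercised by TestDynamicClient above, reusing the imports from this file; the function name is illustrative, and the calls are the ones shown in the diff.

func dynamicListPodsSketch(t *testing.T) {
	result := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
	defer result.TearDownFn()

	dynamicClient, err := dynamic.NewForConfig(result.ClientConfig)
	if err != nil {
		t.Fatalf("unexpected error creating dynamic client: %v", err)
	}

	// Address core/v1 pods as an unstructured resource.
	resource := v1.SchemeGroupVersion.WithResource("pods")
	unstructuredList, err := dynamicClient.Resource(resource).Namespace("default").List(metav1.ListOptions{})
	if err != nil {
		t.Fatalf("unexpected error when listing pods: %v", err)
	}
	t.Logf("dynamic list returned %d items", len(unstructuredList.Items))
}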
10
vendor/k8s.io/kubernetes/test/integration/configmap/BUILD
generated
vendored
@@ -14,13 +14,13 @@ go_test(
],
tags = ["integration"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//test/integration:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
45
vendor/k8s.io/kubernetes/test/integration/controllermanager/BUILD
generated
vendored
Normal file
@@ -0,0 +1,45 @@
package(default_visibility = ["//visibility:public"])

load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
)

go_test(
name = "go_default_test",
size = "large",
srcs = [
"main_test.go",
"serving_test.go",
],
tags = [
"etcd",
"integration",
],
deps = [
"//cmd/cloud-controller-manager/app/testing:go_default_library",
"//cmd/kube-apiserver/app/testing:go_default_library",
"//cmd/kube-controller-manager/app/testing:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/fake:go_default_library",
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/options:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/integration/framework:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
27
vendor/k8s.io/kubernetes/test/integration/controllermanager/main_test.go
generated
vendored
Normal file
@@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllermanager

import (
"testing"

"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
framework.EtcdMain(m.Run)
}
349
vendor/k8s.io/kubernetes/test/integration/controllermanager/serving_test.go
generated
vendored
Normal file
@@ -0,0 +1,349 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllermanager

import (
"crypto/tls"
"crypto/x509"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path"
"strings"
"testing"

rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/options"
"k8s.io/client-go/kubernetes"
cloudctrlmgrtesting "k8s.io/kubernetes/cmd/cloud-controller-manager/app/testing"
kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
kubectrlmgrtesting "k8s.io/kubernetes/cmd/kube-controller-manager/app/testing"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
"k8s.io/kubernetes/test/integration/framework"
)

type controllerManagerTester interface {
StartTestServer(t kubectrlmgrtesting.Logger, customFlags []string) (*options.SecureServingOptionsWithLoopback, *server.SecureServingInfo, *server.DeprecatedInsecureServingInfo, func(), error)
}

type kubeControllerManagerTester struct{}

func (kubeControllerManagerTester) StartTestServer(t kubectrlmgrtesting.Logger, customFlags []string) (*options.SecureServingOptionsWithLoopback, *server.SecureServingInfo, *server.DeprecatedInsecureServingInfo, func(), error) {
gotResult, err := kubectrlmgrtesting.StartTestServer(t, customFlags)
if err != nil {
return nil, nil, nil, nil, err
}
return gotResult.Options.SecureServing, gotResult.Config.SecureServing, gotResult.Config.InsecureServing, gotResult.TearDownFn, err
}

type cloudControllerManagerTester struct{}

func (cloudControllerManagerTester) StartTestServer(t kubectrlmgrtesting.Logger, customFlags []string) (*options.SecureServingOptionsWithLoopback, *server.SecureServingInfo, *server.DeprecatedInsecureServingInfo, func(), error) {
gotResult, err := cloudctrlmgrtesting.StartTestServer(t, customFlags)
if err != nil {
return nil, nil, nil, nil, err
}
return gotResult.Options.SecureServing, gotResult.Config.SecureServing, gotResult.Config.InsecureServing, gotResult.TearDownFn, err
}

func TestControllerManagerServing(t *testing.T) {
if !cloudprovider.IsCloudProvider("fake") {
cloudprovider.RegisterCloudProvider("fake", fakeCloudProviderFactory)
}

// Insulate this test from picking up in-cluster config when run inside a pod
// We can't assume we have permissions to write to /var/run/secrets/... from a unit test to mock in-cluster config for testing
originalHost := os.Getenv("KUBERNETES_SERVICE_HOST")
if len(originalHost) > 0 {
os.Setenv("KUBERNETES_SERVICE_HOST", "")
defer os.Setenv("KUBERNETES_SERVICE_HOST", originalHost)
}

// authenticate to apiserver via bearer token
token := "flwqkenfjasasdfmwerasd"
tokenFile, err := ioutil.TempFile("", "kubeconfig")
if err != nil {
t.Fatal(err)
}
tokenFile.WriteString(fmt.Sprintf(`
%s,controller-manager,controller-manager,""
`, token))
tokenFile.Close()

// start apiserver
server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{
"--token-auth-file", tokenFile.Name(),
"--authorization-mode", "RBAC",
}, framework.SharedEtcd())
defer server.TearDownFn()

// allow controller-manager to do SubjectAccessReview
client, err := kubernetes.NewForConfig(server.ClientConfig)
if err != nil {
t.Fatalf("unexpected error creating client config: %v", err)
}
_, err = client.RbacV1().ClusterRoleBindings().Create(&rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{Name: "controller-manager:system:auth-delegator"},
Subjects: []rbacv1.Subject{{
Kind: "User",
Name: "controller-manager",
}},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "system:auth-delegator",
},
})
if err != nil {
t.Fatalf("failed to create system:auth-delegator rbac cluster role binding: %v", err)
}

// allow controller-manager to read kube-system/extension-apiserver-authentication
_, err = client.RbacV1().RoleBindings("kube-system").Create(&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{Name: "controller-manager:extension-apiserver-authentication-reader"},
Subjects: []rbacv1.Subject{{
Kind: "User",
Name: "controller-manager",
}},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: "extension-apiserver-authentication-reader",
},
})
if err != nil {
t.Fatalf("failed to create controller-manager:extension-apiserver-authentication-reader rbac role binding: %v", err)
}

// create kubeconfig for the apiserver
apiserverConfig, err := ioutil.TempFile("", "kubeconfig")
if err != nil {
t.Fatal(err)
}
apiserverConfig.WriteString(fmt.Sprintf(`
apiVersion: v1
kind: Config
clusters:
- cluster:
server: %s
certificate-authority: %s
name: integration
contexts:
- context:
cluster: integration
user: controller-manager
name: default-context
current-context: default-context
users:
- name: controller-manager
user:
token: %s
`, server.ClientConfig.Host, server.ServerOpts.SecureServing.ServerCert.CertKey.CertFile, token))
apiserverConfig.Close()

// create BROKEN kubeconfig for the apiserver
brokenApiserverConfig, err := ioutil.TempFile("", "kubeconfig")
if err != nil {
t.Fatal(err)
}
brokenApiserverConfig.WriteString(fmt.Sprintf(`
apiVersion: v1
kind: Config
clusters:
- cluster:
server: %s
certificate-authority: %s
name: integration
contexts:
- context:
cluster: integration
user: controller-manager
name: default-context
current-context: default-context
users:
- name: controller-manager
user:
token: WRONGTOKEN
`, server.ClientConfig.Host, server.ServerOpts.SecureServing.ServerCert.CertKey.CertFile))
brokenApiserverConfig.Close()

tests := []struct {
name string
tester controllerManagerTester
extraFlags []string
}{
{"kube-controller-manager", kubeControllerManagerTester{}, nil},
{"cloud-controller-manager", cloudControllerManagerTester{}, []string{"--cloud-provider=fake"}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
testControllerManager(t, tt.tester, apiserverConfig.Name(), brokenApiserverConfig.Name(), token, tt.extraFlags)
})
}
}

func testControllerManager(t *testing.T, tester controllerManagerTester, kubeconfig, brokenKubeconfig, token string, extraFlags []string) {
tests := []struct {
name string
flags []string
path string
anonymous bool // to use the token or not
wantErr bool
wantSecureCode, wantInsecureCode *int
}{
{"no-flags", nil, "/healthz", false, true, nil, nil},
{"insecurely /healthz", []string{
"--secure-port=0",
"--kubeconfig", kubeconfig,
"--leader-elect=false",
}, "/healthz", true, false, nil, intPtr(http.StatusOK)},
{"insecurely /metrics", []string{
"--secure-port=0",
"--kubeconfig", kubeconfig,
"--leader-elect=false",
}, "/metrics", true, false, nil, intPtr(http.StatusOK)},
{"/healthz without authn/authz", []string{
"--port=0",
"--kubeconfig", kubeconfig,
"--leader-elect=false",
}, "/healthz", true, false, intPtr(http.StatusOK), nil},
{"/metrics without authn/authz", []string{
"--kubeconfig", kubeconfig,
"--kubeconfig", kubeconfig,
"--leader-elect=false",
}, "/metrics", true, false, intPtr(http.StatusForbidden), intPtr(http.StatusOK)},
{"authorization skipped for /healthz with authn/authz", []string{
"--port=0",
"--authentication-kubeconfig", kubeconfig,
"--authorization-kubeconfig", kubeconfig,
"--kubeconfig", kubeconfig,
"--leader-elect=false",
}, "/healthz", false, false, intPtr(http.StatusOK), nil},
{"authorization skipped for /healthz with BROKEN authn/authz", []string{
"--port=0",
"--authentication-skip-lookup", // to survive an inaccessible extension-apiserver-authentication configmap
"--authentication-kubeconfig", brokenKubeconfig,
"--authorization-kubeconfig", brokenKubeconfig,
"--kubeconfig", kubeconfig,
"--leader-elect=false",
}, "/healthz", false, false, intPtr(http.StatusOK), nil},
{"not authorized /metrics", []string{
"--port=0",
"--authentication-kubeconfig", kubeconfig,
"--authorization-kubeconfig", kubeconfig,
"--kubeconfig", kubeconfig,
"--leader-elect=false",
}, "/metrics", false, false, intPtr(http.StatusForbidden), nil},
{"not authorized /metrics with BROKEN authn/authz", []string{
"--authentication-kubeconfig", kubeconfig,
"--authorization-kubeconfig", brokenKubeconfig,
"--kubeconfig", kubeconfig,
"--leader-elect=false",
}, "/metrics", false, false, intPtr(http.StatusInternalServerError), intPtr(http.StatusOK)},
{"always-allowed /metrics with BROKEN authn/authz", []string{
"--port=0",
"--authentication-skip-lookup", // to survive an inaccessible extension-apiserver-authentication configmap
"--authentication-kubeconfig", kubeconfig,
"--authorization-kubeconfig", kubeconfig,
"--authorization-always-allow-paths", "/healthz,/metrics",
"--kubeconfig", kubeconfig,
"--leader-elect=false",
}, "/metrics", false, false, intPtr(http.StatusOK), nil},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
secureOptions, secureInfo, insecureInfo, tearDownFn, err := tester.StartTestServer(t, append(append([]string{}, tt.flags...), extraFlags...))
if tearDownFn != nil {
defer tearDownFn()
}
if (err != nil) != tt.wantErr {
t.Fatalf("StartTestServer() error = %v, wantErr %v", err, tt.wantErr)
}
if err != nil {
return
}

if want, got := tt.wantSecureCode != nil, secureInfo != nil; want != got {
t.Errorf("SecureServing enabled: expected=%v got=%v", want, got)
} else if want {
url := fmt.Sprintf("https://%s%s", secureInfo.Listener.Addr().String(), tt.path)
url = strings.Replace(url, "[::]", "127.0.0.1", -1) // switch to IPv4 because the self-signed cert does not support [::]

// read the self-signed server cert from disk
pool := x509.NewCertPool()
serverCertPath := path.Join(secureOptions.ServerCert.CertDirectory, secureOptions.ServerCert.PairName+".crt")
serverCert, err := ioutil.ReadFile(serverCertPath)
if err != nil {
t.Fatalf("Failed to read controller-manager server cert %q: %v", serverCertPath, err)
}
pool.AppendCertsFromPEM(serverCert)
tr := &http.Transport{
TLSClientConfig: &tls.Config{
RootCAs: pool,
},
}

client := &http.Client{Transport: tr}
req, err := http.NewRequest("GET", url, nil)
if err != nil {
t.Fatal(err)
}
if !tt.anonymous {
req.Header.Add("Authorization", fmt.Sprintf("Token %s", token))
}
r, err := client.Do(req)
if err != nil {
t.Fatalf("failed to GET %s from controller-manager: %v", tt.path, err)
}

body, err := ioutil.ReadAll(r.Body)
defer r.Body.Close()
if got, expected := r.StatusCode, *tt.wantSecureCode; got != expected {
t.Fatalf("expected http %d at %s of controller-manager, got: %d %q", expected, tt.path, got, string(body))
}
}

if want, got := tt.wantInsecureCode != nil, insecureInfo != nil; want != got {
t.Errorf("InsecureServing enabled: expected=%v got=%v", want, got)
} else if want {
url := fmt.Sprintf("http://%s%s", insecureInfo.Listener.Addr().String(), tt.path)
r, err := http.Get(url)
if err != nil {
t.Fatalf("failed to GET %s from controller-manager: %v", tt.path, err)
}
body, err := ioutil.ReadAll(r.Body)
defer r.Body.Close()
if got, expected := r.StatusCode, *tt.wantInsecureCode; got != expected {
t.Fatalf("expected http %d at %s of controller-manager, got: %d %q", expected, tt.path, got, string(body))
}
}
})
}
}

func intPtr(x int) *int {
return &x
}

func fakeCloudProviderFactory(io.Reader) (cloudprovider.Interface, error) {
return &fake.FakeCloud{}, nil
}
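The secure-serving checks above boil down to one HTTPS probe; here is a sketch of that probe under the assumption that the caller supplies the listener address, the bearer token, and a cert pool seeded with the controller-manager's self-signed certificate, as the test does (the helper name is illustrative):

func probeSecureEndpoint(t *testing.T, addr, path, token string, pool *x509.CertPool) int {
	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{RootCAs: pool},
	}}
	req, err := http.NewRequest("GET", "https://"+addr+path, nil)
	if err != nil {
		t.Fatal(err)
	}
	// The bearer token is checked by delegated authentication against the apiserver.
	req.Header.Add("Authorization", "Token "+token)
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("failed to GET %s: %v", path, err)
	}
	defer resp.Body.Close()
	return resp.StatusCode
}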
37
vendor/k8s.io/kubernetes/test/integration/daemonset/BUILD
generated
vendored
@@ -16,30 +16,33 @@ go_test(
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/daemon:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithmprovider:go_default_library",
"//pkg/scheduler/factory:go_default_library",
"//pkg/util/metrics:go_default_library",
"//pkg/util/labels:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)
2
vendor/k8s.io/kubernetes/test/integration/daemonset/OWNERS
generated
vendored
@@ -7,3 +7,5 @@ reviewers:
- lukaszo
- janetkuo
- kow3ns
labels:
- sig/apps
406
vendor/k8s.io/kubernetes/test/integration/daemonset/daemonset_test.go
generated
vendored
@@ -39,8 +39,11 @@ import (
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/daemon"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/scheduler"
|
||||
@@ -48,7 +51,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
|
||||
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
|
||||
"k8s.io/kubernetes/pkg/scheduler/factory"
|
||||
"k8s.io/kubernetes/pkg/util/metrics"
|
||||
labelsutil "k8s.io/kubernetes/pkg/util/labels"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
@@ -65,13 +68,13 @@ func setup(t *testing.T) (*httptest.Server, framework.CloseFunc, *daemon.DaemonS
|
||||
}
|
||||
resyncPeriod := 12 * time.Hour
|
||||
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "daemonset-informers")), resyncPeriod)
|
||||
metrics.UnregisterMetricAndUntrackRateLimiterUsage("daemon_controller")
|
||||
dc, err := daemon.NewDaemonSetsController(
|
||||
informers.Apps().V1().DaemonSets(),
|
||||
informers.Apps().V1().ControllerRevisions(),
|
||||
informers.Core().V1().Pods(),
|
||||
informers.Core().V1().Nodes(),
|
||||
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "daemonset-controller")),
|
||||
flowcontrol.NewBackOff(5*time.Second, 15*time.Minute),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating DaemonSets controller: %v", err)
|
||||
@@ -91,23 +94,27 @@ func setupScheduler(
|
||||
return
|
||||
}
|
||||
|
||||
schedulerConfigFactory := factory.NewConfigFactory(
|
||||
v1.DefaultSchedulerName,
|
||||
cs,
|
||||
informerFactory.Core().V1().Nodes(),
|
||||
informerFactory.Core().V1().Pods(),
|
||||
informerFactory.Core().V1().PersistentVolumes(),
|
||||
informerFactory.Core().V1().PersistentVolumeClaims(),
|
||||
informerFactory.Core().V1().ReplicationControllers(),
|
||||
informerFactory.Extensions().V1beta1().ReplicaSets(),
|
||||
informerFactory.Apps().V1beta1().StatefulSets(),
|
||||
informerFactory.Core().V1().Services(),
|
||||
informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
|
||||
informerFactory.Storage().V1().StorageClasses(),
|
||||
v1.DefaultHardPodAffinitySymmetricWeight,
|
||||
true,
|
||||
false,
|
||||
)
|
||||
// Enable Features.
|
||||
algorithmprovider.ApplyFeatureGates()
|
||||
|
||||
schedulerConfigFactory := factory.NewConfigFactory(&factory.ConfigFactoryArgs{
|
||||
SchedulerName: v1.DefaultSchedulerName,
|
||||
Client: cs,
|
||||
NodeInformer: informerFactory.Core().V1().Nodes(),
|
||||
PodInformer: informerFactory.Core().V1().Pods(),
|
||||
PvInformer: informerFactory.Core().V1().PersistentVolumes(),
|
||||
PvcInformer: informerFactory.Core().V1().PersistentVolumeClaims(),
|
||||
ReplicationControllerInformer: informerFactory.Core().V1().ReplicationControllers(),
|
||||
ReplicaSetInformer: informerFactory.Apps().V1().ReplicaSets(),
|
||||
StatefulSetInformer: informerFactory.Apps().V1().StatefulSets(),
|
||||
ServiceInformer: informerFactory.Core().V1().Services(),
|
||||
PdbInformer: informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
|
||||
StorageClassInformer: informerFactory.Storage().V1().StorageClasses(),
|
||||
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
|
||||
EnableEquivalenceClassCache: true,
|
||||
DisablePreemption: false,
|
||||
PercentageOfNodesToScore: 100,
|
||||
})
|
||||
|
||||
schedulerConfig, err := schedulerConfigFactory.Create()
|
||||
if err != nil {
|
||||
@@ -271,11 +278,13 @@ func newNode(name string, label map[string]string) *v1.Node {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: label,
|
||||
Namespace: metav1.NamespaceDefault,
|
||||
Namespace: metav1.NamespaceNone,
|
||||
},
|
||||
Status: v1.NodeStatus{
|
||||
Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}},
|
||||
Allocatable: v1.ResourceList{v1.ResourcePods: resource.MustParse("100")},
|
||||
// minimum version required to use matchFields
|
||||
NodeInfo: v1.NodeSystemInfo{KubeletVersion: "v1.11.0"},
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -293,7 +302,8 @@ func validateDaemonSetPodsAndMarkReady(
|
||||
podClient corev1typed.PodInterface,
|
||||
podInformer cache.SharedIndexInformer,
|
||||
numberPods int,
|
||||
t *testing.T) {
|
||||
t *testing.T,
|
||||
) {
|
||||
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
|
||||
objects := podInformer.GetIndexer().List()
|
||||
if len(objects) != numberPods {
|
||||
@@ -372,6 +382,52 @@ func waitForPodsCreated(podInformer cache.SharedIndexInformer, num int) error {
|
||||
})
|
||||
}
|
||||
|
||||
func waitForDaemonSetAndControllerRevisionCreated(c clientset.Interface, name string, namespace string) error {
|
||||
return wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) {
|
||||
ds, err := c.AppsV1().DaemonSets(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if ds == nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
revs, err := c.AppsV1().ControllerRevisions(namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if revs.Size() == 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
for _, rev := range revs.Items {
|
||||
for _, oref := range rev.OwnerReferences {
|
||||
if oref.Kind == "DaemonSet" && oref.UID == ds.UID {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
}
|
||||
|
||||
func hashAndNameForDaemonSet(ds *apps.DaemonSet) (string, string) {
|
||||
hash := fmt.Sprint(controller.ComputeHash(&ds.Spec.Template, ds.Status.CollisionCount))
|
||||
name := ds.Name + "-" + hash
|
||||
return hash, name
|
||||
}
|
||||
|
||||
func validateDaemonSetCollisionCount(dsClient appstyped.DaemonSetInterface, dsName string, expCount int32, t *testing.T) {
|
||||
ds, err := dsClient.Get(dsName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to look up DaemonSet: %v", err)
|
||||
}
|
||||
collisionCount := ds.Status.CollisionCount
|
||||
if *collisionCount != expCount {
|
||||
t.Fatalf("Expected collisionCount to be %d, but found %d", expCount, *collisionCount)
|
||||
}
|
||||
}
|
||||
|
||||
func validateDaemonSetStatus(
|
||||
dsClient appstyped.DaemonSetInterface,
|
||||
dsName string,
|
||||
@@ -413,16 +469,36 @@ func validateFailedPlacementEvent(eventClient corev1typed.EventInterface, t *tes
|
||||
}
|
||||
}
|
||||
|
||||
func updateDS(t *testing.T, dsClient appstyped.DaemonSetInterface, dsName string, updateFunc func(*apps.DaemonSet)) *apps.DaemonSet {
|
||||
var ds *apps.DaemonSet
|
||||
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
newDS, err := dsClient.Get(dsName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
updateFunc(newDS)
|
||||
ds, err = dsClient.Update(newDS)
|
||||
return err
|
||||
}); err != nil {
|
||||
t.Fatalf("Failed to update DaemonSet: %v", err)
|
||||
}
|
||||
return ds
|
||||
}
|
||||
|
||||
func forEachFeatureGate(t *testing.T, tf func(t *testing.T)) {
|
||||
for _, fg := range featureGates() {
|
||||
func() {
|
||||
enabled := utilfeature.DefaultFeatureGate.Enabled(fg)
|
||||
defer func() {
|
||||
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled))
|
||||
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled)); err != nil {
|
||||
t.Fatalf("Failed to set FeatureGate %v to %t", fg, enabled)
|
||||
}
|
||||
}()
|
||||
|
||||
for _, f := range []bool{true, false} {
|
||||
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f))
|
||||
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f)); err != nil {
|
||||
t.Fatalf("Failed to set FeatureGate %v to %t", fg, f)
|
||||
}
|
||||
t.Run(fmt.Sprintf("%v (%t)", fg, f), tf)
|
||||
}
|
||||
}()
|
||||
@@ -630,7 +706,23 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func setFeatureGate(t *testing.T, feature utilfeature.Feature, enabled bool) {
|
||||
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", feature, enabled)); err != nil {
|
||||
t.Fatalf("Failed to set FeatureGate %v to %t: %v", feature, enabled, err)
|
||||
}
|
||||
}
|
||||
|
||||
// When ScheduleDaemonSetPods is disabled, DaemonSets should not launch onto nodes with insufficient capacity.
|
||||
// Look for TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled, we don't need this test anymore.
|
||||
func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
|
||||
enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
|
||||
// Rollback feature gate.
|
||||
defer func() {
|
||||
if enabled {
|
||||
setFeatureGate(t, features.ScheduleDaemonSetPods, true)
|
||||
}
|
||||
}()
|
||||
setFeatureGate(t, features.ScheduleDaemonSetPods, false)
|
||||
forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
|
||||
server, closeFn, dc, informers, clientset := setup(t)
|
||||
defer closeFn()
|
||||
@@ -673,11 +765,15 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
|
||||
func TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled(t *testing.T) {
|
||||
enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
|
||||
defer func() {
|
||||
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t",
|
||||
features.ScheduleDaemonSetPods, enabled))
|
||||
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t",
|
||||
features.ScheduleDaemonSetPods, enabled)); err != nil {
|
||||
t.Fatalf("Failed to set FeatureGate %v to %t", features.ScheduleDaemonSetPods, enabled)
|
||||
}
|
||||
}()
|
||||
|
||||
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.ScheduleDaemonSetPods, true))
|
||||
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.ScheduleDaemonSetPods, true)); err != nil {
|
||||
t.Fatalf("Failed to set FeatureGate %v to %t", features.ScheduleDaemonSetPods, true)
|
||||
}
|
||||
|
||||
forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
|
||||
server, closeFn, dc, informers, clientset := setup(t)
|
||||
@@ -740,3 +836,261 @@ func TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled(t *testing.T)
|
||||
validateDaemonSetStatus(dsClient, ds.Name, 1, t)
|
||||
})
|
||||
}
|
||||
|
||||
// TestLaunchWithHashCollision tests that a DaemonSet can be updated even if there is a
|
||||
// hash collision with an existing ControllerRevision
|
||||
func TestLaunchWithHashCollision(t *testing.T) {
|
||||
server, closeFn, dc, informers, clientset := setup(t)
|
||||
defer closeFn()
|
||||
ns := framework.CreateTestingNamespace("one-node-daemonset-test", server, t)
|
||||
defer framework.DeleteTestingNamespace(ns, server, t)
|
||||
|
||||
dsClient := clientset.AppsV1().DaemonSets(ns.Name)
|
||||
podInformer := informers.Core().V1().Pods().Informer()
|
||||
nodeClient := clientset.CoreV1().Nodes()
|
||||
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
|
||||
informers.Start(stopCh)
|
||||
go dc.Run(1, stopCh)
|
||||
|
||||
setupScheduler(t, clientset, informers, stopCh)
|
||||
|
||||
// Create single node
|
||||
_, err := nodeClient.Create(newNode("single-node", nil))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create node: %v", err)
|
||||
}
|
||||
|
||||
// Create new DaemonSet with RollingUpdate strategy
|
||||
orgDs := newDaemonSet("foo", ns.Name)
|
||||
oneIntString := intstr.FromInt(1)
|
||||
orgDs.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{
|
||||
Type: apps.RollingUpdateDaemonSetStrategyType,
|
||||
RollingUpdate: &apps.RollingUpdateDaemonSet{
|
||||
MaxUnavailable: &oneIntString,
|
||||
},
|
||||
}
|
||||
ds, err := dsClient.Create(orgDs)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create DaemonSet: %v", err)
|
||||
}
|
||||
|
||||
// Wait for the DaemonSet to be created before proceeding
|
||||
err = waitForDaemonSetAndControllerRevisionCreated(clientset, ds.Name, ds.Namespace)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create DaemonSet: %v", err)
|
||||
}
|
||||
|
||||
ds, err = dsClient.Get(ds.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get DaemonSet: %v", err)
|
||||
}
|
||||
var orgCollisionCount int32
|
||||
if ds.Status.CollisionCount != nil {
|
||||
orgCollisionCount = *ds.Status.CollisionCount
|
||||
}
|
||||
|
||||
// Look up the ControllerRevision for the DaemonSet
|
||||
_, name := hashAndNameForDaemonSet(ds)
|
||||
revision, err := clientset.AppsV1().ControllerRevisions(ds.Namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil || revision == nil {
|
||||
t.Fatalf("Failed to look up ControllerRevision: %v", err)
|
||||
}
|
||||
|
||||
// Create a "fake" ControllerRevision that we know will create a hash collision when we make
|
||||
// the next update
|
||||
one := int64(1)
|
||||
ds.Spec.Template.Spec.TerminationGracePeriodSeconds = &one
|
||||
|
||||
newHash, newName := hashAndNameForDaemonSet(ds)
|
||||
newRevision := &apps.ControllerRevision{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: newName,
|
||||
Namespace: ds.Namespace,
|
||||
Labels: labelsutil.CloneAndAddLabel(ds.Spec.Template.Labels, apps.DefaultDaemonSetUniqueLabelKey, newHash),
|
||||
Annotations: ds.Annotations,
|
||||
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(ds, apps.SchemeGroupVersion.WithKind("DaemonSet"))},
|
||||
},
|
||||
Data: revision.Data,
|
||||
Revision: revision.Revision + 1,
|
||||
}
|
||||
_, err = clientset.AppsV1().ControllerRevisions(ds.Namespace).Create(newRevision)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create ControllerRevision: %v", err)
|
||||
}
|
||||
|
||||
// Make an update of the DaemonSet which we know will create a hash collision when
|
||||
// the next ControllerRevision is created.
|
||||
ds = updateDS(t, dsClient, ds.Name, func(updateDS *apps.DaemonSet) {
|
||||
updateDS.Spec.Template.Spec.TerminationGracePeriodSeconds = &one
|
||||
})
|
||||
|
||||
// Wait for any pod with the latest Spec to exist
|
||||
err = wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) {
|
||||
objects := podInformer.GetIndexer().List()
|
||||
for _, object := range objects {
|
||||
pod := object.(*v1.Pod)
|
||||
if *pod.Spec.TerminationGracePeriodSeconds == *ds.Spec.Template.Spec.TerminationGracePeriodSeconds {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to wait for Pods with the latest Spec to be created: %v", err)
|
||||
}
|
||||
|
||||
validateDaemonSetCollisionCount(dsClient, ds.Name, orgCollisionCount+1, t)
|
||||
}
|
||||
|
||||
// TestTaintedNode tests that no matter "ScheduleDaemonSetPods" feature is enabled or not
|
||||
// tainted node isn't expected to have pod scheduled
|
||||
func TestTaintedNode(t *testing.T) {
|
||||
forEachFeatureGate(t, func(t *testing.T) {
|
||||
forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
|
||||
server, closeFn, dc, informers, clientset := setup(t)
|
||||
defer closeFn()
|
||||
ns := framework.CreateTestingNamespace("tainted-node", server, t)
|
||||
defer framework.DeleteTestingNamespace(ns, server, t)
|
||||
|
||||
dsClient := clientset.AppsV1().DaemonSets(ns.Name)
|
||||
podClient := clientset.CoreV1().Pods(ns.Name)
|
||||
podInformer := informers.Core().V1().Pods().Informer()
|
||||
nodeClient := clientset.CoreV1().Nodes()
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
|
||||
informers.Start(stopCh)
|
||||
go dc.Run(5, stopCh)
|
||||
|
||||
// Start Scheduler
|
||||
setupScheduler(t, clientset, informers, stopCh)
|
||||
|
||||
ds := newDaemonSet("foo", ns.Name)
|
||||
ds.Spec.UpdateStrategy = *strategy
|
||||
ds, err := dsClient.Create(ds)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create DaemonSet: %v", err)
|
||||
}
|
||||
|
||||
defer cleanupDaemonSets(t, clientset, ds)
|
||||
|
||||
nodeWithTaint := newNode("node-with-taint", nil)
|
||||
nodeWithTaint.Spec.Taints = []v1.Taint{{Key: "key1", Value: "val1", Effect: "NoSchedule"}}
|
||||
_, err = nodeClient.Create(nodeWithTaint)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create nodeWithTaint: %v", err)
|
||||
}
|
||||
|
||||
nodeWithoutTaint := newNode("node-without-taint", nil)
|
||||
_, err = nodeClient.Create(nodeWithoutTaint)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create nodeWithoutTaint: %v", err)
|
||||
}
|
||||
|
||||
validateDaemonSetPodsAndMarkReady(podClient, podInformer, 1, t)
|
||||
validateDaemonSetStatus(dsClient, ds.Name, 1, t)
|
||||
|
||||
// remove taint from nodeWithTaint
|
||||
nodeWithTaint, err = nodeClient.Get("node-with-taint", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to retrieve nodeWithTaint: %v", err)
|
||||
}
|
||||
nodeWithTaintCopy := nodeWithTaint.DeepCopy()
|
||||
nodeWithTaintCopy.Spec.Taints = []v1.Taint{}
|
||||
_, err = nodeClient.Update(nodeWithTaintCopy)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to update nodeWithTaint: %v", err)
|
||||
}
|
||||
|
||||
validateDaemonSetPodsAndMarkReady(podClient, podInformer, 2, t)
|
||||
validateDaemonSetStatus(dsClient, ds.Name, 2, t)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// TestUnschedulableNodeDaemonDoesLaunchPod tests that the DaemonSet Pods can still be scheduled
|
||||
// to the Unschedulable nodes when TaintNodesByCondition are enabled.
|
||||
func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {
	enabledTaint := utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition)
	defer func() {
		if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t",
			features.TaintNodesByCondition, enabledTaint)); err != nil {
			t.Fatalf("Failed to set FeatureGate %v to %t", features.TaintNodesByCondition, enabledTaint)
		}
	}()
	if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.TaintNodesByCondition, true)); err != nil {
		t.Fatalf("Failed to set FeatureGate %v to %t", features.TaintNodesByCondition, true)
	}

	forEachFeatureGate(t, func(t *testing.T) {
		forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
			server, closeFn, dc, informers, clientset := setup(t)
			defer closeFn()
			ns := framework.CreateTestingNamespace("daemonset-unschedulable-test", server, t)
			defer framework.DeleteTestingNamespace(ns, server, t)

			dsClient := clientset.AppsV1().DaemonSets(ns.Name)
			podClient := clientset.CoreV1().Pods(ns.Name)
			nodeClient := clientset.CoreV1().Nodes()
			podInformer := informers.Core().V1().Pods().Informer()

			stopCh := make(chan struct{})
			defer close(stopCh)

			informers.Start(stopCh)
			go dc.Run(5, stopCh)

			// Start Scheduler
			setupScheduler(t, clientset, informers, stopCh)

			ds := newDaemonSet("foo", ns.Name)
			ds.Spec.UpdateStrategy = *strategy
			ds.Spec.Template.Spec.HostNetwork = true
			_, err := dsClient.Create(ds)
			if err != nil {
				t.Fatalf("Failed to create DaemonSet: %v", err)
			}

			defer cleanupDaemonSets(t, clientset, ds)

			// Creates unschedulable node.
			node := newNode("unschedulable-node", nil)
			node.Spec.Unschedulable = true
			node.Spec.Taints = []v1.Taint{
				{
					Key:    algorithm.TaintNodeUnschedulable,
					Effect: v1.TaintEffectNoSchedule,
				},
			}

			_, err = nodeClient.Create(node)
			if err != nil {
				t.Fatalf("Failed to create node: %v", err)
			}

			// Creates network-unavailable node.
			nodeNU := newNode("network-unavailable-node", nil)
			nodeNU.Status.Conditions = []v1.NodeCondition{
				{Type: v1.NodeReady, Status: v1.ConditionFalse},
				{Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue},
			}
			nodeNU.Spec.Taints = []v1.Taint{
				{
					Key:    algorithm.TaintNodeNetworkUnavailable,
					Effect: v1.TaintEffectNoSchedule,
				},
			}

			_, err = nodeClient.Create(nodeNU)
			if err != nil {
				t.Fatalf("Failed to create node: %v", err)
			}

			validateDaemonSetPodsAndMarkReady(podClient, podInformer, 2, t)
			validateDaemonSetStatus(dsClient, ds.Name, 2, t)
		})
	})
}
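
The test above hinges on the DaemonSet controller tolerating the node-condition taints it meets. For orientation, a minimal sketch of the toleration involved (hypothetical fragment; in-tree the controller adds equivalent tolerations to DaemonSet pods automatically when TaintNodesByCondition is on):

	// algorithm.TaintNodeUnschedulable resolves to the
	// "node.kubernetes.io/unschedulable" taint key.
	tolerations := []v1.Toleration{{
		Key:      algorithm.TaintNodeUnschedulable,
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoSchedule,
	}}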
10
vendor/k8s.io/kubernetes/test/integration/defaulttolerationseconds/BUILD
generated
vendored
@@ -20,12 +20,12 @@ go_test(
        "//pkg/apis/core/helper:go_default_library",
        "//pkg/scheduler/algorithm:go_default_library",
        "//plugin/pkg/admission/defaulttolerationseconds:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)

35
vendor/k8s.io/kubernetes/test/integration/deployment/BUILD
generated
vendored
@@ -17,15 +17,15 @@ go_test(
    tags = ["integration"],
    deps = [
        "//pkg/controller/deployment/util:go_default_library",
        "//pkg/util/pointer:go_default_library",
        "//staging/src/k8s.io/api/apps/v1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/util/retry:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/apps/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/util/retry:go_default_library",
        "//vendor/k8s.io/utils/pointer:go_default_library",
    ],
)

@@ -38,18 +38,17 @@ go_library(
        "//pkg/controller/deployment:go_default_library",
        "//pkg/controller/deployment/util:go_default_library",
        "//pkg/controller/replicaset:go_default_library",
        "//pkg/util/metrics:go_default_library",
        "//staging/src/k8s.io/api/apps/v1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//test/integration/framework:go_default_library",
        "//test/utils:go_default_library",
        "//vendor/k8s.io/api/apps/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)

2
vendor/k8s.io/kubernetes/test/integration/deployment/OWNERS
generated
vendored
@@ -5,3 +5,5 @@ reviewers:
approvers:
- janetkuo
- kargakis
labels:
- sig/apps
6
vendor/k8s.io/kubernetes/test/integration/deployment/deployment_test.go
generated
vendored
@@ -30,8 +30,8 @@ import (
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/util/retry"
	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
	"k8s.io/kubernetes/pkg/util/pointer"
	"k8s.io/kubernetes/test/integration/framework"
	"k8s.io/utils/pointer"
)

func TestNewDeployment(t *testing.T) {
@@ -681,6 +681,10 @@ func checkRSHashLabels(rs *apps.ReplicaSet) (string, error) {
		return "", fmt.Errorf("unexpected replicaset %s missing required pod-template-hash labels", rs.Name)
	}

	if !strings.HasSuffix(rs.Name, hash) {
		return "", fmt.Errorf("unexpected replicaset %s name suffix doesn't match hash %s", rs.Name, hash)
	}

	return hash, nil
}

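
The only functional change in this hunk is the pointer helper moving from k8s.io/kubernetes/pkg/util/pointer to the shared k8s.io/utils/pointer module; call sites keep the same package name. A minimal sketch of a hypothetical caller (assuming the 1.13-era helper names in k8s.io/utils/pointer):

	import "k8s.io/utils/pointer"

	// pointer.Int32Ptr returns *int32, e.g. for deployment.Spec.Replicas.
	replicas := pointer.Int32Ptr(2)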
2
vendor/k8s.io/kubernetes/test/integration/deployment/util.go
generated
vendored
@@ -36,7 +36,6 @@ import (
	"k8s.io/kubernetes/pkg/controller/deployment"
	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
	"k8s.io/kubernetes/pkg/controller/replicaset"
	"k8s.io/kubernetes/pkg/util/metrics"
	"k8s.io/kubernetes/test/integration/framework"
	testutil "k8s.io/kubernetes/test/utils"
)
@@ -156,7 +155,6 @@ func dcSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *replicaset.R
	resyncPeriod := 12 * time.Hour
	informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "deployment-informers")), resyncPeriod)

	metrics.UnregisterMetricAndUntrackRateLimiterUsage("deployment_controller")
	dc, err := deployment.NewDeploymentController(
		informers.Apps().V1().Deployments(),
		informers.Apps().V1().ReplicaSets(),
57
vendor/k8s.io/kubernetes/test/integration/dryrun/BUILD
generated
vendored
Normal file
@@ -0,0 +1,57 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "dryrun_test.go",
        "main_test.go",
    ],
    tags = [
        "etcd",
        "integration",
    ],
    deps = [
        "//cmd/kube-apiserver/app:go_default_library",
        "//cmd/kube-apiserver/app/options:go_default_library",
        "//pkg/master:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/features:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/server/options:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
        "//staging/src/k8s.io/client-go/discovery/cached:go_default_library",
        "//staging/src/k8s.io/client-go/dynamic:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/restmapper:go_default_library",
        "//test/integration/framework:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
7
vendor/k8s.io/kubernetes/test/integration/dryrun/OWNERS
generated
vendored
Normal file
@@ -0,0 +1,7 @@
approvers:
- apelisse
- deads2k
reviewers:
- deads2k
- liggitt
- lavalamp
765
vendor/k8s.io/kubernetes/test/integration/dryrun/dryrun_test.go
generated
vendored
Normal file
@@ -0,0 +1,765 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package dryrun

import (
    "encoding/json"
    "io/ioutil"
    "net"
    "net/http"
    "os"
    "strings"
    "testing"
    "time"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/apiserver/pkg/features"
    genericapiserveroptions "k8s.io/apiserver/pkg/server/options"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
    cacheddiscovery "k8s.io/client-go/discovery/cached"
    "k8s.io/client-go/dynamic"
    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    "k8s.io/client-go/restmapper"
    "k8s.io/kubernetes/cmd/kube-apiserver/app"
    "k8s.io/kubernetes/cmd/kube-apiserver/app/options"
    "k8s.io/kubernetes/test/integration/framework"

    // install all APIs
    _ "k8s.io/kubernetes/pkg/master" // TODO what else is needed
)

// dryrun data for all persisted objects.
var dryrunData = map[schema.GroupVersionResource]struct {
    stub string // Valid JSON stub to use during create
}{
    // k8s.io/kubernetes/pkg/api/v1
    gvr("", "v1", "configmaps"): {
        stub: `{"data": {"foo": "bar"}, "metadata": {"name": "cm1"}}`,
    },
    gvr("", "v1", "services"): {
        stub: `{"metadata": {"name": "service1"}, "spec": {"externalName": "service1name", "ports": [{"port": 10000, "targetPort": 11000}], "selector": {"test": "data"}}}`,
    },
    gvr("", "v1", "podtemplates"): {
        stub: `{"metadata": {"name": "pt1name"}, "template": {"metadata": {"labels": {"pt": "01"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container9"}]}}}`,
    },
    gvr("", "v1", "pods"): {
        stub: `{"metadata": {"name": "pod1"}, "spec": {"containers": [{"image": "fedora:latest", "name": "container7", "resources": {"limits": {"cpu": "1M"}, "requests": {"cpu": "1M"}}}]}}`,
    },
    gvr("", "v1", "endpoints"): {
        stub: `{"metadata": {"name": "ep1name"}, "subsets": [{"addresses": [{"hostname": "bar-001", "ip": "192.168.3.1"}], "ports": [{"port": 8000}]}]}`,
    },
    gvr("", "v1", "resourcequotas"): {
        stub: `{"metadata": {"name": "rq1name"}, "spec": {"hard": {"cpu": "5M"}}}`,
    },
    gvr("", "v1", "limitranges"): {
        stub: `{"metadata": {"name": "lr1name"}, "spec": {"limits": [{"type": "Pod"}]}}`,
    },
    gvr("", "v1", "namespaces"): {
        stub: `{"metadata": {"name": "namespace2"}, "spec": {"finalizers": ["kubernetes"]}}`,
    },
    gvr("", "v1", "nodes"): {
        stub: `{"metadata": {"name": "node1"}, "spec": {"unschedulable": true}}`,
    },
    gvr("", "v1", "persistentvolumes"): {
        stub: `{"metadata": {"name": "pv1name"}, "spec": {"accessModes": ["ReadWriteOnce"], "capacity": {"storage": "3M"}, "hostPath": {"path": "/tmp/test/"}}}`,
    },
    gvr("", "v1", "events"): {
        stub: `{"involvedObject": {"namespace": "dryrunnamespace"}, "message": "some data here", "metadata": {"name": "event1"}}`,
    },
    gvr("", "v1", "persistentvolumeclaims"): {
        stub: `{"metadata": {"name": "pvc1"}, "spec": {"accessModes": ["ReadWriteOnce"], "resources": {"limits": {"storage": "1M"}, "requests": {"storage": "2M"}}, "selector": {"matchLabels": {"pvc": "stuff"}}}}`,
    },
    gvr("", "v1", "serviceaccounts"): {
        stub: `{"metadata": {"name": "sa1name"}, "secrets": [{"name": "secret00"}]}`,
    },
    gvr("", "v1", "secrets"): {
        stub: `{"data": {"key": "ZGF0YSBmaWxl"}, "metadata": {"name": "secret1"}}`,
    },
    gvr("", "v1", "replicationcontrollers"): {
        stub: `{"metadata": {"name": "rc1"}, "spec": {"selector": {"new": "stuff"}, "template": {"metadata": {"labels": {"new": "stuff"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container8"}]}}}}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/apps/v1beta1
    gvr("apps", "v1beta1", "statefulsets"): {
        stub: `{"metadata": {"name": "ss1"}, "spec": {"selector": {"matchLabels": {"a": "b"}}, "template": {"metadata": {"labels": {"a": "b"}}}}}`,
    },
    gvr("apps", "v1beta1", "deployments"): {
        stub: `{"metadata": {"name": "deployment2"}, "spec": {"selector": {"matchLabels": {"f": "z"}}, "template": {"metadata": {"labels": {"f": "z"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container6"}]}}}}`,
    },
    gvr("apps", "v1beta1", "controllerrevisions"): {
        stub: `{"metadata":{"name":"crs1"},"data":{"name":"abc","namespace":"default","creationTimestamp":null,"Spec":{"Replicas":0,"Selector":{"matchLabels":{"foo":"bar"}},"Template":{"creationTimestamp":null,"labels":{"foo":"bar"},"Spec":{"Volumes":null,"InitContainers":null,"Containers":null,"RestartPolicy":"Always","TerminationGracePeriodSeconds":null,"ActiveDeadlineSeconds":null,"DNSPolicy":"ClusterFirst","NodeSelector":null,"ServiceAccountName":"","AutomountServiceAccountToken":null,"NodeName":"","SecurityContext":null,"ImagePullSecrets":null,"Hostname":"","Subdomain":"","Affinity":null,"SchedulerName":"","Tolerations":null,"HostAliases":null}},"VolumeClaimTemplates":null,"ServiceName":""},"Status":{"ObservedGeneration":null,"Replicas":0}},"revision":0}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/apps/v1beta2
    gvr("apps", "v1beta2", "statefulsets"): {
        stub: `{"metadata": {"name": "ss2"}, "spec": {"selector": {"matchLabels": {"a": "b"}}, "template": {"metadata": {"labels": {"a": "b"}}}}}`,
    },
    gvr("apps", "v1beta2", "deployments"): {
        stub: `{"metadata": {"name": "deployment3"}, "spec": {"selector": {"matchLabels": {"f": "z"}}, "template": {"metadata": {"labels": {"f": "z"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container6"}]}}}}`,
    },
    gvr("apps", "v1beta2", "daemonsets"): {
        stub: `{"metadata": {"name": "ds5"}, "spec": {"selector": {"matchLabels": {"a": "b"}}, "template": {"metadata": {"labels": {"a": "b"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container6"}]}}}}`,
    },
    gvr("apps", "v1beta2", "replicasets"): {
        stub: `{"metadata": {"name": "rs2"}, "spec": {"selector": {"matchLabels": {"g": "h"}}, "template": {"metadata": {"labels": {"g": "h"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container4"}]}}}}`,
    },
    gvr("apps", "v1beta2", "controllerrevisions"): {
        stub: `{"metadata":{"name":"crs2"},"data":{"name":"abc","namespace":"default","creationTimestamp":null,"Spec":{"Replicas":0,"Selector":{"matchLabels":{"foo":"bar"}},"Template":{"creationTimestamp":null,"labels":{"foo":"bar"},"Spec":{"Volumes":null,"InitContainers":null,"Containers":null,"RestartPolicy":"Always","TerminationGracePeriodSeconds":null,"ActiveDeadlineSeconds":null,"DNSPolicy":"ClusterFirst","NodeSelector":null,"ServiceAccountName":"","AutomountServiceAccountToken":null,"NodeName":"","SecurityContext":null,"ImagePullSecrets":null,"Hostname":"","Subdomain":"","Affinity":null,"SchedulerName":"","Tolerations":null,"HostAliases":null}},"VolumeClaimTemplates":null,"ServiceName":""},"Status":{"ObservedGeneration":null,"Replicas":0}},"revision":0}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/apps/v1
    gvr("apps", "v1", "daemonsets"): {
        stub: `{"metadata": {"name": "ds6"}, "spec": {"selector": {"matchLabels": {"a": "b"}}, "template": {"metadata": {"labels": {"a": "b"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container6"}]}}}}`,
    },
    gvr("apps", "v1", "deployments"): {
        stub: `{"metadata": {"name": "deployment4"}, "spec": {"selector": {"matchLabels": {"f": "z"}}, "template": {"metadata": {"labels": {"f": "z"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container6"}]}}}}`,
    },
    gvr("apps", "v1", "statefulsets"): {
        stub: `{"metadata": {"name": "ss3"}, "spec": {"selector": {"matchLabels": {"a": "b"}}, "template": {"metadata": {"labels": {"a": "b"}}}}}`,
    },
    gvr("apps", "v1", "replicasets"): {
        stub: `{"metadata": {"name": "rs3"}, "spec": {"selector": {"matchLabels": {"g": "h"}}, "template": {"metadata": {"labels": {"g": "h"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container4"}]}}}}`,
    },
    gvr("apps", "v1", "controllerrevisions"): {
        stub: `{"metadata":{"name":"crs3"},"data":{"name":"abc","namespace":"default","creationTimestamp":null,"Spec":{"Replicas":0,"Selector":{"matchLabels":{"foo":"bar"}},"Template":{"creationTimestamp":null,"labels":{"foo":"bar"},"Spec":{"Volumes":null,"InitContainers":null,"Containers":null,"RestartPolicy":"Always","TerminationGracePeriodSeconds":null,"ActiveDeadlineSeconds":null,"DNSPolicy":"ClusterFirst","NodeSelector":null,"ServiceAccountName":"","AutomountServiceAccountToken":null,"NodeName":"","SecurityContext":null,"ImagePullSecrets":null,"Hostname":"","Subdomain":"","Affinity":null,"SchedulerName":"","Tolerations":null,"HostAliases":null}},"VolumeClaimTemplates":null,"ServiceName":""},"Status":{"ObservedGeneration":null,"Replicas":0}},"revision":0}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/autoscaling/v1
    gvr("autoscaling", "v1", "horizontalpodautoscalers"): {
        stub: `{"metadata": {"name": "hpa2"}, "spec": {"maxReplicas": 3, "scaleTargetRef": {"kind": "something", "name": "cross"}}}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1
    gvr("autoscaling", "v2beta1", "horizontalpodautoscalers"): {
        stub: `{"metadata": {"name": "hpa1"}, "spec": {"maxReplicas": 3, "scaleTargetRef": {"kind": "something", "name": "cross"}}}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/autoscaling/v2beta2
    gvr("autoscaling", "v2beta2", "horizontalpodautoscalers"): {
        stub: `{"metadata": {"name": "hpa3"}, "spec": {"maxReplicas": 3, "scaleTargetRef": {"kind": "something", "name": "cross"}}}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/batch/v1
    gvr("batch", "v1", "jobs"): {
        stub: `{"metadata": {"name": "job1"}, "spec": {"manualSelector": true, "selector": {"matchLabels": {"controller-uid": "uid1"}}, "template": {"metadata": {"labels": {"controller-uid": "uid1"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container1"}], "dnsPolicy": "ClusterFirst", "restartPolicy": "Never"}}}}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/batch/v1beta1
    gvr("batch", "v1beta1", "cronjobs"): {
        stub: `{"metadata": {"name": "cjv1beta1"}, "spec": {"jobTemplate": {"spec": {"template": {"metadata": {"labels": {"controller-uid": "uid0"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container0"}], "dnsPolicy": "ClusterFirst", "restartPolicy": "Never"}}}}, "schedule": "* * * * *"}}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/batch/v2alpha1
    gvr("batch", "v2alpha1", "cronjobs"): {
        stub: `{"metadata": {"name": "cjv2alpha1"}, "spec": {"jobTemplate": {"spec": {"template": {"metadata": {"labels": {"controller-uid": "uid0"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container0"}], "dnsPolicy": "ClusterFirst", "restartPolicy": "Never"}}}}, "schedule": "* * * * *"}}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/certificates/v1beta1
    gvr("certificates.k8s.io", "v1beta1", "certificatesigningrequests"): {
        stub: `{"metadata": {"name": "csr1"}, "spec": {"request": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQnlqQ0NBVE1DQVFBd2dZa3hDekFKQmdOVkJBWVRBbFZUTVJNd0VRWURWUVFJRXdwRFlXeHBabTl5Ym1saApNUll3RkFZRFZRUUhFdzFOYjNWdWRHRnBiaUJXYVdWM01STXdFUVlEVlFRS0V3cEhiMjluYkdVZ1NXNWpNUjh3CkhRWURWUVFMRXhaSmJtWnZjbTFoZEdsdmJpQlVaV05vYm05c2IyZDVNUmN3RlFZRFZRUURFdzUzZDNjdVoyOXYKWjJ4bExtTnZiVENCbnpBTkJna3Foa2lHOXcwQkFRRUZBQU9CalFBd2dZa0NnWUVBcFp0WUpDSEo0VnBWWEhmVgpJbHN0UVRsTzRxQzAzaGpYK1prUHl2ZFlkMVE0K3FiQWVUd1htQ1VLWUhUaFZSZDVhWFNxbFB6eUlCd2llTVpyCldGbFJRZGRaMUl6WEFsVlJEV3dBbzYwS2VjcWVBWG5uVUsrNWZYb1RJL1VnV3NocmU4dEoreC9UTUhhUUtSL0oKY0lXUGhxYVFoc0p1elpidkFkR0E4MEJMeGRNQ0F3RUFBYUFBTUEwR0NTcUdTSWIzRFFFQkJRVUFBNEdCQUlobAo0UHZGcStlN2lwQVJnSTVaTStHWng2bXBDejQ0RFRvMEprd2ZSRGYrQnRyc2FDMHE2OGVUZjJYaFlPc3E0ZmtIClEwdUEwYVZvZzNmNWlKeENhM0hwNWd4YkpRNnpWNmtKMFRFc3VhYU9oRWtvOXNkcENvUE9uUkJtMmkvWFJEMkQKNmlOaDhmOHowU2hHc0ZxakRnRkh5RjNvK2xVeWorVUM2SDFRVzdibgotLS0tLUVORCBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0="}}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/coordination/v1beta1
    gvr("coordination.k8s.io", "v1beta1", "leases"): {
        stub: `{"metadata": {"name": "lease1"}, "spec": {"holderIdentity": "holder", "leaseDurationSeconds": 5}}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/events/v1beta1
    gvr("events.k8s.io", "v1beta1", "events"): {
        stub: `{"metadata": {"name": "event2"}, "regarding": {"namespace": "dryrunnamespace"}, "note": "some data here", "eventTime": "2017-08-09T15:04:05.000000Z", "reportingInstance": "node-xyz", "reportingController": "k8s.io/my-controller", "action": "DidNothing", "reason": "Laziness"}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/extensions/v1beta1
    gvr("extensions", "v1beta1", "daemonsets"): {
        stub: `{"metadata": {"name": "ds1"}, "spec": {"selector": {"matchLabels": {"u": "t"}}, "template": {"metadata": {"labels": {"u": "t"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container5"}]}}}}`,
    },
    gvr("extensions", "v1beta1", "podsecuritypolicies"): {
        stub: `{"metadata": {"name": "psp1"}, "spec": {"fsGroup": {"rule": "RunAsAny"}, "privileged": true, "runAsUser": {"rule": "RunAsAny"}, "seLinux": {"rule": "MustRunAs"}, "supplementalGroups": {"rule": "RunAsAny"}}}`,
    },
    gvr("extensions", "v1beta1", "ingresses"): {
        stub: `{"metadata": {"name": "ingress1"}, "spec": {"backend": {"serviceName": "service", "servicePort": 5000}}}`,
    },
    gvr("extensions", "v1beta1", "networkpolicies"): {
        stub: `{"metadata": {"name": "np1"}, "spec": {"podSelector": {"matchLabels": {"e": "f"}}}}`,
    },
    gvr("extensions", "v1beta1", "deployments"): {
        stub: `{"metadata": {"name": "deployment1"}, "spec": {"selector": {"matchLabels": {"f": "z"}}, "template": {"metadata": {"labels": {"f": "z"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container6"}]}}}}`,
    },
    gvr("extensions", "v1beta1", "replicasets"): {
        stub: `{"metadata": {"name": "rs1"}, "spec": {"selector": {"matchLabels": {"g": "h"}}, "template": {"metadata": {"labels": {"g": "h"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container4"}]}}}}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/networking/v1
    gvr("networking.k8s.io", "v1", "networkpolicies"): {
        stub: `{"metadata": {"name": "np2"}, "spec": {"podSelector": {"matchLabels": {"e": "f"}}}}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/policy/v1beta1
    gvr("policy", "v1beta1", "poddisruptionbudgets"): {
        stub: `{"metadata": {"name": "pdb1"}, "spec": {"selector": {"matchLabels": {"anokkey": "anokvalue"}}}}`,
    },
    gvr("policy", "v1beta1", "podsecuritypolicies"): {
        stub: `{"metadata": {"name": "psp2"}, "spec": {"fsGroup": {"rule": "RunAsAny"}, "privileged": true, "runAsUser": {"rule": "RunAsAny"}, "seLinux": {"rule": "MustRunAs"}, "supplementalGroups": {"rule": "RunAsAny"}}}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/storage/v1alpha1
    gvr("storage.k8s.io", "v1alpha1", "volumeattachments"): {
        stub: `{"metadata": {"name": "va1"}, "spec": {"attacher": "gce", "nodeName": "localhost", "source": {"persistentVolumeName": "pv1"}}}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/storage/v1beta1
    gvr("storage.k8s.io", "v1beta1", "volumeattachments"): {
        stub: `{"metadata": {"name": "va2"}, "spec": {"attacher": "gce", "nodeName": "localhost", "source": {"persistentVolumeName": "pv2"}}}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/storage/v1beta1
    gvr("storage.k8s.io", "v1beta1", "storageclasses"): {
        stub: `{"metadata": {"name": "sc1"}, "provisioner": "aws"}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/storage/v1
    gvr("storage.k8s.io", "v1", "storageclasses"): {
        stub: `{"metadata": {"name": "sc2"}, "provisioner": "aws"}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/settings/v1alpha1
    gvr("settings.k8s.io", "v1alpha1", "podpresets"): {
        stub: `{"metadata": {"name": "podpre1"}, "spec": {"env": [{"name": "FOO"}]}}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/rbac/v1alpha1
    gvr("rbac.authorization.k8s.io", "v1alpha1", "roles"): {
        stub: `{"metadata": {"name": "role1"}, "rules": [{"apiGroups": ["v1"], "resources": ["events"], "verbs": ["watch"]}]}`,
    },
    gvr("rbac.authorization.k8s.io", "v1alpha1", "clusterroles"): {
        stub: `{"metadata": {"name": "drcrole1"}, "rules": [{"nonResourceURLs": ["/version"], "verbs": ["get"]}]}`,
    },
    gvr("rbac.authorization.k8s.io", "v1alpha1", "rolebindings"): {
        stub: `{"metadata": {"name": "drroleb1"}, "roleRef": {"apiGroup": "rbac.authorization.k8s.io", "kind": "ClusterRole", "name": "somecr"}, "subjects": [{"apiVersion": "rbac.authorization.k8s.io/v1alpha1", "kind": "Group", "name": "system:authenticated"}]}`,
    },
    gvr("rbac.authorization.k8s.io", "v1alpha1", "clusterrolebindings"): {
        stub: `{"metadata": {"name": "drcroleb1"}, "roleRef": {"apiGroup": "rbac.authorization.k8s.io", "kind": "ClusterRole", "name": "somecr"}, "subjects": [{"apiVersion": "rbac.authorization.k8s.io/v1alpha1", "kind": "Group", "name": "system:authenticated"}]}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/rbac/v1beta1
    gvr("rbac.authorization.k8s.io", "v1beta1", "roles"): {
        stub: `{"metadata": {"name": "drrole2"}, "rules": [{"apiGroups": ["v1"], "resources": ["events"], "verbs": ["watch"]}]}`,
    },
    gvr("rbac.authorization.k8s.io", "v1beta1", "clusterroles"): {
        stub: `{"metadata": {"name": "drcrole2"}, "rules": [{"nonResourceURLs": ["/version"], "verbs": ["get"]}]}`,
    },
    gvr("rbac.authorization.k8s.io", "v1beta1", "rolebindings"): {
        stub: `{"metadata": {"name": "drroleb2"}, "roleRef": {"apiGroup": "rbac.authorization.k8s.io", "kind": "ClusterRole", "name": "somecr"}, "subjects": [{"apiVersion": "rbac.authorization.k8s.io/v1alpha1", "kind": "Group", "name": "system:authenticated"}]}`,
    },
    gvr("rbac.authorization.k8s.io", "v1beta1", "clusterrolebindings"): {
        stub: `{"metadata": {"name": "drcroleb2"}, "roleRef": {"apiGroup": "rbac.authorization.k8s.io", "kind": "ClusterRole", "name": "somecr"}, "subjects": [{"apiVersion": "rbac.authorization.k8s.io/v1alpha1", "kind": "Group", "name": "system:authenticated"}]}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/rbac/v1
    gvr("rbac.authorization.k8s.io", "v1", "roles"): {
        stub: `{"metadata": {"name": "drrole3"}, "rules": [{"apiGroups": ["v1"], "resources": ["events"], "verbs": ["watch"]}]}`,
    },
    gvr("rbac.authorization.k8s.io", "v1", "clusterroles"): {
        stub: `{"metadata": {"name": "drcrole3"}, "rules": [{"nonResourceURLs": ["/version"], "verbs": ["get"]}]}`,
    },
    gvr("rbac.authorization.k8s.io", "v1", "rolebindings"): {
        stub: `{"metadata": {"name": "drroleb3"}, "roleRef": {"apiGroup": "rbac.authorization.k8s.io", "kind": "ClusterRole", "name": "somecr"}, "subjects": [{"apiVersion": "rbac.authorization.k8s.io/v1alpha1", "kind": "Group", "name": "system:authenticated"}]}`,
    },
    gvr("rbac.authorization.k8s.io", "v1", "clusterrolebindings"): {
        stub: `{"metadata": {"name": "drcroleb3"}, "roleRef": {"apiGroup": "rbac.authorization.k8s.io", "kind": "ClusterRole", "name": "somecr"}, "subjects": [{"apiVersion": "rbac.authorization.k8s.io/v1alpha1", "kind": "Group", "name": "system:authenticated"}]}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1
    gvr("admissionregistration.k8s.io", "v1alpha1", "initializerconfigurations"): {
        stub: `{"metadata":{"name":"ic1"},"initializers":[{"name":"initializer.k8s.io","rules":[{"apiGroups":["group"],"apiVersions":["version"],"resources":["resource"]}],"failurePolicy":"Ignore"}]}`,
    },
    // k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1
    gvr("admissionregistration.k8s.io", "v1beta1", "validatingwebhookconfigurations"): {
        stub: `{"metadata":{"name":"hook1","creationTimestamp":null},"webhooks":[{"name":"externaladmissionhook.k8s.io","clientConfig":{"service":{"namespace":"ns","name":"n"},"caBundle":null},"rules":[{"operations":["CREATE"],"apiGroups":["group"],"apiVersions":["version"],"resources":["resource"]}],"failurePolicy":"Ignore"}]}`,
    },
    gvr("admissionregistration.k8s.io", "v1beta1", "mutatingwebhookconfigurations"): {
        stub: `{"metadata":{"name":"hook1","creationTimestamp":null},"webhooks":[{"name":"externaladmissionhook.k8s.io","clientConfig":{"service":{"namespace":"ns","name":"n"},"caBundle":null},"rules":[{"operations":["CREATE"],"apiGroups":["group"],"apiVersions":["version"],"resources":["resource"]}],"failurePolicy":"Ignore"}]}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1
    gvr("scheduling.k8s.io", "v1alpha1", "priorityclasses"): {
        stub: `{"metadata":{"name":"pc1"},"Value":1000}`,
    },
    // --

    // k8s.io/kubernetes/pkg/apis/scheduling/v1beta1
    gvr("scheduling.k8s.io", "v1beta1", "priorityclasses"): {
        stub: `{"metadata":{"name":"pc2"},"Value":1000}`,
    },
    // --

    // k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1
    // depends on aggregator using the same ungrouped RESTOptionsGetter as the kube apiserver, not SimpleRestOptionsFactory in aggregator.go
    gvr("apiregistration.k8s.io", "v1beta1", "apiservices"): {
        stub: `{"metadata": {"name": "dras1.foo.com"}, "spec": {"group": "foo.com", "version": "dras1", "groupPriorityMinimum":100, "versionPriority":10}}`,
    },
    // --

    // k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
    // depends on aggregator using the same ungrouped RESTOptionsGetter as the kube apiserver, not SimpleRestOptionsFactory in aggregator.go
    gvr("apiregistration.k8s.io", "v1", "apiservices"): {
        stub: `{"metadata": {"name": "dras2.foo.com"}, "spec": {"group": "foo.com", "version": "dras2", "groupPriorityMinimum":100, "versionPriority":10}}`,
    },
    // --

    // k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1
    gvr("apiextensions.k8s.io", "v1beta1", "customresourcedefinitions"): {
        stub: `{"metadata": {"name": "openshiftwebconsoleconfigs.webconsole.operator.openshift.io"},"spec": {"scope": "Cluster","group": "webconsole.operator.openshift.io","version": "v1alpha1","names": {"kind": "OpenShiftWebConsoleConfig","plural": "openshiftwebconsoleconfigs","singular": "openshiftwebconsoleconfig"}}}`,
    },
    // --

}

// Only add kinds to this list when the resource is a virtual resource with get and create
// verbs that doesn't actually store into its kind. We've used this downstream for mappings before.
var kindWhiteList = sets.NewString()

// namespace used for all tests, do not change this
const testNamespace = "dryrunnamespace"

func DryRunCreateTest(t *testing.T, rsc dynamic.ResourceInterface, obj *unstructured.Unstructured, gvResource schema.GroupVersionResource) {
    createdObj, err := rsc.Create(obj, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
    if err != nil {
        t.Fatalf("failed to dry-run create stub for %s: %#v", gvResource, err)
    }
    if obj.GroupVersionKind() != createdObj.GroupVersionKind() {
        t.Fatalf("created object doesn't have the same gvk as original object: got %v, expected %v",
            createdObj.GroupVersionKind(),
            obj.GroupVersionKind())
    }

    if _, err := rsc.Get(obj.GetName(), metav1.GetOptions{}); !errors.IsNotFound(err) {
        t.Fatalf("object shouldn't exist: %v", err)
    }
}

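Every resource in dryrunData goes through this helper first. For reference, a standalone sketch of the same call pattern with the dynamic client (the ConfigMap content and "default" namespace are illustrative):

	cm := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata":   map[string]interface{}{"name": "cm1"},
		"data":       map[string]interface{}{"foo": "bar"},
	}}
	// The server runs validation and admission and returns the would-be object,
	// but persists nothing; a follow-up Get is expected to return NotFound.
	created, err := dClient.Resource(gvr("", "v1", "configmaps")).
		Namespace("default").
		Create(cm, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
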
func DryRunPatchTest(t *testing.T, rsc dynamic.ResourceInterface, name string) {
    patch := []byte(`{"metadata":{"annotations":{"patch": "true"}}}`)
    obj, err := rsc.Patch(name, types.MergePatchType, patch, metav1.UpdateOptions{DryRun: []string{metav1.DryRunAll}})
    if err != nil {
        t.Fatalf("failed to dry-run patch object: %v", err)
    }
    if v := obj.GetAnnotations()["patch"]; v != "true" {
        t.Fatalf("dry-run patched annotations should be returned, got: %v", obj.GetAnnotations())
    }
    obj, err = rsc.Get(obj.GetName(), metav1.GetOptions{})
    if err != nil {
        t.Fatalf("failed to get object: %v", err)
    }
    if v := obj.GetAnnotations()["patch"]; v == "true" {
        t.Fatalf("dry-run patched annotations should not be persisted, got: %v", obj.GetAnnotations())
    }
}

func getReplicasOrFail(t *testing.T, obj *unstructured.Unstructured) int64 {
    t.Helper()
    replicas, found, err := unstructured.NestedInt64(obj.UnstructuredContent(), "spec", "replicas")
    if err != nil {
        t.Fatalf("failed to get int64 for replicas: %v", err)
    }
    if !found {
        t.Fatal("object doesn't have spec.replicas")
    }
    return replicas
}

func setReplicasOrFail(t *testing.T, obj *unstructured.Unstructured, replicas int64) {
    m, found, err := unstructured.NestedMap(obj.UnstructuredContent(), "spec")
    if err != nil {
        t.Fatalf("failed to get spec: %v", err)
    }
    if !found {
        t.Fatal("object doesn't have spec")
    }
    m["replicas"] = replicas
    unstructured.SetNestedMap(obj.UnstructuredContent(), m, "spec")
}

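Both helpers lean on the unstructured accessors, whose three-value returns are easy to misread; for reference, matching the calls above:

	replicas, found, err := unstructured.NestedInt64(obj.Object, "spec", "replicas")
	// err != nil -> the field exists but is not an int64
	// !found     -> the path .spec.replicas is absent entirely
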
func DryRunScalePatchTest(t *testing.T, rsc dynamic.ResourceInterface, name string) {
    obj, err := rsc.Get(name, metav1.GetOptions{}, "scale")
    if errors.IsNotFound(err) {
        return
    }
    if err != nil {
        t.Fatalf("failed to get object: %v", err)
    }

    replicas := getReplicasOrFail(t, obj)
    patch := []byte(`{"spec":{"replicas":10}}`)
    patchedObj, err := rsc.Patch(name, types.MergePatchType, patch, metav1.UpdateOptions{DryRun: []string{metav1.DryRunAll}}, "scale")
    if err != nil {
        t.Fatalf("failed to dry-run patch object: %v", err)
    }
    if newReplicas := getReplicasOrFail(t, patchedObj); newReplicas != 10 {
        t.Fatalf("dry-run patch to replicas didn't return new value: %v", newReplicas)
    }
    persistedObj, err := rsc.Get(name, metav1.GetOptions{}, "scale")
    if err != nil {
        t.Fatalf("failed to get scale sub-resource")
    }
    if newReplicas := getReplicasOrFail(t, persistedObj); newReplicas != replicas {
        t.Fatalf("number of replicas changed, expected %v, got %v", replicas, newReplicas)
    }
}

func DryRunScaleUpdateTest(t *testing.T, rsc dynamic.ResourceInterface, name string) {
    obj, err := rsc.Get(name, metav1.GetOptions{}, "scale")
    if errors.IsNotFound(err) {
        return
    }
    if err != nil {
        t.Fatalf("failed to get object: %v", err)
    }

    replicas := getReplicasOrFail(t, obj)
    unstructured.SetNestedField(obj.Object, int64(10), "spec", "replicas")
    updatedObj, err := rsc.Update(obj, metav1.UpdateOptions{DryRun: []string{metav1.DryRunAll}}, "scale")
    if err != nil {
        t.Fatalf("failed to dry-run update scale sub-resource: %v", err)
    }
    if newReplicas := getReplicasOrFail(t, updatedObj); newReplicas != 10 {
        t.Fatalf("dry-run update to replicas didn't return new value: %v", newReplicas)
    }
    persistedObj, err := rsc.Get(name, metav1.GetOptions{}, "scale")
    if err != nil {
        t.Fatalf("failed to get scale sub-resource")
    }
    if newReplicas := getReplicasOrFail(t, persistedObj); newReplicas != replicas {
        t.Fatalf("number of replicas changed, expected %v, got %v", replicas, newReplicas)
    }
}

func DryRunUpdateTest(t *testing.T, rsc dynamic.ResourceInterface, name string) {
    var err error
    var obj *unstructured.Unstructured
    for i := 0; i < 3; i++ {
        obj, err = rsc.Get(name, metav1.GetOptions{})
        if err != nil {
            t.Fatalf("failed to retrieve object: %v", err)
        }
        obj.SetAnnotations(map[string]string{"update": "true"})
        obj, err = rsc.Update(obj, metav1.UpdateOptions{DryRun: []string{metav1.DryRunAll}})
        if err == nil || !errors.IsConflict(err) {
            break
        }
    }
    if err != nil {
        t.Fatalf("failed to dry-run update resource: %v", err)
    }
    if v := obj.GetAnnotations()["update"]; v != "true" {
        t.Fatalf("dry-run updated annotations should be returned, got: %v", obj.GetAnnotations())
    }

    obj, err = rsc.Get(obj.GetName(), metav1.GetOptions{})
    if err != nil {
        t.Fatalf("failed to get object: %v", err)
    }
    if v := obj.GetAnnotations()["update"]; v == "true" {
        t.Fatalf("dry-run updated annotations should not be persisted, got: %v", obj.GetAnnotations())
    }
}

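The loop above hand-rolls up to three attempts on write conflicts. client-go ships an equivalent helper (k8s.io/client-go/util/retry, already a dependency of the deployment tests in this bump); a sketch of the same update expressed with it, assuming rsc and name as above:

	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		obj, err := rsc.Get(name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		obj.SetAnnotations(map[string]string{"update": "true"})
		_, err = rsc.Update(obj, metav1.UpdateOptions{DryRun: []string{metav1.DryRunAll}})
		return err // retried automatically while this is a Conflict error
	})
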
func DryRunDeleteCollectionTest(t *testing.T, rsc dynamic.ResourceInterface, name string) {
    err := rsc.DeleteCollection(&metav1.DeleteOptions{DryRun: []string{metav1.DryRunAll}}, metav1.ListOptions{})
    if err != nil {
        t.Fatalf("dry-run delete collection failed: %v", err)
    }
    obj, err := rsc.Get(name, metav1.GetOptions{})
    if err != nil {
        t.Fatalf("failed to get object: %v", err)
    }
    ts := obj.GetDeletionTimestamp()
    if ts != nil {
        t.Fatalf("object has a deletion timestamp after dry-run delete collection")
    }
}

func DryRunDeleteTest(t *testing.T, rsc dynamic.ResourceInterface, name string) {
    err := rsc.Delete(name, &metav1.DeleteOptions{DryRun: []string{metav1.DryRunAll}})
    if err != nil {
        t.Fatalf("dry-run delete failed: %v", err)
    }
    obj, err := rsc.Get(name, metav1.GetOptions{})
    if err != nil {
        t.Fatalf("failed to get object: %v", err)
    }
    ts := obj.GetDeletionTimestamp()
    if ts != nil {
        t.Fatalf("object has a deletion timestamp after dry-run delete")
    }
}

// TestDryRun tests dry-run on all types.
func TestDryRun(t *testing.T) {
    certDir, _ := ioutil.TempDir("", "test-integration-dryrun")
    defer os.RemoveAll(certDir)

    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DryRun, true)()
    clientConfig := startRealMasterOrDie(t, certDir)
    dClient := dynamic.NewForConfigOrDie(clientConfig)
    kubeClient := clientset.NewForConfigOrDie(clientConfig)
    if _, err := kubeClient.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}); err != nil {
        t.Fatal(err)
    }

    discoveryClient := cacheddiscovery.NewMemCacheClient(kubeClient.Discovery())
    restMapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient)
    restMapper.Reset()

    serverResources, err := kubeClient.Discovery().ServerResources()
    if err != nil {
        t.Fatal(err)
    }
    resourcesToTest := getResourcesToTest(serverResources, false, t)

    for _, resourceToTest := range resourcesToTest {
        t.Run(resourceToTest.gvr.String(), func(t *testing.T) {
            gvk := resourceToTest.gvk
            gvResource := resourceToTest.gvr
            kind := gvk.Kind

            mapping := &meta.RESTMapping{
                Resource:         resourceToTest.gvr,
                GroupVersionKind: resourceToTest.gvk,
                Scope:            meta.RESTScopeRoot,
            }
            if resourceToTest.namespaced {
                mapping.Scope = meta.RESTScopeNamespace
            }

            if kindWhiteList.Has(kind) {
                t.Skip("whitelisted")
            }

            testData, hasTest := dryrunData[gvResource]

            if !hasTest {
                t.Fatalf("no test data for %s. Please add a test for your new type to dryrunData.", gvResource)
            }

            // we don't require GVK on the data we provide, so we fill it in here. We could, but that seems extraneous.
            typeMetaAdder := map[string]interface{}{}
            err := json.Unmarshal([]byte(testData.stub), &typeMetaAdder)
            if err != nil {
                t.Fatalf("failed to unmarshal stub (%v): %v", testData.stub, err)
            }
            typeMetaAdder["apiVersion"] = mapping.GroupVersionKind.GroupVersion().String()
            typeMetaAdder["kind"] = mapping.GroupVersionKind.Kind

            rsc := dClient.Resource(mapping.Resource).Namespace(testNamespace)
            if mapping.Scope == meta.RESTScopeRoot {
                rsc = dClient.Resource(mapping.Resource)
            }
            obj := &unstructured.Unstructured{Object: typeMetaAdder}
            name := obj.GetName()

            DryRunCreateTest(t, rsc, obj, gvResource)

            if _, err := rsc.Create(obj, metav1.CreateOptions{}); err != nil {
                t.Fatalf("failed to create stub for %s: %#v", gvResource, err)
            }

            DryRunUpdateTest(t, rsc, name)
            DryRunPatchTest(t, rsc, name)
            DryRunScalePatchTest(t, rsc, name)
            DryRunScaleUpdateTest(t, rsc, name)
            if resourceToTest.hasDeleteCollection {
                DryRunDeleteCollectionTest(t, rsc, name)
            }
            DryRunDeleteTest(t, rsc, name)

            if err = rsc.Delete(obj.GetName(), metav1.NewDeleteOptions(0)); err != nil {
                t.Fatalf("deleting final object failed: %v", err)
            }
        })
    }
}

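For manual poking at a server like this one, the same machinery sits behind kubectl's alpha server-side dry-run flag in the 1.13 timeframe (flag name as of that release; verify against your kubectl version):

	kubectl apply --server-dry-run -f configmap.yaml
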
func startRealMasterOrDie(t *testing.T, certDir string) *restclient.Config {
    _, defaultServiceClusterIPRange, err := net.ParseCIDR("10.0.0.0/24")
    if err != nil {
        t.Fatal(err)
    }

    listener, _, err := genericapiserveroptions.CreateListener("tcp", "127.0.0.1:0")
    if err != nil {
        t.Fatal(err)
    }

    kubeAPIServerOptions := options.NewServerRunOptions()
    kubeAPIServerOptions.InsecureServing.BindPort = 0
    kubeAPIServerOptions.SecureServing.Listener = listener
    kubeAPIServerOptions.SecureServing.ServerCert.CertDirectory = certDir
    kubeAPIServerOptions.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURL()}
    kubeAPIServerOptions.Etcd.DefaultStorageMediaType = runtime.ContentTypeJSON // force JSON so we can easily interpret the result in etcd
    kubeAPIServerOptions.ServiceClusterIPRange = *defaultServiceClusterIPRange
    kubeAPIServerOptions.Authorization.Modes = []string{"RBAC"}
    kubeAPIServerOptions.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}
    completedOptions, err := app.Complete(kubeAPIServerOptions)
    if err != nil {
        t.Fatal(err)
    }
    kubeAPIServerOptions.APIEnablement.RuntimeConfig.Set("api/all=true")

    kubeAPIServer, err := app.CreateServerChain(completedOptions, wait.NeverStop)
    if err != nil {
        t.Fatal(err)
    }
    kubeClientConfig := restclient.CopyConfig(kubeAPIServer.LoopbackClientConfig)

    go func() {
        // Catch panics that occur in this goroutine so we get a comprehensible failure
        defer func() {
            if err := recover(); err != nil {
                t.Errorf("Unexpected panic trying to start API master: %#v", err)
            }
        }()

        if err := kubeAPIServer.PrepareRun().Run(wait.NeverStop); err != nil {
            t.Fatal(err)
        }
    }()

    lastHealth := ""
    if err := wait.PollImmediate(time.Second, time.Minute, func() (done bool, err error) {
        // wait for the server to be healthy
        result := clientset.NewForConfigOrDie(kubeClientConfig).RESTClient().Get().AbsPath("/healthz").Do()
        content, _ := result.Raw()
        lastHealth = string(content)
        if errResult := result.Error(); errResult != nil {
            t.Log(errResult)
            return false, nil
        }
        var status int
        result.StatusCode(&status)
        return status == http.StatusOK, nil
    }); err != nil {
        t.Log(lastHealth)
        t.Fatal(err)
    }

    // this test makes lots of requests, don't be slow
    kubeClientConfig.QPS = 99999
    kubeClientConfig.Burst = 9999

    return kubeClientConfig
}

func gvr(g, v, r string) schema.GroupVersionResource {
    return schema.GroupVersionResource{Group: g, Version: v, Resource: r}
}

type resourceToTest struct {
    gvk                 schema.GroupVersionKind
    gvr                 schema.GroupVersionResource
    namespaced          bool
    hasDeleteCollection bool
}

func getResourcesToTest(serverResources []*metav1.APIResourceList, isOAPI bool, t *testing.T) []resourceToTest {
    resourcesToTest := []resourceToTest{}

    for _, discoveryGroup := range serverResources {
        for _, discoveryResource := range discoveryGroup.APIResources {
            // this is a subresource, skip it
            if strings.Contains(discoveryResource.Name, "/") {
                continue
            }
            hasCreate := false
            hasGet := false
            hasDeleteCollection := false
            for _, verb := range discoveryResource.Verbs {
                if string(verb) == "get" {
                    hasGet = true
                }
                if string(verb) == "create" {
                    hasCreate = true
                }
                if string(verb) == "deletecollection" {
                    hasDeleteCollection = true
                }
            }
            if !(hasCreate && hasGet) {
                continue
            }

            resourceGV, err := schema.ParseGroupVersion(discoveryGroup.GroupVersion)
            if err != nil {
                t.Fatal(err)
            }
            gvk := resourceGV.WithKind(discoveryResource.Kind)
            if len(discoveryResource.Group) > 0 || len(discoveryResource.Version) > 0 {
                gvk = schema.GroupVersionKind{
                    Group:   discoveryResource.Group,
                    Version: discoveryResource.Version,
                    Kind:    discoveryResource.Kind,
                }
            }
            gvr := resourceGV.WithResource(discoveryResource.Name)

            resourcesToTest = append(resourcesToTest, resourceToTest{
                gvk:                 gvk,
                gvr:                 gvr,
                namespaced:          discoveryResource.Namespaced,
                hasDeleteCollection: hasDeleteCollection,
            })
        }
    }

    return resourcesToTest
}
27
vendor/k8s.io/kubernetes/test/integration/dryrun/main_test.go
generated
vendored
Normal file
@@ -0,0 +1,27 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package dryrun

import (
    "testing"

    "k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
    framework.EtcdMain(m.Run)
}
35
vendor/k8s.io/kubernetes/test/integration/etcd/BUILD
generated
vendored
@@ -19,29 +19,26 @@ go_test(
    deps = [
        "//cmd/kube-apiserver/app:go_default_library",
        "//cmd/kube-apiserver/app/options:go_default_library",
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/master:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/server/options:go_default_library",
        "//staging/src/k8s.io/client-go/discovery/cached:go_default_library",
        "//staging/src/k8s.io/client-go/dynamic:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/restmapper:go_default_library",
        "//test/integration:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/coreos/etcd/clientv3:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
        "//vendor/k8s.io/client-go/discovery/cached:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/restmapper:go_default_library",
        "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
    ],
)

6
vendor/k8s.io/kubernetes/test/integration/etcd/OWNERS
generated
vendored
Executable file
@@ -0,0 +1,6 @@
approvers:
- enj
reviewers:
- deads2k
- liggitt
- enj
626
vendor/k8s.io/kubernetes/test/integration/etcd/etcd_storage_path_test.go
generated
vendored
@@ -21,13 +21,11 @@ import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "mime"
    "net"
    "net/http"
    "os"
    "reflect"
    "strings"
    "sync/atomic"
    "testing"
    "time"

@@ -35,22 +33,20 @@ import (
    apiequality "k8s.io/apimachinery/pkg/api/equality"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/util/diff"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/wait"
    genericapiserver "k8s.io/apiserver/pkg/server"
    genericapiserveroptions "k8s.io/apiserver/pkg/server/options"
    "k8s.io/apiserver/pkg/storage/storagebackend"
    cacheddiscovery "k8s.io/client-go/discovery/cached"
    "k8s.io/client-go/dynamic"
    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    "k8s.io/client-go/util/flowcontrol"
    "k8s.io/client-go/restmapper"
    "k8s.io/kubernetes/cmd/kube-apiserver/app"
    "k8s.io/kubernetes/cmd/kube-apiserver/app/options"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    kapi "k8s.io/kubernetes/pkg/apis/core"
    "k8s.io/kubernetes/test/integration"
    "k8s.io/kubernetes/test/integration/framework"

@@ -58,7 +54,6 @@ import (
    _ "k8s.io/kubernetes/pkg/master" // TODO what else is needed

    "github.com/coreos/etcd/clientv3"
    "k8s.io/client-go/restmapper"
)

// Etcd data for all persisted objects.
@@ -215,6 +210,14 @@ var etcdStorageData = map[schema.GroupVersionResource]struct {
    },
    // --

    // k8s.io/kubernetes/pkg/apis/autoscaling/v2beta2
    gvr("autoscaling", "v2beta2", "horizontalpodautoscalers"): {
        stub:             `{"metadata": {"name": "hpa3"}, "spec": {"maxReplicas": 3, "scaleTargetRef": {"kind": "something", "name": "cross"}}}`,
        expectedEtcdPath: "/registry/horizontalpodautoscalers/etcdstoragepathtestnamespace/hpa3",
        expectedGVK:      gvkP("autoscaling", "v1", "HorizontalPodAutoscaler"),
    },
    // --

    // k8s.io/kubernetes/pkg/apis/batch/v1
    gvr("batch", "v1", "jobs"): {
        stub: `{"metadata": {"name": "job1"}, "spec": {"manualSelector": true, "selector": {"matchLabels": {"controller-uid": "uid1"}}, "template": {"metadata": {"labels": {"controller-uid": "uid1"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container1"}], "dnsPolicy": "ClusterFirst", "restartPolicy": "Never"}}}}`,

@@ -244,6 +247,13 @@ var etcdStorageData = map[schema.GroupVersionResource]struct {
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/coordination/v1beta1
|
||||
gvr("coordination.k8s.io", "v1beta1", "leases"): {
|
||||
stub: `{"metadata": {"name": "lease1"}, "spec": {"holderIdentity": "holder", "leaseDurationSeconds": 5}}`,
|
||||
expectedEtcdPath: "/registry/leases/etcdstoragepathtestnamespace/lease1",
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/events/v1beta1
|
||||
gvr("events.k8s.io", "v1beta1", "events"): {
|
||||
stub: `{"metadata": {"name": "event2"}, "regarding": {"namespace": "etcdstoragepathtestnamespace"}, "note": "some data here", "eventTime": "2017-08-09T15:04:05.000000Z", "reportingInstance": "node-xyz", "reportingController": "k8s.io/my-controller", "action": "DidNothing", "reason": "Laziness"}`,
|
||||
@@ -434,119 +444,36 @@ var etcdStorageData = map[schema.GroupVersionResource]struct {
|
||||
expectedEtcdPath: "/registry/priorityclasses/pc2",
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1
|
||||
// depends on aggregator using the same ungrouped RESTOptionsGetter as the kube apiserver, not SimpleRestOptionsFactory in aggregator.go
|
||||
gvr("apiregistration.k8s.io", "v1beta1", "apiservices"): {
|
||||
stub: `{"metadata": {"name": "as1.foo.com"}, "spec": {"group": "foo.com", "version": "as1", "groupPriorityMinimum":100, "versionPriority":10}}`,
|
||||
expectedEtcdPath: "/registry/apiregistration.k8s.io/apiservices/as1.foo.com",
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
|
||||
// depends on aggregator using the same ungrouped RESTOptionsGetter as the kube apiserver, not SimpleRestOptionsFactory in aggregator.go
|
||||
gvr("apiregistration.k8s.io", "v1", "apiservices"): {
|
||||
stub: `{"metadata": {"name": "as2.foo.com"}, "spec": {"group": "foo.com", "version": "as2", "groupPriorityMinimum":100, "versionPriority":10}}`,
|
||||
expectedEtcdPath: "/registry/apiregistration.k8s.io/apiservices/as2.foo.com",
|
||||
expectedGVK: gvkP("apiregistration.k8s.io", "v1beta1", "APIService"),
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1
|
||||
gvr("apiextensions.k8s.io", "v1beta1", "customresourcedefinitions"): {
|
||||
stub: `{"metadata": {"name": "openshiftwebconsoleconfigs.webconsole.operator.openshift.io"},"spec": {"scope": "Cluster","group": "webconsole.operator.openshift.io","version": "v1alpha1","names": {"kind": "OpenShiftWebConsoleConfig","plural": "openshiftwebconsoleconfigs","singular": "openshiftwebconsoleconfig"}}}`,
|
||||
expectedEtcdPath: "/registry/apiextensions.k8s.io/customresourcedefinitions/openshiftwebconsoleconfigs.webconsole.operator.openshift.io",
|
||||
},
|
||||
// --
|
||||
|
||||
}
|
||||
|
||||
// Be very careful when whitelisting an object as ephemeral.
|
||||
// Doing so removes the safety we gain from this test by skipping that object.
|
||||
var ephemeralWhiteList = createEphemeralWhiteList(
|
||||
|
||||
// k8s.io/kubernetes/pkg/api/v1
|
||||
gvk("", "v1", "Binding"), // annotation on pod, not stored in etcd
|
||||
gvk("", "v1", "RangeAllocation"), // stored in various places in etcd but cannot be directly created
|
||||
gvk("", "v1", "ComponentStatus"), // status info not stored in etcd
|
||||
gvk("", "v1", "SerializedReference"), // used for serilization, not stored in etcd
gvk("", "v1", "PodStatusResult"), // wrapper object not stored in etcd
// --

// k8s.io/kubernetes/pkg/apis/authentication/v1beta1
gvk("authentication.k8s.io", "v1beta1", "TokenReview"), // not stored in etcd
// --

// k8s.io/kubernetes/pkg/apis/authentication/v1
gvk("authentication.k8s.io", "v1", "TokenReview"), // not stored in etcd
gvk("authentication.k8s.io", "v1", "TokenRequest"), // not stored in etcd
// --

// k8s.io/kubernetes/pkg/apis/authorization/v1beta1

// SRR objects that are not stored in etcd
gvk("authorization.k8s.io", "v1beta1", "SelfSubjectRulesReview"),
// SAR objects that are not stored in etcd
gvk("authorization.k8s.io", "v1beta1", "SelfSubjectAccessReview"),
gvk("authorization.k8s.io", "v1beta1", "LocalSubjectAccessReview"),
gvk("authorization.k8s.io", "v1beta1", "SubjectAccessReview"),
// --

// k8s.io/kubernetes/pkg/apis/authorization/v1

// SRR objects that are not stored in etcd
gvk("authorization.k8s.io", "v1", "SelfSubjectRulesReview"),
// SAR objects that are not stored in etcd
gvk("authorization.k8s.io", "v1", "SelfSubjectAccessReview"),
gvk("authorization.k8s.io", "v1", "LocalSubjectAccessReview"),
gvk("authorization.k8s.io", "v1", "SubjectAccessReview"),
// --

// k8s.io/kubernetes/pkg/apis/autoscaling/v1
gvk("autoscaling", "v1", "Scale"), // not stored in etcd, part of kapiv1.ReplicationController
// --

// k8s.io/kubernetes/pkg/apis/apps/v1beta1
gvk("apps", "v1beta1", "Scale"), // not stored in etcd, part of kapiv1.ReplicationController
gvk("apps", "v1beta1", "DeploymentRollback"), // used to rollback deployment, not stored in etcd
// --

// k8s.io/kubernetes/pkg/apis/apps/v1beta2
gvk("apps", "v1beta2", "Scale"), // not stored in etcd, part of kapiv1.ReplicationController
// --

// k8s.io/kubernetes/pkg/apis/batch/v1beta1
gvk("batch", "v1beta1", "JobTemplate"), // not stored in etcd
// --

// k8s.io/kubernetes/pkg/apis/batch/v2alpha1
gvk("batch", "v2alpha1", "JobTemplate"), // not stored in etcd
// --

// k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1
gvk("componentconfig", "v1alpha1", "KubeSchedulerConfiguration"), // not stored in etcd
// --

// k8s.io/kubernetes/pkg/apis/extensions/v1beta1
gvk("extensions", "v1beta1", "DeploymentRollback"), // used to rollback deployment, not stored in etcd
gvk("extensions", "v1beta1", "ReplicationControllerDummy"), // not stored in etcd
gvk("extensions", "v1beta1", "Scale"), // not stored in etcd, part of kapiv1.ReplicationController
// --

// k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1
gvk("imagepolicy.k8s.io", "v1alpha1", "ImageReview"), // not stored in etcd
// --

// k8s.io/kubernetes/pkg/apis/policy/v1beta1
gvk("policy", "v1beta1", "Eviction"), // not stored in etcd, deals with evicting kapiv1.Pod
// --

// k8s.io/kubernetes/pkg/apis/admission/v1beta1
gvk("admission.k8s.io", "v1beta1", "AdmissionReview"), // not stored in etcd, call out to webhooks.
// --
)

// Only add kinds to this list when there is no way to create the object
var kindWhiteList = sets.NewString(
// k8s.io/kubernetes/pkg/api/v1
"DeleteOptions",
"ExportOptions",
"ListOptions",
"NodeProxyOptions",
"PodAttachOptions",
"PodExecOptions",
"PodLogOptions",
"PodProxyOptions",
"ServiceProxyOptions",
"GetOptions",
"APIGroup",
"PodPortForwardOptions",
"APIVersions",
// --

// k8s.io/kubernetes/pkg/watch/versioned
"WatchEvent",
// --

// k8s.io/apimachinery/pkg/apis/meta/v1
"Status",
// --
)
// Only add kinds to this list when this is a virtual resource with get and create verbs that doesn't actually
// store into its kind. We've used this downstream for mappings before.
var kindWhiteList = sets.NewString()

// namespace used for all tests, do not change this
const testNamespace = "etcdstoragepathtestnamespace"
@@ -559,67 +486,73 @@ func TestEtcdStoragePath(t *testing.T) {
certDir, _ := ioutil.TempDir("", "test-integration-etcd")
defer os.RemoveAll(certDir)

client, kvClient, mapper := startRealMasterOrDie(t, certDir)
clientConfig, kvClient := startRealMasterOrDie(t, certDir)
defer func() {
dumpEtcdKVOnFailure(t, kvClient)
}()

client := &allClient{dynamicClient: dynamic.NewForConfigOrDie(clientConfig)}
kubeClient := clientset.NewForConfigOrDie(clientConfig)
if _, err := kubeClient.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}); err != nil {
t.Fatal(err)
}

discoveryClient := cacheddiscovery.NewMemCacheClient(kubeClient.Discovery())
restMapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient)
restMapper.Reset()

resourcesToPersist := []resourceToPersist{}
serverResources, err := kubeClient.Discovery().ServerResources()
if err != nil {
t.Fatal(err)
}
resourcesToPersist = append(resourcesToPersist, getResourcesToPersist(serverResources, false, t)...)

kindSeen := sets.NewString()
pathSeen := map[string][]schema.GroupVersionResource{}
etcdSeen := map[schema.GroupVersionResource]empty{}
ephemeralSeen := map[schema.GroupVersionKind]empty{}
cohabitatingResources := map[string]map[schema.GroupVersionKind]empty{}

for gvk, apiType := range legacyscheme.Scheme.AllKnownTypes() {
// we do not care about internal objects or lists // TODO make sure this is always true
if gvk.Version == runtime.APIVersionInternal || strings.HasSuffix(apiType.Name(), "List") {
continue
}
for _, resourceToPersist := range resourcesToPersist {
t.Run(resourceToPersist.gvr.String(), func(t *testing.T) {
gvk := resourceToPersist.gvk
gvResource := resourceToPersist.gvr
kind := gvk.Kind

kind := gvk.Kind
pkgPath := apiType.PkgPath()

if kindWhiteList.Has(kind) {
kindSeen.Insert(kind)
continue
}
_, isEphemeral := ephemeralWhiteList[gvk]
if isEphemeral {
ephemeralSeen[gvk] = empty{}
continue
}

mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
if err != nil {
t.Errorf("unexpected error getting mapping for %s from %s with GVK %s: %v", kind, pkgPath, gvk, err)
continue
}

etcdSeen[mapping.Resource] = empty{}

testData, hasTest := etcdStorageData[mapping.Resource]

if !hasTest {
t.Errorf("no test data for %s from %s. Please add a test for your new type to etcdStorageData.", kind, pkgPath)
continue
}

if len(testData.expectedEtcdPath) == 0 {
t.Errorf("empty test data for %s from %s", kind, pkgPath)
continue
}

shouldCreate := len(testData.stub) != 0 // try to create only if we have a stub

var input *metaObject
if shouldCreate {
if input, err = jsonToMetaObject([]byte(testData.stub)); err != nil || input.isEmpty() {
t.Errorf("invalid test data for %s from %s: %v", kind, pkgPath, err)
continue
mapping := &meta.RESTMapping{
Resource: resourceToPersist.gvr,
GroupVersionKind: resourceToPersist.gvk,
Scope: meta.RESTScopeRoot,
}
if resourceToPersist.namespaced {
mapping.Scope = meta.RESTScopeNamespace
}

if kindWhiteList.Has(kind) {
kindSeen.Insert(kind)
t.Skip("whitelisted")
}

etcdSeen[gvResource] = empty{}
testData, hasTest := etcdStorageData[gvResource]

if !hasTest {
t.Fatalf("no test data for %s. Please add a test for your new type to etcdStorageData.", gvResource)
}

if len(testData.expectedEtcdPath) == 0 {
t.Fatalf("empty test data for %s", gvResource)
}

shouldCreate := len(testData.stub) != 0 // try to create only if we have a stub

var input *metaObject
if shouldCreate {
if input, err = jsonToMetaObject([]byte(testData.stub)); err != nil || input.isEmpty() {
t.Fatalf("invalid test data for %s: %v", gvResource, err)
}
}
}

func() { // forces defer to run per iteration of the for loop
all := &[]cleanupData{}
defer func() {
if !t.Failed() { // do not cleanup if test has already failed since we may need things in the etcd dump
@@ -629,54 +562,46 @@ func TestEtcdStoragePath(t *testing.T) {
}
}()

if err := client.createPrerequisites(mapper, testNamespace, testData.prerequisites, all); err != nil {
t.Errorf("failed to create prerequisites for %s from %s: %#v", kind, pkgPath, err)
return
if err := client.createPrerequisites(restMapper, testNamespace, testData.prerequisites, all); err != nil {
t.Fatalf("failed to create prerequisites for %s: %#v", gvResource, err)
}

if shouldCreate { // do not try to create items with no stub
if err := client.create(testData.stub, testNamespace, mapping, all); err != nil {
t.Errorf("failed to create stub for %s from %s: %#v", kind, pkgPath, err)
return
t.Fatalf("failed to create stub for %s: %#v", gvResource, err)
}
}

output, err := getFromEtcd(kvClient, testData.expectedEtcdPath)
if err != nil {
t.Errorf("failed to get from etcd for %s from %s: %#v", kind, pkgPath, err)
return
t.Fatalf("failed to get from etcd for %s: %#v", gvResource, err)
}

expectedGVK := gvk
if testData.expectedGVK != nil {
if gvk == *testData.expectedGVK {
t.Errorf("GVK override %s for %s from %s is unnecessary or something was changed incorrectly", testData.expectedGVK, kind, pkgPath)
t.Errorf("GVK override %s for %s is unnecessary or something was changed incorrectly", testData.expectedGVK, gvk)
}
expectedGVK = *testData.expectedGVK
}

actualGVK := output.getGVK()
if actualGVK != expectedGVK {
t.Errorf("GVK for %s from %s does not match, expected %s got %s", kind, pkgPath, expectedGVK, actualGVK)
t.Errorf("GVK for %s does not match, expected %s got %s", kind, expectedGVK, actualGVK)
}

if !apiequality.Semantic.DeepDerivative(input, output) {
t.Errorf("Test stub for %s from %s does not match: %s", kind, pkgPath, diff.ObjectGoPrintDiff(input, output))
t.Errorf("Test stub for %s does not match: %s", kind, diff.ObjectGoPrintDiff(input, output))
}

addGVKToEtcdBucket(cohabitatingResources, actualGVK, getEtcdBucket(testData.expectedEtcdPath))
pathSeen[testData.expectedEtcdPath] = append(pathSeen[testData.expectedEtcdPath], mapping.Resource)
}()
})
}

if inEtcdData, inEtcdSeen := diffMaps(etcdStorageData, etcdSeen); len(inEtcdData) != 0 || len(inEtcdSeen) != 0 {
t.Errorf("etcd data does not match the types we saw:\nin etcd data but not seen:\n%s\nseen but not in etcd data:\n%s", inEtcdData, inEtcdSeen)
}

if inEphemeralWhiteList, inEphemeralSeen := diffMaps(ephemeralWhiteList, ephemeralSeen); len(inEphemeralWhiteList) != 0 || len(inEphemeralSeen) != 0 {
t.Errorf("ephemeral whitelist does not match the types we saw:\nin ephemeral whitelist but not seen:\n%s\nseen but not in ephemeral whitelist:\n%s", inEphemeralWhiteList, inEphemeralSeen)
}

if inKindData, inKindSeen := diffMaps(kindWhiteList, kindSeen); len(inKindData) != 0 || len(inKindSeen) != 0 {
t.Errorf("kind whitelist data does not match the types we saw:\nin kind whitelist but not seen:\n%s\nseen but not in kind whitelist:\n%s", inKindData, inKindSeen)
}
@@ -702,14 +627,37 @@ func TestEtcdStoragePath(t *testing.T) {
}
}

func startRealMasterOrDie(t *testing.T, certDir string) (*allClient, clientv3.KV, meta.RESTMapper) {
func startRealMasterOrDie(t *testing.T, certDir string) (*restclient.Config, clientv3.KV) {
_, defaultServiceClusterIPRange, err := net.ParseCIDR("10.0.0.0/24")
if err != nil {
t.Fatal(err)
}

kubeClientConfigValue := atomic.Value{}
storageConfigValue := atomic.Value{}
listener, _, err := genericapiserveroptions.CreateListener("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}

kubeAPIServerOptions := options.NewServerRunOptions()
kubeAPIServerOptions.InsecureServing.BindPort = 0
kubeAPIServerOptions.SecureServing.Listener = listener
kubeAPIServerOptions.SecureServing.ServerCert.CertDirectory = certDir
kubeAPIServerOptions.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURL()}
kubeAPIServerOptions.Etcd.DefaultStorageMediaType = runtime.ContentTypeJSON // force json so we can easily interpret the result in etcd
kubeAPIServerOptions.ServiceClusterIPRange = *defaultServiceClusterIPRange
kubeAPIServerOptions.Authorization.Modes = []string{"RBAC"}
kubeAPIServerOptions.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}
completedOptions, err := app.Complete(kubeAPIServerOptions)
if err != nil {
t.Fatal(err)
}
kubeAPIServerOptions.APIEnablement.RuntimeConfig.Set("api/all=true")

kubeAPIServer, err := app.CreateServerChain(completedOptions, wait.NeverStop)
if err != nil {
t.Fatal(err)
}
kubeClientConfig := restclient.CopyConfig(kubeAPIServer.LoopbackClientConfig)

go func() {
// Catch panics that occur in this go routine so we get a comprehensible failure
@@ -719,66 +667,17 @@ func startRealMasterOrDie(t *testing.T, certDir string) (*allClient, clientv3.KV
}
}()

listener, _, err := genericapiserveroptions.CreateListener("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}

kubeAPIServerOptions := options.NewServerRunOptions()
kubeAPIServerOptions.SecureServing.Listener = listener
kubeAPIServerOptions.SecureServing.ServerCert.CertDirectory = certDir
kubeAPIServerOptions.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURL()}
kubeAPIServerOptions.Etcd.DefaultStorageMediaType = runtime.ContentTypeJSON // TODO use protobuf?
kubeAPIServerOptions.ServiceClusterIPRange = *defaultServiceClusterIPRange
kubeAPIServerOptions.Authorization.Modes = []string{"RBAC"}
kubeAPIServerOptions.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}
completedOptions, err := app.Complete(kubeAPIServerOptions)
if err != nil {
t.Fatal(err)
}

tunneler, proxyTransport, err := app.CreateNodeDialer(completedOptions)
if err != nil {
t.Fatal(err)
}
kubeAPIServerConfig, sharedInformers, versionedInformers, _, _, _, admissionPostStartHook, err := app.CreateKubeAPIServerConfig(completedOptions, tunneler, proxyTransport)
if err != nil {
t.Fatal(err)
}

kubeAPIServerConfig.ExtraConfig.APIResourceConfigSource = &allResourceSource{} // force enable all resources

kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.NewEmptyDelegate(), sharedInformers, versionedInformers, admissionPostStartHook)
if err != nil {
t.Fatal(err)
}

kubeClientConfigValue.Store(kubeAPIServerConfig.GenericConfig.LoopbackClientConfig)
storageConfigValue.Store(kubeAPIServerOptions.Etcd.StorageConfig)

if err := kubeAPIServer.GenericAPIServer.PrepareRun().Run(wait.NeverStop); err != nil {
if err := kubeAPIServer.PrepareRun().Run(wait.NeverStop); err != nil {
t.Fatal(err)
}
}()

lastHealth := ""
if err := wait.PollImmediate(time.Second, time.Minute, func() (done bool, err error) {
obj := kubeClientConfigValue.Load()
if obj == nil {
return false, nil
}
kubeClientConfig := kubeClientConfigValue.Load().(*restclient.Config)
// make a copy so we can mutate it to set GroupVersion and NegotiatedSerializer
cfg := *kubeClientConfig
cfg.ContentConfig.GroupVersion = &schema.GroupVersion{}
cfg.ContentConfig.NegotiatedSerializer = legacyscheme.Codecs
privilegedClient, err := restclient.RESTClientFor(&cfg)
if err != nil {
// this happens because we race the API server start
t.Log(err)
return false, nil
}
// wait for the server to be healthy
result := privilegedClient.Get().AbsPath("/healthz").Do()
result := clientset.NewForConfigOrDie(kubeClientConfig).RESTClient().Get().AbsPath("/healthz").Do()
content, _ := result.Raw()
lastHealth = string(content)
if errResult := result.Error(); errResult != nil {
t.Log(errResult)
return false, nil
@@ -787,32 +686,20 @@ func startRealMasterOrDie(t *testing.T, certDir string) (*allClient, clientv3.KV
result.StatusCode(&status)
return status == http.StatusOK, nil
}); err != nil {
t.Log(lastHealth)
t.Fatal(err)
}

kubeClientConfig := kubeClientConfigValue.Load().(*restclient.Config)
storageConfig := storageConfigValue.Load().(storagebackend.Config)
// this test makes lots of requests, don't be slow
kubeClientConfig.QPS = 99999
kubeClientConfig.Burst = 9999

kubeClient := clientset.NewForConfigOrDie(kubeClientConfig)
if _, err := kubeClient.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}); err != nil {
t.Fatal(err)
}

client, err := newClient(*kubeClientConfig)
kvClient, err := integration.GetEtcdKVClient(kubeAPIServerOptions.Etcd.StorageConfig)
if err != nil {
t.Fatal(err)
}

kvClient, err := integration.GetEtcdKVClient(storageConfig)
if err != nil {
t.Fatal(err)
}

discoveryClient := cacheddiscovery.NewMemCacheClient(kubeClient.Discovery())
restMapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient)
restMapper.Reset()

return client, kvClient, restMapper
return kubeClientConfig, kvClient
}

func dumpEtcdKVOnFailure(t *testing.T, kvClient clientv3.KV) {
@@ -853,14 +740,14 @@ func getEtcdBucket(path string) string {
// stable fields to compare as a sanity check
type metaObject struct {
// all of type meta
Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"`
Kind string `json:"kind,omitempty"`
APIVersion string `json:"apiVersion,omitempty"`

// parts of object meta
Metadata struct {
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
} `json:"metadata,omitempty" protobuf:"bytes,3,opt,name=metadata"`
Name string `json:"name,omitempty"`
Namespace string `json:"namespace,omitempty"`
} `json:"metadata,omitempty"`
}

func (obj *metaObject) getGVK() schema.GroupVersionKind {
@@ -879,33 +766,18 @@ type prerequisite struct {
type empty struct{}

type cleanupData struct {
obj runtime.Object
mapping *meta.RESTMapping
obj *unstructured.Unstructured
resource schema.GroupVersionResource
}

func gvr(g, v, r string) schema.GroupVersionResource {
return schema.GroupVersionResource{Group: g, Version: v, Resource: r}
}

func gvk(g, v, k string) schema.GroupVersionKind {
return schema.GroupVersionKind{Group: g, Version: v, Kind: k}
}

func gvkP(g, v, k string) *schema.GroupVersionKind {
return &schema.GroupVersionKind{Group: g, Version: v, Kind: k}
}

func createEphemeralWhiteList(gvks ...schema.GroupVersionKind) map[schema.GroupVersionKind]empty {
ephemeral := map[schema.GroupVersionKind]empty{}
for _, gvKind := range gvks {
if _, ok := ephemeral[gvKind]; ok {
panic("invalid ephemeral whitelist contains duplicate keys")
}
ephemeral[gvKind] = empty{}
}
return ephemeral
}

func jsonToMetaObject(stub []byte) (*metaObject, error) {
obj := &metaObject{}
if err := json.Unmarshal(stub, obj); err != nil {
@@ -929,67 +801,38 @@ func keyStringer(i interface{}) string {
}

type allClient struct {
client *http.Client
config *restclient.Config
backoff restclient.BackoffManager
}

func (c *allClient) verb(verb string, gvk schema.GroupVersionKind) (*restclient.Request, error) {
apiPath := "/apis"
if gvk.Group == kapi.GroupName {
apiPath = "/api"
}
baseURL, versionedAPIPath, err := restclient.DefaultServerURL(c.config.Host, apiPath, gvk.GroupVersion(), true)
if err != nil {
return nil, err
}
contentConfig := c.config.ContentConfig
gv := gvk.GroupVersion()
contentConfig.GroupVersion = &gv
serializers, err := createSerializers(contentConfig)
if err != nil {
return nil, err
}
return restclient.NewRequest(c.client, verb, baseURL, versionedAPIPath, contentConfig, *serializers, c.backoff, c.config.RateLimiter, 0), nil
dynamicClient dynamic.Interface
}

func (c *allClient) create(stub, ns string, mapping *meta.RESTMapping, all *[]cleanupData) error {
req, err := c.verb("POST", mapping.GroupVersionKind)
// we don't require GVK on the data we provide, so we fill it in here. We could, but that seems extraneous.
typeMetaAdder := map[string]interface{}{}
err := json.Unmarshal([]byte(stub), &typeMetaAdder)
if err != nil {
return err
}
namespaced := mapping.Scope.Name() == meta.RESTScopeNameNamespace
output, err := req.NamespaceIfScoped(ns, namespaced).Resource(mapping.Resource.Resource).Body(strings.NewReader(stub)).Do().Get()
if err != nil {
return err
}
*all = append(*all, cleanupData{output, mapping})
return nil
}
typeMetaAdder["apiVersion"] = mapping.GroupVersionKind.GroupVersion().String()
typeMetaAdder["kind"] = mapping.GroupVersionKind.Kind

func (c *allClient) destroy(obj runtime.Object, mapping *meta.RESTMapping) error {
req, err := c.verb("DELETE", mapping.GroupVersionKind)
if mapping.Scope == meta.RESTScopeRoot {
ns = ""
}
obj := &unstructured.Unstructured{Object: typeMetaAdder}
actual, err := c.dynamicClient.Resource(mapping.Resource).Namespace(ns).Create(obj, metav1.CreateOptions{})
if err != nil {
return err
}
namespaced := mapping.Scope.Name() == meta.RESTScopeNameNamespace
name, err := meta.NewAccessor().Name(obj)
if err != nil {
return err
}
ns, err := meta.NewAccessor().Namespace(obj)
if err != nil {
return err
}
return req.NamespaceIfScoped(ns, namespaced).Resource(mapping.Resource.Resource).Name(name).Do().Error()

*all = append(*all, cleanupData{actual, mapping.Resource})
return nil
}

func (c *allClient) cleanup(all *[]cleanupData) error {
for i := len(*all) - 1; i >= 0; i-- { // delete in reverse order in case creation order mattered
obj := (*all)[i].obj
mapping := (*all)[i].mapping
gvr := (*all)[i].resource

if err := c.destroy(obj, mapping); err != nil {
if err := c.dynamicClient.Resource(gvr).Namespace(obj.GetNamespace()).Delete(obj.GetName(), nil); err != nil {
return err
}
}
@@ -1013,81 +856,6 @@ func (c *allClient) createPrerequisites(mapper meta.RESTMapper, ns string, prere
return nil
}

func newClient(config restclient.Config) (*allClient, error) {
config.ContentConfig.NegotiatedSerializer = legacyscheme.Codecs
config.ContentConfig.ContentType = "application/json"
config.Timeout = 30 * time.Second
config.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(3, 10)

transport, err := restclient.TransportFor(&config)
if err != nil {
return nil, err
}

client := &http.Client{
Transport: transport,
Timeout: config.Timeout,
}

backoff := &restclient.URLBackoff{
Backoff: flowcontrol.NewBackOff(1*time.Second, 10*time.Second),
}

return &allClient{
client: client,
config: &config,
backoff: backoff,
}, nil
}

// copied from restclient
func createSerializers(config restclient.ContentConfig) (*restclient.Serializers, error) {
mediaTypes := config.NegotiatedSerializer.SupportedMediaTypes()
contentType := config.ContentType
mediaType, _, err := mime.ParseMediaType(contentType)
if err != nil {
return nil, fmt.Errorf("the content type specified in the client configuration is not recognized: %v", err)
}
info, ok := runtime.SerializerInfoForMediaType(mediaTypes, mediaType)
if !ok {
if len(contentType) != 0 || len(mediaTypes) == 0 {
return nil, fmt.Errorf("no serializers registered for %s", contentType)
}
info = mediaTypes[0]
}

internalGV := schema.GroupVersions{
{
Group: config.GroupVersion.Group,
Version: runtime.APIVersionInternal,
},
// always include the legacy group as a decoding target to handle non-error `Status` return types
{
Group: "",
Version: runtime.APIVersionInternal,
},
}

s := &restclient.Serializers{
Encoder: config.NegotiatedSerializer.EncoderForVersion(info.Serializer, *config.GroupVersion),
Decoder: config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV),

RenegotiatedDecoder: func(contentType string, params map[string]string) (runtime.Decoder, error) {
info, ok := runtime.SerializerInfoForMediaType(mediaTypes, contentType)
if !ok {
return nil, fmt.Errorf("serializer for %s not registered", contentType)
}
return config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV), nil
},
}
if info.StreamSerializer != nil {
s.StreamingSerializer = info.StreamSerializer.Serializer
s.Framer = info.StreamSerializer.Framer
}

return s, nil
}

func getFromEtcd(keys clientv3.KV, path string) (*metaObject, error) {
response, err := keys.Get(context.Background(), path)
if err != nil {
@@ -1128,7 +896,57 @@ func diffMapKeys(a, b interface{}, stringer func(interface{}) string) []string {
return ret
}

type allResourceSource struct{}
type resourceToPersist struct {
gvk schema.GroupVersionKind
gvr schema.GroupVersionResource
golangType reflect.Type
namespaced bool
}

func (*allResourceSource) AnyVersionForGroupEnabled(group string) bool { return true }
func (*allResourceSource) VersionEnabled(version schema.GroupVersion) bool { return true }
func getResourcesToPersist(serverResources []*metav1.APIResourceList, isOAPI bool, t *testing.T) []resourceToPersist {
resourcesToPersist := []resourceToPersist{}

for _, discoveryGroup := range serverResources {
for _, discoveryResource := range discoveryGroup.APIResources {
// this is a subresource, skip it
if strings.Contains(discoveryResource.Name, "/") {
continue
}
hasCreate := false
hasGet := false
for _, verb := range discoveryResource.Verbs {
if string(verb) == "get" {
hasGet = true
}
if string(verb) == "create" {
hasCreate = true
}
}
if !(hasCreate && hasGet) {
continue
}

resourceGV, err := schema.ParseGroupVersion(discoveryGroup.GroupVersion)
if err != nil {
t.Fatal(err)
}
gvk := resourceGV.WithKind(discoveryResource.Kind)
if len(discoveryResource.Group) > 0 || len(discoveryResource.Version) > 0 {
gvk = schema.GroupVersionKind{
Group: discoveryResource.Group,
Version: discoveryResource.Version,
Kind: discoveryResource.Kind,
}
}
gvr := resourceGV.WithResource(discoveryResource.Name)

resourcesToPersist = append(resourcesToPersist, resourceToPersist{
gvk: gvk,
gvr: gvr,
namespaced: discoveryResource.Namespaced,
})
}
}

return resourcesToPersist
}
22
vendor/k8s.io/kubernetes/test/integration/evictions/BUILD
generated
vendored
@@ -15,18 +15,18 @@ go_test(
tags = ["integration"],
deps = [
"//pkg/controller/disruption:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
],
)
89
vendor/k8s.io/kubernetes/test/integration/evictions/evictions_test.go
generated
vendored
@@ -37,6 +37,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/controller/disruption"
"k8s.io/kubernetes/test/integration/framework"
"reflect"
)

const (
@@ -165,6 +166,82 @@ func TestConcurrentEvictionRequests(t *testing.T) {
}
}

// TestTerminalPodEviction ensures that PDB is not checked for terminal pods.
func TestTerminalPodEviction(t *testing.T) {
s, closeFn, rm, informers, clientSet := rmSetup(t)
defer closeFn()

ns := framework.CreateTestingNamespace("terminalpod-eviction", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)

stopCh := make(chan struct{})
informers.Start(stopCh)
go rm.Run(stopCh)
defer close(stopCh)

config := restclient.Config{Host: s.URL}
clientSet, err := clientset.NewForConfig(&config)
if err != nil {
t.Fatalf("Failed to create clientset: %v", err)
}

var gracePeriodSeconds int64 = 30
deleteOption := &metav1.DeleteOptions{
GracePeriodSeconds: &gracePeriodSeconds,
}
pod := newPod("test-terminal-pod1")
if _, err := clientSet.CoreV1().Pods(ns.Name).Create(pod); err != nil {
t.Errorf("Failed to create pod: %v", err)
}
addPodConditionSucceeded(pod)
if _, err := clientSet.CoreV1().Pods(ns.Name).UpdateStatus(pod); err != nil {
t.Fatal(err)
}

waitToObservePods(t, informers.Core().V1().Pods().Informer(), 1)

pdb := newPDB()
if _, err := clientSet.Policy().PodDisruptionBudgets(ns.Name).Create(pdb); err != nil {
t.Errorf("Failed to create PodDisruptionBudget: %v", err)
}

pdbList, err := clientSet.Policy().PodDisruptionBudgets(ns.Name).List(metav1.ListOptions{})
if err != nil {
t.Fatalf("Error while listing pod disruption budget")
}
oldPdb := pdbList.Items[0]
eviction := newEviction(ns.Name, pod.Name, deleteOption)
err = wait.PollImmediate(5*time.Second, 60*time.Second, func() (bool, error) {
e := clientSet.Policy().Evictions(ns.Name).Evict(eviction)
switch {
case errors.IsTooManyRequests(e):
return false, nil
case errors.IsConflict(e):
return false, fmt.Errorf("Unexpected Conflict (409) error caused by failing to handle concurrent PDB updates: %v", e)
case e == nil:
return true, nil
default:
return false, e
}
})
if err != nil {
t.Fatalf("Eviction of pod failed %v", err)
}
pdbList, err = clientSet.Policy().PodDisruptionBudgets(ns.Name).List(metav1.ListOptions{})
if err != nil {
t.Fatalf("Error while listing pod disruption budget")
}
newPdb := pdbList.Items[0]
// We shouldn't see an update in pod disruption budget status' generation number as we are evicting terminal pods without checking for pod disruption.
if !reflect.DeepEqual(newPdb.Status.ObservedGeneration, oldPdb.Status.ObservedGeneration) {
t.Fatalf("Expected the pdb generation to be of same value %v but got %v", newPdb.Status.ObservedGeneration, oldPdb.Status.ObservedGeneration)
}

if err := clientSet.Policy().PodDisruptionBudgets(ns.Name).Delete(pdb.Name, deleteOption); err != nil {
t.Fatalf("Failed to delete pod disruption budget")
}
}

func newPod(podName string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@@ -182,6 +259,18 @@ func newPod(podName string) *v1.Pod {
}
}

func addPodConditionSucceeded(pod *v1.Pod) {
pod.Status = v1.PodStatus{
Phase: v1.PodSucceeded,
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
},
}
}

func addPodConditionReady(pod *v1.Pod) {
pod.Status = v1.PodStatus{
Phase: v1.PodRunning,
41
vendor/k8s.io/kubernetes/test/integration/examples/BUILD
generated
vendored
@@ -20,28 +20,31 @@ go_test(
"//cmd/kube-apiserver/app/options:go_default_library",
"//pkg/master:go_default_library",
"//pkg/master/reconcilers:go_default_library",
"//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/audit:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/options:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//staging/src/k8s.io/client-go/util/cert:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/cmd/server:go_default_library",
"//staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1:go_default_library",
"//staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1beta1:go_default_library",
"//staging/src/k8s.io/sample-apiserver/pkg/cmd/server:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/github.com/pborman/uuid:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server/options:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//vendor/k8s.io/client-go/util/cert:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/cmd/server:go_default_library",
"//vendor/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1:go_default_library",
"//vendor/k8s.io/sample-apiserver/pkg/apis/wardle/v1beta1:go_default_library",
"//vendor/k8s.io/sample-apiserver/pkg/cmd/server:go_default_library",
],
)
50
vendor/k8s.io/kubernetes/test/integration/examples/apiserver_test.go
generated
vendored
@@ -31,10 +31,13 @@ import (

"github.com/stretchr/testify/assert"

apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
genericapiserver "k8s.io/apiserver/pkg/server"
genericapiserveroptions "k8s.io/apiserver/pkg/server/options"
discovery "k8s.io/client-go/discovery"
client "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
@@ -113,7 +116,7 @@ func TestAggregatedAPIServer(t *testing.T) {
if err != nil {
t.Fatal(err)
}
kubeAPIServerConfig, sharedInformers, versionedInformers, _, _, _, admissionPostStartHook, err := app.CreateKubeAPIServerConfig(completedOptions, tunneler, proxyTransport)
kubeAPIServerConfig, _, _, _, admissionPostStartHook, err := app.CreateKubeAPIServerConfig(completedOptions, tunneler, proxyTransport)
if err != nil {
t.Fatal(err)
}
@@ -124,7 +127,7 @@ func TestAggregatedAPIServer(t *testing.T) {
kubeAPIServerClientConfig.ServerName = ""
kubeClientConfigValue.Store(kubeAPIServerClientConfig)

kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.NewEmptyDelegate(), sharedInformers, versionedInformers, admissionPostStartHook)
kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.NewEmptyDelegate(), admissionPostStartHook)
if err != nil {
t.Fatal(err)
}
@@ -331,10 +334,13 @@ func TestAggregatedAPIServer(t *testing.T) {
t.Fatal(err)
}

// this is ugly, but sleep just a little bit so that the watch is probably observed. Since nothing will actually be added to discovery
// (the service is missing), we don't have an external signal.
time.Sleep(100 * time.Millisecond)
if _, err := aggregatorDiscoveryClient.Discovery().ServerResources(); err != nil {
// wait for the unavailable API service to be processed with updated status
err = wait.Poll(100*time.Millisecond, 5*time.Second, func() (done bool, err error) {
_, err = aggregatorDiscoveryClient.Discovery().ServerResources()
hasExpectedError := checkWardleUnavailableDiscoveryError(t, err)
return hasExpectedError, nil
})
if err != nil {
t.Fatal(err)
}

@@ -357,13 +363,41 @@ func TestAggregatedAPIServer(t *testing.T) {
// (the service is missing), we don't have an external signal.
time.Sleep(100 * time.Millisecond)
_, err = aggregatorDiscoveryClient.Discovery().ServerResources()
if err != nil {
t.Fatal(err)
hasExpectedError := checkWardleUnavailableDiscoveryError(t, err)
if !hasExpectedError {
t.Fatalf("Discovery call didn't return expected error: %v", err)
}

// TODO figure out how to turn on enough of services and dns to run more
}

func checkWardleUnavailableDiscoveryError(t *testing.T, err error) bool {
if err == nil {
t.Log("Discovery call expected to return failed unavailable service")
return false
}
if !discovery.IsGroupDiscoveryFailedError(err) {
t.Logf("Unexpected error: %T, %v", err, err)
return false
}
discoveryErr := err.(*discovery.ErrGroupDiscoveryFailed)
if len(discoveryErr.Groups) != 1 {
t.Logf("Unexpected failed groups: %v", err)
return false
}
groupVersion := schema.GroupVersion{Group: "wardle.k8s.io", Version: "v1alpha1"}
groupVersionErr, ok := discoveryErr.Groups[groupVersion]
if !ok {
t.Logf("Unexpected failed group version: %v", err)
return false
}
if !apierrors.IsServiceUnavailable(groupVersionErr) {
t.Logf("Unexpected failed group version error: %v", err)
return false
}
return true
}

func createKubeConfig(clientCfg *rest.Config) *clientcmdapi.Config {
clusterNick := "cluster"
userNick := "user"
4
vendor/k8s.io/kubernetes/test/integration/examples/setup_test.go
generated
vendored
@@ -111,7 +111,7 @@ func startTestServer(t *testing.T, stopCh <-chan struct{}, setup TestServerSetup
if err != nil {
t.Fatal(err)
}
kubeAPIServerConfig, sharedInformers, versionedInformers, _, _, _, admissionPostStartHook, err := app.CreateKubeAPIServerConfig(completedOptions, tunneler, proxyTransport)
kubeAPIServerConfig, _, _, _, admissionPostStartHook, err := app.CreateKubeAPIServerConfig(completedOptions, tunneler, proxyTransport)
if err != nil {
t.Fatal(err)
}
@@ -119,7 +119,7 @@ func startTestServer(t *testing.T, stopCh <-chan struct{}, setup TestServerSetup
if setup.ModifyServerConfig != nil {
setup.ModifyServerConfig(kubeAPIServerConfig)
}
kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.NewEmptyDelegate(), sharedInformers, versionedInformers, admissionPostStartHook)
kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.NewEmptyDelegate(), admissionPostStartHook)
if err != nil {
t.Fatal(err)
}
6
vendor/k8s.io/kubernetes/test/integration/examples/webhook_test.go
generated
vendored
@@ -111,8 +111,14 @@ type auditSinkFunc func(events ...*auditinternal.Event)
func (f auditSinkFunc) ProcessEvents(events ...*auditinternal.Event) {
f(events...)
}

func (auditSinkFunc) Run(stopCh <-chan struct{}) error {
return nil
}

func (auditSinkFunc) Shutdown() {
}

func (auditSinkFunc) String() string {
return ""
}
56
vendor/k8s.io/kubernetes/test/integration/framework/BUILD
generated
vendored
@@ -28,39 +28,39 @@ go_library(
"//pkg/master:go_default_library",
"//pkg/util/env:go_default_library",
"//pkg/version:go_default_library",
"//staging/src/k8s.io/api/apps/v1beta1:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
"//staging/src/k8s.io/api/certificates/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1alpha1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/authenticatorfactory:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/request/union:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authorization/union:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/openapi:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/options:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/storage:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/go-openapi/spec:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/pborman/uuid:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/autoscaling/v1:go_default_library",
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1alpha1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/request/union:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authorization/union:go_default_library",
"//vendor/k8s.io/apiserver/pkg/endpoints/openapi:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server/options:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server/storage:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
29
vendor/k8s.io/kubernetes/test/integration/framework/master_utils.go
generated
vendored
@@ -119,6 +119,7 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv

stopCh := make(chan struct{})
closeFn := func() {
m.GenericAPIServer.RunPreShutdownHooks()
close(stopCh)
s.Close()
}
@@ -177,8 +178,8 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv
glog.Fatal(err)
}

sharedInformers := informers.NewSharedInformerFactory(clientset, masterConfig.GenericConfig.LoopbackClientConfig.Timeout)
m, err = masterConfig.Complete(sharedInformers).New(genericapiserver.NewEmptyDelegate())
masterConfig.ExtraConfig.VersionedInformers = informers.NewSharedInformerFactory(clientset, masterConfig.GenericConfig.LoopbackClientConfig.Timeout)
m, err = masterConfig.Complete().New(genericapiserver.NewEmptyDelegate())
if err != nil {
closeFn()
glog.Fatalf("error in bringing up the master: %v", err)
@@ -225,6 +226,10 @@ func NewIntegrationTestMasterConfig() *master.Config {
masterConfig := NewMasterConfig()
masterConfig.GenericConfig.PublicAddress = net.ParseIP("192.168.10.4")
masterConfig.ExtraConfig.APIResourceConfigSource = master.DefaultAPIResourceConfigSource()

// TODO: get rid of these tests or port them to secure serving
masterConfig.GenericConfig.SecureServing = &genericapiserver.SecureServingInfo{Listener: fakeLocalhost443Listener{}}

return masterConfig
}

@@ -291,6 +296,9 @@ func NewMasterConfig() *master.Config {
genericConfig.Version = &kubeVersion
genericConfig.Authorization.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer()

// TODO: get rid of these tests or port them to secure serving
genericConfig.SecureServing = &genericapiserver.SecureServingInfo{Listener: fakeLocalhost443Listener{}}

err := etcdOptions.ApplyWithStorageFactoryTo(storageFactory, genericConfig)
if err != nil {
panic(err)
@@ -329,3 +337,20 @@ func SharedEtcd() *storagebackend.Config {
cfg.ServerList = []string{GetEtcdURL()}
return cfg
}

type fakeLocalhost443Listener struct{}

func (fakeLocalhost443Listener) Accept() (net.Conn, error) {
return nil, nil
}

func (fakeLocalhost443Listener) Close() error {
return nil
}

func (fakeLocalhost443Listener) Addr() net.Addr {
return &net.TCPAddr{
IP: net.IPv4(127, 0, 0, 1),
Port: 443,
}
}

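Note on the hunk above: in 1.13 the shared informer factory moved into ExtraConfig, and Complete() no longer takes it as an argument. A minimal sketch of the new wiring, using only names visible in this diff (anything beyond them is an assumption):

package framework

import (
	genericapiserver "k8s.io/apiserver/pkg/server"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/master"
)

// completeMaster mirrors the 1.13 pattern from the hunk above: attach the
// versioned informers to ExtraConfig first, then call the zero-argument
// Complete() before constructing the master.
func completeMaster(cfg *master.Config, client clientset.Interface) (*master.Master, error) {
	cfg.ExtraConfig.VersionedInformers = informers.NewSharedInformerFactory(client, cfg.GenericConfig.LoopbackClientConfig.Timeout)
	return cfg.Complete().New(genericapiserver.NewEmptyDelegate())
}
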
4
vendor/k8s.io/kubernetes/test/integration/framework/serializer.go
generated
vendored
@@ -45,9 +45,9 @@ func (s *wrappedSerializer) UniversalDeserializer() runtime.Decoder {
}

func (s *wrappedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder {
return versioning.NewCodec(encoder, nil, s.scheme, s.scheme, s.scheme, s.scheme, gv, nil)
return versioning.NewCodec(encoder, nil, s.scheme, s.scheme, s.scheme, s.scheme, gv, nil, s.scheme.Name())
}

func (s *wrappedSerializer) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder {
return versioning.NewCodec(nil, decoder, s.scheme, s.scheme, s.scheme, s.scheme, nil, gv)
return versioning.NewCodec(nil, decoder, s.scheme, s.scheme, s.scheme, s.scheme, nil, gv, s.scheme.Name())
}

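For context on the two NewCodec changes above: 1.13 added a trailing scheme-name argument to versioning.NewCodec, used to make encode/decode errors more descriptive. A sketch under that assumption (codecFor is a hypothetical helper, not part of the diff):

package framework

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
)

// codecFor builds a codec the 1.13 way: a *runtime.Scheme satisfies the
// convertor, creater, typer, and defaulter parameters, and s.Name() supplies
// the new trailing scheme-name argument.
func codecFor(s *runtime.Scheme, e runtime.Encoder, d runtime.Decoder, egv, dgv runtime.GroupVersioner) runtime.Codec {
	return versioning.NewCodec(e, d, s, s, s, s, egv, dgv, s.Name())
}
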
38
vendor/k8s.io/kubernetes/test/integration/garbagecollector/BUILD
generated
vendored
@@ -12,27 +12,27 @@ go_test(
deps = [
"//cmd/kube-apiserver/app/testing:go_default_library",
"//pkg/controller/garbagecollector:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
"//staging/src/k8s.io/client-go/discovery/cached:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/restmapper:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//test/integration:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library",
"//vendor/k8s.io/client-go/discovery/cached:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/restmapper:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
],
)

12
vendor/k8s.io/kubernetes/test/integration/garbagecollector/garbage_collector_test.go
generated
vendored
@@ -27,7 +27,7 @@ import (
"k8s.io/api/core/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apiextensionstestserver "k8s.io/apiextensions-apiserver/test/integration/testserver"
apiextensionstestserver "k8s.io/apiextensions-apiserver/test/integration/fixtures"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -812,7 +812,7 @@ func TestCustomResourceCascadingDeletion(t *testing.T) {

// Create a custom owner resource.
owner := newCRDInstance(definition, ns.Name, names.SimpleNameGenerator.GenerateName("owner"))
owner, err := resourceClient.Create(owner)
owner, err := resourceClient.Create(owner, metav1.CreateOptions{})
if err != nil {
t.Fatalf("failed to create owner resource %q: %v", owner.GetName(), err)
}
@@ -822,7 +822,7 @@ func TestCustomResourceCascadingDeletion(t *testing.T) {
dependent := newCRDInstance(definition, ns.Name, names.SimpleNameGenerator.GenerateName("dependent"))
link(t, owner, dependent)

dependent, err = resourceClient.Create(dependent)
dependent, err = resourceClient.Create(dependent, metav1.CreateOptions{})
if err != nil {
t.Fatalf("failed to create dependent resource %q: %v", dependent.GetName(), err)
}
@@ -873,7 +873,7 @@ func TestMixedRelationships(t *testing.T) {
definition, resourceClient := createRandomCustomResourceDefinition(t, apiExtensionClient, dynamicClient, ns.Name)

// Create a custom owner resource.
customOwner, err := resourceClient.Create(newCRDInstance(definition, ns.Name, names.SimpleNameGenerator.GenerateName("owner")))
customOwner, err := resourceClient.Create(newCRDInstance(definition, ns.Name, names.SimpleNameGenerator.GenerateName("owner")), metav1.CreateOptions{})
if err != nil {
t.Fatalf("failed to create owner: %v", err)
}
@@ -900,7 +900,7 @@ func TestMixedRelationships(t *testing.T) {
coreOwner.TypeMeta.Kind = "ConfigMap"
coreOwner.TypeMeta.APIVersion = "v1"
link(t, coreOwner, customDependent)
customDependent, err = resourceClient.Create(customDependent)
customDependent, err = resourceClient.Create(customDependent, metav1.CreateOptions{})
if err != nil {
t.Fatalf("failed to create dependent: %v", err)
}
@@ -971,7 +971,7 @@ func TestCRDDeletionCascading(t *testing.T) {
definition, resourceClient := createRandomCustomResourceDefinition(t, apiExtensionClient, dynamicClient, ns.Name)

// Create a custom owner resource.
owner, err := resourceClient.Create(newCRDInstance(definition, ns.Name, names.SimpleNameGenerator.GenerateName("owner")))
owner, err := resourceClient.Create(newCRDInstance(definition, ns.Name, names.SimpleNameGenerator.GenerateName("owner")), metav1.CreateOptions{})
if err != nil {
t.Fatalf("failed to create owner: %v", err)
}

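The Create calls above gained a second parameter because the 1.13 dynamic client requires explicit options on mutating verbs (Update takes metav1.UpdateOptions the same way). A minimal sketch; the GroupVersionResource here is a placeholder, not one used by the test:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
)

// createInstance creates a custom-resource instance with the 1.13 signature:
// options are now a required argument rather than an implicit default.
func createInstance(client dynamic.Interface, ns string, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
	gvr := schema.GroupVersionResource{Group: "example.com", Version: "v1", Resource: "foos"} // placeholder GVR
	return client.Resource(gvr).Namespace(ns).Create(obj, metav1.CreateOptions{})
}
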
26
vendor/k8s.io/kubernetes/test/integration/ipamperf/BUILD
generated
vendored
@@ -14,13 +14,13 @@ go_test(
deps = [
"//pkg/controller/nodeipam:go_default_library",
"//pkg/controller/nodeipam/ipam:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//test/integration/framework:go_default_library",
"//test/integration/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)

@@ -53,18 +53,18 @@ go_library(
"//pkg/controller/nodeipam/ipam:go_default_library",
"//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
"//pkg/controller/util/node:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//test/integration/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/google.golang.org/api/compute/v0.beta:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
],
)

2
vendor/k8s.io/kubernetes/test/integration/ipamperf/ipam_test.go
generated
vendored
@@ -95,7 +95,7 @@ func runTest(t *testing.T, apiURL string, config *Config, clusterCIDR, serviceCI
func logResults(allResults []*Results) {
jStr, err := json.MarshalIndent(allResults, "", " ")
if err != nil {
glog.Errorf("Error formating results: %v", err)
glog.Errorf("Error formatting results: %v", err)
return
}
if resultsLogFile != "" {

201
vendor/k8s.io/kubernetes/test/integration/master/BUILD
generated
vendored
@@ -10,44 +10,14 @@ go_test(
name = "go_default_test",
size = "large",
srcs = [
"audit_test.go",
"crd_test.go",
"kms_transformation_test.go",
"kube_apiserver_test.go",
"main_test.go",
"secrets_transformation_test.go",
"synthetic_master_test.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"kms_transformation_test.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"kms_transformation_test.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"kms_transformation_test.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"kms_transformation_test.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"kms_transformation_test.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"kms_transformation_test.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"kms_transformation_test.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"kms_transformation_test.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"kms_transformation_test.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"kms_transformation_test.go",
],
"//conditions:default": [],
}),
],
embed = [":go_default_library"],
tags = ["integration"],
deps = [
@@ -56,70 +26,76 @@ go_test(
"//pkg/apis/core:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/master:go_default_library",
"//staging/src/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/apps/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/networking/v1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/audit:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/audit/v1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/group:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/request/bearertoken:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/features:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/aes:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/tokentest:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration:go_default_library",
"//test/integration:go_default_library",
"//test/integration/framework:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/evanphx/json-patch:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/networking/v1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/group:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library",
"//vendor/k8s.io/apiserver/pkg/features:go_default_library",
"//vendor/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/tokentest:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:darwin": [
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:linux": [
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:nacl": [
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:plan9": [
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:solaris": [
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"//conditions:default": [],
}),
@@ -141,114 +117,73 @@ filegroup(
go_library(
name = "go_default_library",
srcs = [
"kms_plugin_mock.go",
"transformation_testcase.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"kms_plugin_mock.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"kms_plugin_mock.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"kms_plugin_mock.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"kms_plugin_mock.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"kms_plugin_mock.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"kms_plugin_mock.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"kms_plugin_mock.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"kms_plugin_mock.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"kms_plugin_mock.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"kms_plugin_mock.go",
],
"//conditions:default": [],
}),
],
importpath = "k8s.io/kubernetes/test/integration/master",
deps = [
"//cmd/kube-apiserver/app/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/integration:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:darwin": [
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:linux": [
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:nacl": [
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:plan9": [
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:solaris": [
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"//conditions:default": [],
}),

264
vendor/k8s.io/kubernetes/test/integration/master/audit_test.go
generated
vendored
Normal file
@@ -0,0 +1,264 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package master

import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strings"
"testing"

apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
auditinternal "k8s.io/apiserver/pkg/apis/audit"
auditv1 "k8s.io/apiserver/pkg/apis/audit/v1"
auditv1beta1 "k8s.io/apiserver/pkg/apis/audit/v1beta1"
"k8s.io/client-go/kubernetes"
kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
"k8s.io/kubernetes/test/integration/framework"
"k8s.io/kubernetes/test/utils"

"github.com/evanphx/json-patch"
)

var (
auditPolicyPattern = `
apiVersion: {version}
kind: Policy
rules:
- level: RequestResponse
resources:
- group: "" # core
resources: ["configmaps"]

`
namespace = "default"
watchTestTimeout int64 = 1
watchOptions = metav1.ListOptions{TimeoutSeconds: &watchTestTimeout}
patch, _ = json.Marshal(jsonpatch.Patch{})
auditTestUser = "system:apiserver"
versions = map[string]schema.GroupVersion{
"audit.k8s.io/v1": auditv1.SchemeGroupVersion,
"audit.k8s.io/v1beta1": auditv1beta1.SchemeGroupVersion,
}
)

// TestAudit ensures that both v1beta1 and v1 version audit api could work.
func TestAudit(t *testing.T) {
for version := range versions {
testAudit(t, version)
}
}

func testAudit(t *testing.T, version string) {
// prepare audit policy file
auditPolicy := []byte(strings.Replace(auditPolicyPattern, "{version}", version, 1))
policyFile, err := ioutil.TempFile("", "audit-policy.yaml")
if err != nil {
t.Fatalf("Failed to create audit policy file: %v", err)
}
defer os.Remove(policyFile.Name())
if _, err := policyFile.Write(auditPolicy); err != nil {
t.Fatalf("Failed to write audit policy file: %v", err)
}
if err := policyFile.Close(); err != nil {
t.Fatalf("Failed to close audit policy file: %v", err)
}

// prepare audit log file
logFile, err := ioutil.TempFile("", "audit.log")
if err != nil {
t.Fatalf("Failed to create audit log file: %v", err)
}
defer os.Remove(logFile.Name())

// start api server
result := kubeapiservertesting.StartTestServerOrDie(t, nil,
[]string{
"--audit-policy-file", policyFile.Name(),
"--audit-log-version", version,
"--audit-log-mode", "blocking",
"--audit-log-path", logFile.Name()},
framework.SharedEtcd())
defer result.TearDownFn()

kubeclient, err := kubernetes.NewForConfig(result.ClientConfig)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}

func() {
// create, get, watch, update, patch, list and delete configmap.
configMap := &apiv1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "audit-configmap",
},
Data: map[string]string{
"map-key": "map-value",
},
}

_, err := kubeclient.CoreV1().ConfigMaps(namespace).Create(configMap)
expectNoError(t, err, "failed to create audit-configmap")

_, err = kubeclient.CoreV1().ConfigMaps(namespace).Get(configMap.Name, metav1.GetOptions{})
expectNoError(t, err, "failed to get audit-configmap")

configMapChan, err := kubeclient.CoreV1().ConfigMaps(namespace).Watch(watchOptions)
expectNoError(t, err, "failed to create watch for config maps")
for range configMapChan.ResultChan() {
// Block until watchOptions.TimeoutSeconds expires.
// If the test finishes before watchOptions.TimeoutSeconds expires, the watch audit
// event at stage ResponseComplete will not be generated.
}

_, err = kubeclient.CoreV1().ConfigMaps(namespace).Update(configMap)
expectNoError(t, err, "failed to update audit-configmap")

_, err = kubeclient.CoreV1().ConfigMaps(namespace).Patch(configMap.Name, types.JSONPatchType, patch)
expectNoError(t, err, "failed to patch configmap")

_, err = kubeclient.CoreV1().ConfigMaps(namespace).List(metav1.ListOptions{})
expectNoError(t, err, "failed to list config maps")

err = kubeclient.CoreV1().ConfigMaps(namespace).Delete(configMap.Name, &metav1.DeleteOptions{})
expectNoError(t, err, "failed to delete audit-configmap")
}()

expectedEvents := []utils.AuditEvent{
{
Level: auditinternal.LevelRequestResponse,
Stage: auditinternal.StageResponseComplete,
RequestURI: fmt.Sprintf("/api/v1/namespaces/%s/configmaps", namespace),
Verb: "create",
Code: 201,
User: auditTestUser,
Resource: "configmaps",
Namespace: namespace,
RequestObject: true,
ResponseObject: true,
AuthorizeDecision: "allow",
}, {
Level: auditinternal.LevelRequestResponse,
Stage: auditinternal.StageResponseComplete,
RequestURI: fmt.Sprintf("/api/v1/namespaces/%s/configmaps/audit-configmap", namespace),
Verb: "get",
Code: 200,
User: auditTestUser,
Resource: "configmaps",
Namespace: namespace,
RequestObject: false,
ResponseObject: true,
AuthorizeDecision: "allow",
}, {
Level: auditinternal.LevelRequestResponse,
Stage: auditinternal.StageResponseComplete,
RequestURI: fmt.Sprintf("/api/v1/namespaces/%s/configmaps", namespace),
Verb: "list",
Code: 200,
User: auditTestUser,
Resource: "configmaps",
Namespace: namespace,
RequestObject: false,
ResponseObject: true,
AuthorizeDecision: "allow",
}, {
Level: auditinternal.LevelRequestResponse,
Stage: auditinternal.StageResponseStarted,
RequestURI: fmt.Sprintf("/api/v1/namespaces/%s/configmaps?timeoutSeconds=%d&watch=true", namespace, watchTestTimeout),
Verb: "watch",
Code: 200,
User: auditTestUser,
Resource: "configmaps",
Namespace: namespace,
RequestObject: false,
ResponseObject: false,
AuthorizeDecision: "allow",
}, {
Level: auditinternal.LevelRequestResponse,
Stage: auditinternal.StageResponseComplete,
RequestURI: fmt.Sprintf("/api/v1/namespaces/%s/configmaps?timeoutSeconds=%d&watch=true", namespace, watchTestTimeout),
Verb: "watch",
Code: 200,
User: auditTestUser,
Resource: "configmaps",
Namespace: namespace,
RequestObject: false,
ResponseObject: false,
AuthorizeDecision: "allow",
}, {
Level: auditinternal.LevelRequestResponse,
Stage: auditinternal.StageResponseComplete,
RequestURI: fmt.Sprintf("/api/v1/namespaces/%s/configmaps/audit-configmap", namespace),
Verb: "update",
Code: 200,
User: auditTestUser,
Resource: "configmaps",
Namespace: namespace,
RequestObject: true,
ResponseObject: true,
AuthorizeDecision: "allow",
}, {
Level: auditinternal.LevelRequestResponse,
Stage: auditinternal.StageResponseComplete,
RequestURI: fmt.Sprintf("/api/v1/namespaces/%s/configmaps/audit-configmap", namespace),
Verb: "patch",
Code: 200,
User: auditTestUser,
Resource: "configmaps",
Namespace: namespace,
RequestObject: true,
ResponseObject: true,
AuthorizeDecision: "allow",
}, {
Level: auditinternal.LevelRequestResponse,
Stage: auditinternal.StageResponseComplete,
RequestURI: fmt.Sprintf("/api/v1/namespaces/%s/configmaps/audit-configmap", namespace),
Verb: "delete",
Code: 200,
User: auditTestUser,
Resource: "configmaps",
Namespace: namespace,
RequestObject: true,
ResponseObject: true,
AuthorizeDecision: "allow",
},
}

stream, err := os.Open(logFile.Name())
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer stream.Close()
missing, err := utils.CheckAuditLines(stream, expectedEvents, versions[version])
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(missing) > 0 {
t.Errorf("Failed to match all expected events, events %#v not found!", missing)
}
}

func expectNoError(t *testing.T, err error, msg string) {
if err != nil {
t.Fatalf("%s: %v", msg, err)
}
}
4
vendor/k8s.io/kubernetes/test/integration/master/crd_test.go
generated
vendored
@@ -199,7 +199,7 @@ func TestCRD(t *testing.T) {
}
createErr := make(chan error, 1)
go func() {
_, err := dynamicClient.Resource(fooResource).Namespace("default").Create(unstructuredFoo)
_, err := dynamicClient.Resource(fooResource).Namespace("default").Create(unstructuredFoo, metav1.CreateOptions{})
t.Logf("Foo instance create returned: %v", err)
if err != nil {
createErr <- err
@@ -272,7 +272,7 @@ func TestCRD(t *testing.T) {
}
fooUnstructured.UnmarshalJSON(bs)

_, err = dynamicClient.Resource(fooResource).Namespace("default").Update(fooUnstructured)
_, err = dynamicClient.Resource(fooResource).Namespace("default").Update(fooUnstructured, metav1.UpdateOptions{})
if err != nil && !errors.IsConflict(err) {
t.Fatalf("Failed to update Foo instance: %v", err)
} else if err == nil {

17
vendor/k8s.io/kubernetes/test/integration/master/kms_plugin_mock.go
generated
vendored
@@ -23,9 +23,7 @@ import (
"encoding/base64"
"fmt"
"net"
"os"

"golang.org/x/sys/unix"
"google.golang.org/grpc"

"github.com/golang/glog"
@@ -34,7 +32,7 @@ import (

const (
kmsAPIVersion = "v1beta1"
sockFile = "/tmp/kms-provider.sock"
sockFile = "@kms-provider.sock"
unixProtocol = "unix"
)

@@ -49,10 +47,6 @@ type base64Plugin struct {
}

func NewBase64Plugin() (*base64Plugin, error) {
if err := cleanSockFile(); err != nil {
return nil, err
}

listener, err := net.Listen(unixProtocol, sockFile)
if err != nil {
return nil, fmt.Errorf("failed to listen on the unix socket, error: %v", err)
@@ -75,7 +69,6 @@ func NewBase64Plugin() (*base64Plugin, error) {
func (s *base64Plugin) cleanUp() {
s.grpcServer.Stop()
s.listener.Close()
cleanSockFile()
}

var testProviderAPIVersion = kmsAPIVersion
@@ -105,11 +98,3 @@ func (s *base64Plugin) Encrypt(ctx context.Context, request *kmsapi.EncryptReque

return &kmsapi.EncryptResponse{Cipher: buf}, nil
}

func cleanSockFile() error {
err := unix.Unlink(sockFile)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to delete the socket file, error: %v", err)
}
return nil
}

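The sockFile change above (a leading "@") is what lets cleanSockFile and the os/unix imports go away: on Linux, Go maps a leading "@" to a NUL byte, placing the socket in the kernel's abstract namespace, so nothing is created on disk and the name vanishes when the listener closes. A standalone sketch (Linux-only; the demo socket name is arbitrary):

package main

import (
	"fmt"
	"net"
)

func main() {
	// Abstract unix socket: no filesystem entry, so no Unlink cleanup needed.
	l, err := net.Listen("unix", "@kms-provider-demo.sock")
	if err != nil {
		panic(err)
	}
	defer l.Close() // the kernel reclaims the abstract name automatically
	fmt.Println("listening on", l.Addr())
}
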
2
vendor/k8s.io/kubernetes/test/integration/master/kms_transformation_test.go
generated
vendored
@@ -48,7 +48,7 @@ resources:
- kms:
name: grpc-kms-provider
cachesize: 1000
endpoint: unix:///tmp/kms-provider.sock
endpoint: unix:///@kms-provider.sock
`
)

4
vendor/k8s.io/kubernetes/test/integration/master/synthetic_master_test.go
generated
vendored
@@ -175,7 +175,7 @@ func TestStatus(t *testing.T) {
statusCode: http.StatusForbidden,
reqPath: "/apis",
reason: "Forbidden",
message: `forbidden: User "" cannot get path "/apis": Everything is forbidden.`,
message: `forbidden: User "" cannot get path "/apis"`,
},
{
name: "401",
@@ -862,7 +862,7 @@ func TestUpdateNodeObjects(t *testing.T) {
Reason: "bar",
},
}
case i%4 == 1:
case i%4 == 2:
lastCount = 0
n.Status.Conditions = nil
}

8
vendor/k8s.io/kubernetes/test/integration/metrics/BUILD
generated
vendored
@@ -35,13 +35,13 @@ go_test(
embed = [":go_default_library"],
tags = ["integration"],
deps = [
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/golang/protobuf/proto:go_default_library",
"//vendor/github.com/prometheus/client_model/go:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)

10
vendor/k8s.io/kubernetes/test/integration/objectmeta/BUILD
generated
vendored
@@ -14,13 +14,13 @@ go_test(
],
tags = ["integration"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)

2
vendor/k8s.io/kubernetes/test/integration/openshift/BUILD
generated
vendored
@@ -15,8 +15,8 @@ go_test(
tags = ["integration"],
deps = [
"//pkg/master:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server:go_default_library",
],
)

10
vendor/k8s.io/kubernetes/test/integration/pods/BUILD
generated
vendored
@@ -14,13 +14,13 @@ go_test(
],
tags = ["integration"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//test/integration:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)

25
vendor/k8s.io/kubernetes/test/integration/quota/BUILD
generated
vendored
@@ -23,19 +23,20 @@ go_test(
"//pkg/quota/install:go_default_library",
"//plugin/pkg/admission/resourcequota:go_default_library",
"//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)

10
vendor/k8s.io/kubernetes/test/integration/quota/quota_test.go
generated
vendored
@@ -17,6 +17,7 @@ limitations under the License.
package quota

import (
"context"
"fmt"
"net/http"
"net/http/httptest"
@@ -35,6 +36,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
internalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
"k8s.io/kubernetes/pkg/controller"
@@ -161,7 +163,9 @@ func waitForQuota(t *testing.T, quota *v1.ResourceQuota, clientset *clientset.Cl
t.Fatalf("unexpected error: %v", err)
}

_, err = watch.Until(1*time.Minute, w, func(event watch.Event) (bool, error) {
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
switch event.Type {
case watch.Modified:
default:
@@ -218,7 +222,9 @@ func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
t.Fatalf("unexpected error: %v", err)
}

_, err = watch.Until(3*time.Minute, w, func(event watch.Event) (bool, error) {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
switch event.Type {
case watch.Modified:
default:

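The hunks above replace the duration-based watch.Until with watchtools.UntilWithoutRetry, whose deadline comes from a context instead. A compilable sketch against the client-go vendored here (waitForModified is a hypothetical helper; w is any established watch):

package main

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/watch"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForModified blocks until the watcher delivers a Modified event or the
// one-minute context deadline expires, mirroring the pattern in the diff.
func waitForModified(w watch.Interface) error {
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancel()
	_, err := watchtools.UntilWithoutRetry(ctx, w, func(ev watch.Event) (bool, error) {
		return ev.Type == watch.Modified, nil
	})
	return err
}
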
28
vendor/k8s.io/kubernetes/test/integration/replicaset/BUILD
generated
vendored
@@ -17,22 +17,22 @@ go_test(
"//pkg/api/v1/pod:go_default_library",
"//pkg/controller/replicaset:go_default_library",
"//pkg/util/slice:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//test/integration/framework:go_default_library",
"//test/utils:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
],
)

26
vendor/k8s.io/kubernetes/test/integration/replicaset/replicaset_test.go
generated
vendored
@@ -112,32 +112,6 @@ func newMatchingPod(podName, namespace string) *v1.Pod {
}
}

// verifyRemainingObjects verifies if the number of the remaining replica
// sets and pods are rsNum and podNum. It returns error if the
// communication with the API server fails.
func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rsNum, podNum int) (bool, error) {
rsClient := clientSet.ExtensionsV1beta1().ReplicaSets(namespace)
podClient := clientSet.CoreV1().Pods(namespace)
pods, err := podClient.List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
var ret = true
if len(pods.Items) != podNum {
ret = false
t.Logf("expect %d pods, got %d pods", podNum, len(pods.Items))
}
rss, err := rsClient.List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list replica sets: %v", err)
}
if len(rss.Items) != rsNum {
ret = false
t.Logf("expect %d RSs, got %d RSs", rsNum, len(rss.Items))
}
return ret, nil
}

func rmSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *replicaset.ReplicaSetController, informers.SharedInformerFactory, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
_, s, closeFn := framework.RunAMaster(masterConfig)

24
vendor/k8s.io/kubernetes/test/integration/replicationcontroller/BUILD
generated
vendored
24
vendor/k8s.io/kubernetes/test/integration/replicationcontroller/BUILD
generated
vendored
@@ -16,19 +16,19 @@ go_test(
    deps = [
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/controller/replication:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/client-go/util/retry:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/util/retry:go_default_library",
    ],
)
@@ -105,32 +105,6 @@ func newMatchingPod(podName, namespace string) *v1.Pod {
		}
	}
}

// verifyRemainingObjects verifies if the number of the remaining replication
// controllers and pods are rcNum and podNum. It returns error if the
// communication with the API server fails.
func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rcNum, podNum int) (bool, error) {
	rcClient := clientSet.CoreV1().ReplicationControllers(namespace)
	podClient := clientSet.CoreV1().Pods(namespace)
	pods, err := podClient.List(metav1.ListOptions{})
	if err != nil {
		return false, fmt.Errorf("Failed to list pods: %v", err)
	}
	var ret = true
	if len(pods.Items) != podNum {
		ret = false
		t.Logf("expect %d pods, got %d pods", podNum, len(pods.Items))
	}
	rcs, err := rcClient.List(metav1.ListOptions{})
	if err != nil {
		return false, fmt.Errorf("Failed to list replication controllers: %v", err)
	}
	if len(rcs.Items) != rcNum {
		ret = false
		t.Logf("expect %d RCs, got %d RCs", rcNum, len(rcs.Items))
	}
	return ret, nil
}

func rmSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *replication.ReplicationManager, informers.SharedInformerFactory, clientset.Interface) {
	masterConfig := framework.NewIntegrationTestMasterConfig()
	_, s, closeFn := framework.RunAMaster(masterConfig)
12
vendor/k8s.io/kubernetes/test/integration/scale/BUILD
generated
vendored
@@ -12,15 +12,15 @@ go_test(
    tags = ["integration"],
    deps = [
        "//cmd/kube-apiserver/app/testing:go_default_library",
        "//staging/src/k8s.io/api/apps/v1beta2:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library",
        "//vendor/github.com/coreos/pkg/capnslog:go_default_library",
        "//vendor/k8s.io/api/apps/v1beta2:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
    ],
)
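Every BUILD hunk in this commit is the same mechanical rewrite: dependency labels that pointed into the staging tree are repointed at the vendored copy. One representative pair, taken from the lists above (illustrative only, not an additional change):

# before: label into the staging tree
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
# after: label into the vendored tree
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",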
89
vendor/k8s.io/kubernetes/test/integration/scheduler/BUILD
generated
vendored
@@ -25,47 +25,51 @@ go_test(
        "//cmd/kube-scheduler/app:go_default_library",
        "//cmd/kube-scheduler/app/config:go_default_library",
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/apis/componentconfig:go_default_library",
        "//pkg/client/clientset_generated/internalclientset:go_default_library",
        "//pkg/client/informers/informers_generated/internalversion:go_default_library",
        "//pkg/controller/nodelifecycle:go_default_library",
        "//pkg/controller/volume/persistentvolume:go_default_library",
        "//pkg/controller/volume/persistentvolume/options:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubeapiserver/admission:go_default_library",
        "//pkg/scheduler:go_default_library",
        "//pkg/scheduler/algorithm:go_default_library",
        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/algorithmprovider:go_default_library",
        "//pkg/scheduler/api:go_default_library",
        "//pkg/scheduler/apis/config:go_default_library",
        "//pkg/scheduler/cache:go_default_library",
        "//pkg/scheduler/factory:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//plugin/pkg/admission/podtolerationrestriction:go_default_library",
        "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
        "//test/integration/framework:go_default_library",
        "//test/utils:go_default_library",
        "//test/utils/image:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
        "//vendor/k8s.io/api/storage/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)
@@ -96,26 +100,27 @@ go_library(
        "//pkg/scheduler/algorithmprovider:go_default_library",
        "//pkg/scheduler/api:go_default_library",
        "//pkg/scheduler/factory:go_default_library",
        "//pkg/util/taints:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
        "//test/integration/framework:go_default_library",
        "//test/utils/image:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)
62
vendor/k8s.io/kubernetes/test/integration/scheduler/priorities_test.go
generated
vendored
@@ -22,6 +22,7 @@ import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	testutils "k8s.io/kubernetes/test/utils"
	"strings"
)

// This file tests the scheduler priority functions.

@@ -172,3 +173,64 @@ func TestPodAffinity(t *testing.T) {
	}
	t.Errorf("Pod %v got scheduled on an unexpected node: %v.", podName, pod.Spec.NodeName)
}

// TestImageLocality verifies that the scheduler's image locality priority function
// works correctly, i.e., the pod gets scheduled to the node where its container images are ready.
func TestImageLocality(t *testing.T) {
	context := initTest(t, "image-locality")
	defer cleanupTest(t, context)

	// Add a few nodes.
	_, err := createNodes(context.clientSet, "testnode", nil, 10)
	if err != nil {
		t.Fatalf("cannot create nodes: %v", err)
	}

	// We use a fake large image as the test image used by the pod, which has relatively large image size.
	image := v1.ContainerImage{
		Names: []string{
			"fake-large-image:v1",
		},
		SizeBytes: 3000 * 1024 * 1024,
	}

	// Create a node with the large image.
	nodeWithLargeImage, err := createNodeWithImages(context.clientSet, "testnode-large-image", nil, []v1.ContainerImage{image})
	if err != nil {
		t.Fatalf("cannot create node with a large image: %v", err)
	}

	// Create a pod with containers each having the specified image.
	podName := "pod-using-large-image"
	pod, err := runPodWithContainers(context.clientSet, initPodWithContainers(context.clientSet, &podWithContainersConfig{
		Name:       podName,
		Namespace:  context.ns.Name,
		Containers: makeContainersWithImages(image.Names),
	}))
	if err != nil {
		t.Fatalf("error running pod with images: %v", err)
	}
	if pod.Spec.NodeName != nodeWithLargeImage.Name {
		t.Errorf("pod %v got scheduled on an unexpected node: %v. Expected node: %v.", podName, pod.Spec.NodeName, nodeWithLargeImage.Name)
	} else {
		t.Logf("pod %v got successfully scheduled on node %v.", podName, pod.Spec.NodeName)
	}
}

// makeContainersWithImages returns a list of v1.Container objects for each given image. Duplicates of an image are ignored,
// i.e., each image is used only once.
func makeContainersWithImages(images []string) []v1.Container {
	var containers []v1.Container
	usedImages := make(map[string]struct{})

	for _, image := range images {
		if _, ok := usedImages[image]; !ok {
			containers = append(containers, v1.Container{
				Name:  strings.Replace(image, ":", "-", -1) + "-container",
				Image: image,
			})
			usedImages[image] = struct{}{}
		}
	}
	return containers
}
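A quick note on makeContainersWithImages: container names cannot contain ":", so the tag separator is folded into the generated name, and duplicate images collapse to a single container. A hedged sketch of the resulting shape:

	containers := makeContainersWithImages([]string{"busybox:1.29", "busybox:1.29"})
	// len(containers) == 1
	// containers[0].Name  == "busybox-1.29-container"
	// containers[0].Image == "busybox:1.29"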
26
vendor/k8s.io/kubernetes/test/integration/scheduler/scheduler_test.go
generated
vendored
@@ -45,11 +45,11 @@ import (
	schedulerapp "k8s.io/kubernetes/cmd/kube-scheduler/app"
	schedulerappconfig "k8s.io/kubernetes/cmd/kube-scheduler/app/config"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/apis/componentconfig"
	"k8s.io/kubernetes/pkg/scheduler"
	"k8s.io/kubernetes/pkg/scheduler/algorithm"
	_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
	"k8s.io/kubernetes/pkg/scheduler/factory"
	"k8s.io/kubernetes/test/integration/framework"
@@ -140,6 +140,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
				"GeneralPredicates",
				"MatchInterPodAffinity",
				"MaxAzureDiskVolumeCount",
				"MaxCSIVolumeCountPred",
				"MaxEBSVolumeCount",
				"MaxGCEPDVolumeCount",
				"NoDiskConflict",
@@ -154,6 +155,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
				"NodePreferAvoidPodsPriority",
				"SelectorSpreadPriority",
				"TaintTolerationPriority",
				"ImageLocalityPriority",
			),
		},
		{
@@ -173,7 +175,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
		configPolicyName := fmt.Sprintf("scheduler-custom-policy-config-%d", i)
		policyConfigMap := v1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: configPolicyName},
			Data:       map[string]string{componentconfig.SchedulerPolicyConfigMapKey: test.policy},
			Data:       map[string]string{kubeschedulerconfig.SchedulerPolicyConfigMapKey: test.policy},
		}

		policyConfigMap.APIVersion = "v1"
@@ -182,18 +184,20 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
		eventBroadcaster := record.NewBroadcaster()
		eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet.CoreV1().Events("")})

		defaultBindTimeout := int64(30)
		ss := &schedulerappconfig.Config{
			ComponentConfig: componentconfig.KubeSchedulerConfiguration{
			ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
				HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
				SchedulerName:                  v1.DefaultSchedulerName,
				AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
					Policy: &componentconfig.SchedulerPolicySource{
						ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
				AlgorithmSource: kubeschedulerconfig.SchedulerAlgorithmSource{
					Policy: &kubeschedulerconfig.SchedulerPolicySource{
						ConfigMap: &kubeschedulerconfig.SchedulerPolicyConfigMapSource{
							Namespace: policyConfigMap.Namespace,
							Name:      policyConfigMap.Name,
						},
					},
				},
				BindTimeoutSeconds: &defaultBindTimeout,
			},
			Client:          clientSet,
			InformerFactory: informerFactory,
@@ -243,18 +247,20 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
		eventBroadcaster := record.NewBroadcaster()
		eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet.CoreV1().Events("")})

		defaultBindTimeout := int64(30)
		ss := &schedulerappconfig.Config{
			ComponentConfig: componentconfig.KubeSchedulerConfiguration{
			ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
				SchedulerName: v1.DefaultSchedulerName,
				AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
					Policy: &componentconfig.SchedulerPolicySource{
						ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
				AlgorithmSource: kubeschedulerconfig.SchedulerAlgorithmSource{
					Policy: &kubeschedulerconfig.SchedulerPolicySource{
						ConfigMap: &kubeschedulerconfig.SchedulerPolicyConfigMapSource{
							Namespace: "non-existent-config",
							Name:      "non-existent-config",
						},
					},
				},
				HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
				BindTimeoutSeconds:             &defaultBindTimeout,
			},
			Client:          clientSet,
			InformerFactory: informerFactory,
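The pattern repeated through these hunks is the relocation of the scheduler's component-config types from pkg/apis/componentconfig to pkg/scheduler/apis/config (imported here as kubeschedulerconfig). A minimal sketch of a policy-from-ConfigMap source under the new package, using only field names visible in the hunks above; the ConfigMap name is hypothetical:

	defaultBindTimeout := int64(30)
	cfg := kubeschedulerconfig.KubeSchedulerConfiguration{
		SchedulerName: v1.DefaultSchedulerName,
		AlgorithmSource: kubeschedulerconfig.SchedulerAlgorithmSource{
			Policy: &kubeschedulerconfig.SchedulerPolicySource{
				ConfigMap: &kubeschedulerconfig.SchedulerPolicyConfigMapSource{
					Namespace: metav1.NamespaceSystem,
					Name:      "my-scheduler-policy", // hypothetical ConfigMap name
				},
			},
		},
		BindTimeoutSeconds: &defaultBindTimeout, // new field in this version of the struct
	}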
708
vendor/k8s.io/kubernetes/test/integration/scheduler/taint_test.go
generated
vendored
@@ -19,7 +19,7 @@ package scheduler

// This file tests the Taint feature.

import (
	"reflect"
	"fmt"
	"testing"
	"time"

@@ -29,7 +29,6 @@ import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	internalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
	"k8s.io/kubernetes/pkg/controller/nodelifecycle"
@@ -40,10 +39,28 @@ import (
	pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
)

// TestTaintNodeByCondition verifies:
// 1. MemoryPressure Toleration is added to non-BestEffort Pod by PodTolerationRestriction
// 2. NodeController taints nodes by node condition
// 3. Scheduler allows pod to tolerate node condition taints, e.g. network unavailable
func newPod(nsName, name string, req, limit v1.ResourceList) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: nsName,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "busybox",
					Image: "busybox",
					Resources: v1.ResourceRequirements{
						Requests: req,
						Limits:   limit,
					},
				},
			},
		},
	}
}
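newPod encodes the three QoS flavors the taint table below relies on purely through its req/limit arguments; a short sketch, reusing the podRes resource list that the test defines further down:

	bestEffort := newPod(nsName, "besteffort-pod", nil, nil)      // BestEffort: no requests or limits
	burstable := newPod(nsName, "burstable-pod", podRes, nil)     // Burstable: requests only
	guaranteed := newPod(nsName, "guarantee-pod", podRes, podRes) // Guaranteed: requests == limits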
// TestTaintNodeByCondition tests related cases for TaintNodeByCondition feature.
func TestTaintNodeByCondition(t *testing.T) {
	enabled := utilfeature.DefaultFeatureGate.Enabled("TaintNodesByCondition")
	defer func() {
@@ -76,7 +93,7 @@ func TestTaintNodeByCondition(t *testing.T) {
	algorithmprovider.ApplyFeatureGates()

	context = initTestScheduler(t, context, controllerCh, false, nil)
	clientset := context.clientSet
	cs := context.clientSet
	informers := context.informerFactory
	nsName := context.ns.Name

@@ -86,8 +103,8 @@ func TestTaintNodeByCondition(t *testing.T) {
		informers.Core().V1().Nodes(),
		informers.Extensions().V1beta1().DaemonSets(),
		nil, // CloudProvider
		clientset,
		time.Second, // Node monitor grace period
		cs,
		time.Hour,   // Node monitor grace period
		time.Second, // Node startup grace period
		time.Second, // Node monitor period
		time.Second, // Pod eviction timeout
@@ -108,87 +125,375 @@ func TestTaintNodeByCondition(t *testing.T) {
	// Waiting for all controllers to sync.
	internalInformers.Start(controllerCh)
	internalInformers.WaitForCacheSync(controllerCh)
	informers.Start(controllerCh)
	informers.WaitForCacheSync(controllerCh)

	// -------------------------------------------
	// Test TaintNodeByCondition feature.
	// -------------------------------------------
	nodeRes := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("4000m"),
		v1.ResourceMemory: resource.MustParse("16Gi"),
		v1.ResourcePods:   resource.MustParse("110"),
	}

	podRes := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("100m"),
		v1.ResourceMemory: resource.MustParse("100Mi"),
	}

	notReadyToleration := v1.Toleration{
		Key:      algorithm.TaintNodeNotReady,
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoSchedule,
	}

	unreachableToleration := v1.Toleration{
		Key:      algorithm.TaintNodeUnreachable,
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoSchedule,
	}

	unschedulableToleration := v1.Toleration{
		Key:      algorithm.TaintNodeUnschedulable,
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoSchedule,
	}

	outOfDiskToleration := v1.Toleration{
		Key:      algorithm.TaintNodeOutOfDisk,
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoSchedule,
	}

	memoryPressureToleration := v1.Toleration{
		Key:      algorithm.TaintNodeMemoryPressure,
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoSchedule,
	}

	// Case 1: Add MemoryPressure Toleration for non-BestEffort pod.
burstablePod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "burstable-pod",
|
||||
Namespace: nsName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
diskPressureToleration := v1.Toleration{
|
||||
Key: algorithm.TaintNodeDiskPressure,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
}
|
||||
|
||||
networkUnavailableToleration := v1.Toleration{
|
||||
Key: algorithm.TaintNodeNetworkUnavailable,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
}
|
||||
|
||||
pidPressureToleration := v1.Toleration{
|
||||
Key: algorithm.TaintNodePIDPressure,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
}
|
||||
|
||||
bestEffortPod := newPod(nsName, "besteffort-pod", nil, nil)
|
||||
burstablePod := newPod(nsName, "burstable-pod", podRes, nil)
|
||||
guaranteePod := newPod(nsName, "guarantee-pod", podRes, podRes)
|
||||
|
||||
type podCase struct {
|
||||
pod *v1.Pod
|
||||
tolerations []v1.Toleration
|
||||
fits bool
|
||||
}
|
||||
|
||||
// switch to table driven testings
|
||||
tests := []struct {
|
||||
name string
|
||||
existingTaints []v1.Taint
|
||||
nodeConditions []v1.NodeCondition
|
||||
unschedulable bool
|
||||
expectedTaints []v1.Taint
|
||||
pods []podCase
|
||||
}{
|
||||
{
|
||||
name: "not-ready node",
|
||||
nodeConditions: []v1.NodeCondition{
|
||||
{
|
||||
Name: "busybox",
|
||||
Image: "busybox",
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
},
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionFalse,
|
||||
},
|
||||
},
|
||||
expectedTaints: []v1.Taint{
|
||||
{
|
||||
Key: algorithm.TaintNodeNotReady,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
pods: []podCase{
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: guaranteePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
tolerations: []v1.Toleration{notReadyToleration},
|
||||
fits: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unreachable node",
|
||||
existingTaints: []v1.Taint{
|
||||
{
|
||||
Key: algorithm.TaintNodeUnreachable,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
nodeConditions: []v1.NodeCondition{
|
||||
{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionUnknown, // node status is "Unknown"
|
||||
},
|
||||
},
|
||||
expectedTaints: []v1.Taint{
|
||||
{
|
||||
Key: algorithm.TaintNodeUnreachable,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
pods: []podCase{
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: guaranteePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
tolerations: []v1.Toleration{unreachableToleration},
|
||||
fits: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unschedulable node",
|
||||
unschedulable: true, // node.spec.unschedulable = true
|
||||
nodeConditions: []v1.NodeCondition{
|
||||
{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
},
|
||||
expectedTaints: []v1.Taint{
|
||||
{
|
||||
Key: algorithm.TaintNodeUnschedulable,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
pods: []podCase{
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: guaranteePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
tolerations: []v1.Toleration{unschedulableToleration},
|
||||
fits: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "out of disk node",
|
||||
nodeConditions: []v1.NodeCondition{
|
||||
{
|
||||
Type: v1.NodeOutOfDisk,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
},
|
||||
expectedTaints: []v1.Taint{
|
||||
{
|
||||
Key: algorithm.TaintNodeOutOfDisk,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
// In OutOfDisk condition, only pods with toleration can be scheduled.
|
||||
pods: []podCase{
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: guaranteePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
tolerations: []v1.Toleration{outOfDiskToleration},
|
||||
fits: true,
|
||||
},
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
tolerations: []v1.Toleration{diskPressureToleration},
|
||||
fits: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "memory pressure node",
|
||||
nodeConditions: []v1.NodeCondition{
|
||||
{
|
||||
Type: v1.NodeMemoryPressure,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
},
|
||||
expectedTaints: []v1.Taint{
|
||||
{
|
||||
Key: algorithm.TaintNodeMemoryPressure,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
			// In MemoryPressure condition, both Burstable and Guarantee pods are scheduled;
			// BestEffort pods with toleration are also scheduled.
|
||||
pods: []podCase{
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
tolerations: []v1.Toleration{memoryPressureToleration},
|
||||
fits: true,
|
||||
},
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
tolerations: []v1.Toleration{diskPressureToleration},
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
fits: true,
|
||||
},
|
||||
{
|
||||
pod: guaranteePod,
|
||||
fits: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "disk pressure node",
|
||||
nodeConditions: []v1.NodeCondition{
|
||||
{
|
||||
Type: v1.NodeDiskPressure,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
},
|
||||
expectedTaints: []v1.Taint{
|
||||
{
|
||||
Key: algorithm.TaintNodeDiskPressure,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
// In DiskPressure condition, only pods with toleration can be scheduled.
|
||||
pods: []podCase{
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: guaranteePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
tolerations: []v1.Toleration{diskPressureToleration},
|
||||
fits: true,
|
||||
},
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
tolerations: []v1.Toleration{memoryPressureToleration},
|
||||
fits: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "network unavailable and node is ready",
|
||||
nodeConditions: []v1.NodeCondition{
|
||||
{
|
||||
Type: v1.NodeNetworkUnavailable,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
},
|
||||
expectedTaints: []v1.Taint{
|
||||
{
|
||||
Key: algorithm.TaintNodeNetworkUnavailable,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
pods: []podCase{
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: guaranteePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
tolerations: []v1.Toleration{
|
||||
networkUnavailableToleration,
|
||||
},
|
||||
fits: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
burstablePodInServ, err := clientset.CoreV1().Pods(nsName).Create(burstablePod)
|
||||
if err != nil {
|
||||
t.Errorf("Case 1: Failed to create pod: %v", err)
|
||||
} else if !reflect.DeepEqual(burstablePodInServ.Spec.Tolerations, []v1.Toleration{memoryPressureToleration}) {
|
||||
t.Errorf("Case 1: Unexpected toleration of non-BestEffort pod, expected: %+v, got: %v",
|
||||
[]v1.Toleration{memoryPressureToleration},
|
||||
burstablePodInServ.Spec.Tolerations)
|
||||
}
|
||||
|
||||
// Case 2: No MemoryPressure Toleration for BestEffort pod.
|
||||
besteffortPod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "best-effort-pod",
|
||||
Namespace: nsName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "busybox",
|
||||
Image: "busybox",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
besteffortPodInServ, err := clientset.CoreV1().Pods(nsName).Create(besteffortPod)
|
||||
if err != nil {
|
||||
t.Errorf("Case 2: Failed to create pod: %v", err)
|
||||
} else if len(besteffortPodInServ.Spec.Tolerations) != 0 {
|
||||
t.Errorf("Case 2: Unexpected toleration # of BestEffort pod, expected: 0, got: %v",
|
||||
len(besteffortPodInServ.Spec.Tolerations))
|
||||
}
|
||||
|
||||
// Case 3: Taint Node by NetworkUnavailable condition.
|
||||
networkUnavailableNode := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node-1",
|
||||
},
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("4000m"),
|
||||
v1.ResourceMemory: resource.MustParse("16Gi"),
|
||||
v1.ResourcePods: resource.MustParse("110"),
|
||||
},
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("4000m"),
|
||||
v1.ResourceMemory: resource.MustParse("16Gi"),
|
||||
v1.ResourcePods: resource.MustParse("110"),
|
||||
},
|
||||
Conditions: []v1.NodeCondition{
|
||||
{
|
||||
name: "network unavailable and node is not ready",
|
||||
nodeConditions: []v1.NodeCondition{
|
||||
{
|
||||
Type: v1.NodeNetworkUnavailable,
|
||||
Status: v1.ConditionTrue,
|
||||
@@ -198,116 +503,175 @@ func TestTaintNodeByCondition(t *testing.T) {
|
||||
Status: v1.ConditionFalse,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
nodeInformerCh := make(chan bool)
|
||||
nodeInformer := informers.Core().V1().Nodes().Informer()
|
||||
nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
UpdateFunc: func(old, cur interface{}) {
|
||||
curNode := cur.(*v1.Node)
|
||||
if curNode.Name != "node-1" {
|
||||
return
|
||||
}
|
||||
for _, taint := range curNode.Spec.Taints {
|
||||
if taint.Key == algorithm.TaintNodeNetworkUnavailable &&
|
||||
taint.Effect == v1.TaintEffectNoSchedule {
|
||||
nodeInformerCh <- true
|
||||
break
|
||||
}
|
||||
}
|
||||
},
|
||||
})
|
||||
|
||||
if _, err := clientset.CoreV1().Nodes().Create(networkUnavailableNode); err != nil {
|
||||
t.Errorf("Case 3: Failed to create node: %v", err)
|
||||
} else {
|
||||
select {
|
||||
case <-time.After(60 * time.Second):
|
||||
t.Errorf("Case 3: Failed to taint node after 60s.")
|
||||
case <-nodeInformerCh:
|
||||
}
|
||||
}
|
||||
|
||||
// Case 4: Schedule Pod with NetworkUnavailable toleration.
|
||||
networkDaemonPod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "network-daemon-pod",
|
||||
Namespace: nsName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
expectedTaints: []v1.Taint{
|
||||
{
|
||||
Name: "busybox",
|
||||
Image: "busybox",
|
||||
Key: algorithm.TaintNodeNetworkUnavailable,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: algorithm.TaintNodeNotReady,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
Tolerations: []v1.Toleration{
|
||||
pods: []podCase{
|
||||
{
|
||||
Key: algorithm.TaintNodeNetworkUnavailable,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
pod: bestEffortPod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: guaranteePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
tolerations: []v1.Toleration{
|
||||
networkUnavailableToleration,
|
||||
},
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
tolerations: []v1.Toleration{
|
||||
networkUnavailableToleration,
|
||||
notReadyToleration,
|
||||
},
|
||||
fits: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "pid pressure node",
|
||||
nodeConditions: []v1.NodeCondition{
|
||||
{
|
||||
Type: v1.NodePIDPressure,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
},
|
||||
expectedTaints: []v1.Taint{
|
||||
{
|
||||
Key: algorithm.TaintNodePIDPressure,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
pods: []podCase{
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: burstablePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: guaranteePod,
|
||||
fits: false,
|
||||
},
|
||||
{
|
||||
pod: bestEffortPod,
|
||||
tolerations: []v1.Toleration{pidPressureToleration},
|
||||
fits: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multi taints on node",
|
||||
nodeConditions: []v1.NodeCondition{
|
||||
{
|
||||
Type: v1.NodePIDPressure,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
{
|
||||
Type: v1.NodeMemoryPressure,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
{
|
||||
Type: v1.NodeDiskPressure,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
},
|
||||
expectedTaints: []v1.Taint{
|
||||
{
|
||||
Key: algorithm.TaintNodeDiskPressure,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: algorithm.TaintNodeMemoryPressure,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: algorithm.TaintNodePIDPressure,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if _, err := clientset.CoreV1().Pods(nsName).Create(networkDaemonPod); err != nil {
|
||||
t.Errorf("Case 4: Failed to create pod for network daemon: %v", err)
|
||||
} else {
|
||||
if err := waitForPodToScheduleWithTimeout(clientset, networkDaemonPod, time.Second*60); err != nil {
|
||||
t.Errorf("Case 4: Failed to schedule network daemon pod in 60s.")
|
||||
}
|
||||
}
|
||||
|
||||
// Case 5: Taint node by unschedulable condition
|
||||
unschedulableNode := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node-2",
|
||||
},
|
||||
Spec: v1.NodeSpec{
|
||||
Unschedulable: true,
|
||||
},
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("4000m"),
|
||||
v1.ResourceMemory: resource.MustParse("16Gi"),
|
||||
v1.ResourcePods: resource.MustParse("110"),
|
||||
},
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("4000m"),
|
||||
v1.ResourceMemory: resource.MustParse("16Gi"),
|
||||
v1.ResourcePods: resource.MustParse("110"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
nodeInformerCh2 := make(chan bool)
|
||||
nodeInformer2 := informers.Core().V1().Nodes().Informer()
|
||||
nodeInformer2.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
UpdateFunc: func(old, cur interface{}) {
|
||||
curNode := cur.(*v1.Node)
|
||||
if curNode.Name != "node-2" {
|
||||
return
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
node := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node-1",
|
||||
},
|
||||
Spec: v1.NodeSpec{
|
||||
Unschedulable: test.unschedulable,
|
||||
Taints: test.existingTaints,
|
||||
},
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: nodeRes,
|
||||
Allocatable: nodeRes,
|
||||
Conditions: test.nodeConditions,
|
||||
},
|
||||
}
|
||||
|
||||
for _, taint := range curNode.Spec.Taints {
|
||||
if taint.Key == algorithm.TaintNodeUnschedulable &&
|
||||
taint.Effect == v1.TaintEffectNoSchedule {
|
||||
nodeInformerCh2 <- true
|
||||
break
|
||||
if _, err := cs.CoreV1().Nodes().Create(node); err != nil {
|
||||
t.Errorf("Failed to create node, err: %v", err)
|
||||
}
|
||||
if err := waitForNodeTaints(cs, node, test.expectedTaints); err != nil {
|
||||
t.Errorf("Failed to taint node <%s>, err: %v", node.Name, err)
|
||||
}
|
||||
|
||||
var pods []*v1.Pod
|
||||
for i, p := range test.pods {
|
||||
pod := p.pod.DeepCopy()
|
||||
pod.Name = fmt.Sprintf("%s-%d", pod.Name, i)
|
||||
pod.Spec.Tolerations = p.tolerations
|
||||
|
||||
createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(pod)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create pod %s/%s, error: %v",
|
||||
pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
pods = append(pods, createdPod)
|
||||
|
||||
if p.fits {
|
||||
if err := waitForPodToSchedule(cs, createdPod); err != nil {
|
||||
t.Errorf("Failed to schedule pod %s/%s on the node, err: %v",
|
||||
pod.Namespace, pod.Name, err)
|
||||
}
|
||||
} else {
|
||||
if err := waitForPodUnschedulable(cs, createdPod); err != nil {
|
||||
t.Errorf("Unschedulable pod %s/%s gets scheduled on the node, err: %v",
|
||||
pod.Namespace, pod.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
})
|
||||
|
||||
if _, err := clientset.CoreV1().Nodes().Create(unschedulableNode); err != nil {
|
||||
t.Errorf("Case 5: Failed to create node: %v", err)
|
||||
} else {
|
||||
select {
|
||||
case <-time.After(60 * time.Second):
|
||||
t.Errorf("Case 5: Failed to taint node after 60s.")
|
||||
case <-nodeInformerCh2:
|
||||
}
|
||||
cleanupPods(cs, t, pods)
|
||||
cleanupNodes(cs, t)
|
||||
waitForSchedulerCacheCleanup(context.scheduler, t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
162
vendor/k8s.io/kubernetes/test/integration/scheduler/util.go
generated
vendored
@@ -51,6 +51,7 @@ import (
	_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
	"k8s.io/kubernetes/pkg/scheduler/factory"
	taintutils "k8s.io/kubernetes/pkg/util/taints"
	"k8s.io/kubernetes/test/integration/framework"
	imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -73,23 +74,25 @@ func createConfiguratorWithPodInformer(
	podInformer coreinformers.PodInformer,
	informerFactory informers.SharedInformerFactory,
) scheduler.Configurator {
	return factory.NewConfigFactory(
		schedulerName,
		clientSet,
		informerFactory.Core().V1().Nodes(),
		podInformer,
		informerFactory.Core().V1().PersistentVolumes(),
		informerFactory.Core().V1().PersistentVolumeClaims(),
		informerFactory.Core().V1().ReplicationControllers(),
		informerFactory.Extensions().V1beta1().ReplicaSets(),
		informerFactory.Apps().V1beta1().StatefulSets(),
		informerFactory.Core().V1().Services(),
		informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
		informerFactory.Storage().V1().StorageClasses(),
		v1.DefaultHardPodAffinitySymmetricWeight,
		utilfeature.DefaultFeatureGate.Enabled(features.EnableEquivalenceClassCache),
		false,
	)
	return factory.NewConfigFactory(&factory.ConfigFactoryArgs{
		SchedulerName:                  schedulerName,
		Client:                         clientSet,
		NodeInformer:                   informerFactory.Core().V1().Nodes(),
		PodInformer:                    podInformer,
		PvInformer:                     informerFactory.Core().V1().PersistentVolumes(),
		PvcInformer:                    informerFactory.Core().V1().PersistentVolumeClaims(),
		ReplicationControllerInformer:  informerFactory.Core().V1().ReplicationControllers(),
		ReplicaSetInformer:             informerFactory.Apps().V1().ReplicaSets(),
		StatefulSetInformer:            informerFactory.Apps().V1().StatefulSets(),
		ServiceInformer:                informerFactory.Core().V1().Services(),
		PdbInformer:                    informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
		StorageClassInformer:           informerFactory.Storage().V1().StorageClasses(),
		HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
		EnableEquivalenceClassCache:    utilfeature.DefaultFeatureGate.Enabled(features.EnableEquivalenceClassCache),
		DisablePreemption:              false,
		PercentageOfNodesToScore:       schedulerapi.DefaultPercentageOfNodesToScore,
		BindTimeoutSeconds:             600,
	})
}
// initTestMasterAndScheduler initializes a test environment and creates a master with default
@@ -141,7 +144,7 @@ func initTestScheduler(
) *TestContext {
	// Pod preemption is enabled by default scheduler configuration, but preemption only happens when PodPriority
	// feature gate is enabled at the same time.
	return initTestSchedulerWithOptions(t, context, controllerCh, setPodInformer, policy, false)
	return initTestSchedulerWithOptions(t, context, controllerCh, setPodInformer, policy, false, false, time.Second)
}

// initTestSchedulerWithOptions initializes a test environment and creates a scheduler with default
@@ -153,15 +156,18 @@ func initTestSchedulerWithOptions(
	setPodInformer bool,
	policy *schedulerapi.Policy,
	disablePreemption bool,
	disableEquivalenceCache bool,
	resyncPeriod time.Duration,
) *TestContext {
	// Enable EnableEquivalenceClassCache for all integration tests.
	defer utilfeaturetesting.SetFeatureGateDuringTest(
		t,
		utilfeature.DefaultFeatureGate,
		features.EnableEquivalenceClassCache, true)()
	if !disableEquivalenceCache {
		defer utilfeaturetesting.SetFeatureGateDuringTest(
			t,
			utilfeature.DefaultFeatureGate,
			features.EnableEquivalenceClassCache, true)()
	}

	// 1. Create scheduler
	context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, time.Second)
	context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, resyncPeriod)

	var podInformer coreinformers.PodInformer

@@ -253,7 +259,7 @@ func initTest(t *testing.T, nsPrefix string) *TestContext {
// configuration but with pod preemption disabled.
func initTestDisablePreemption(t *testing.T, nsPrefix string) *TestContext {
	return initTestSchedulerWithOptions(
		t, initTestMaster(t, nsPrefix, nil), nil, true, nil, true)
		t, initTestMaster(t, nsPrefix, nil), nil, true, nil, true, false, time.Second)
}
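The widened initTestSchedulerWithOptions signature now threads disablePreemption, disableEquivalenceCache and resyncPeriod through explicitly. A hedged example call with the new trailing arguments (the namespace prefix is hypothetical):

	context := initTestSchedulerWithOptions(
		t, initTestMaster(t, "my-test", nil),
		nil,   // controllerCh
		true,  // setPodInformer
		nil,   // policy
		false, // disablePreemption
		true,  // disableEquivalenceCache
		0)     // resyncPeriod: no resync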
// cleanupTest deletes the scheduler and the test namespace. It should be called
@@ -322,24 +328,35 @@ func waitForNodeLabels(cs clientset.Interface, nodeName string, labels map[strin
	return wait.Poll(time.Millisecond*100, wait.ForeverTestTimeout, nodeHasLabels(cs, nodeName, labels))
}

// createNode creates a node with the given resource list and
// returns a pointer and error status. If 'res' is nil, a predefined amount of
// initNode returns a node with the given resource list and images. If 'res' is nil, a predefined amount of
// resource will be used.
func createNode(cs clientset.Interface, name string, res *v1.ResourceList) (*v1.Node, error) {
func initNode(name string, res *v1.ResourceList, images []v1.ContainerImage) *v1.Node {
	// if resource is nil, we use a default amount of resources for the node.
	if res == nil {
		res = &v1.ResourceList{
			v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
		}
	}

	n := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec:       v1.NodeSpec{Unschedulable: false},
		Status: v1.NodeStatus{
			Capacity: *res,
			Images:   images,
		},
	}
	return cs.CoreV1().Nodes().Create(n)
	return n
}

// createNode creates a node with the given resource list.
func createNode(cs clientset.Interface, name string, res *v1.ResourceList) (*v1.Node, error) {
	return cs.CoreV1().Nodes().Create(initNode(name, res, nil))
}

// createNodeWithImages creates a node with the given resource list and images.
func createNodeWithImages(cs clientset.Interface, name string, res *v1.ResourceList, images []v1.ContainerImage) (*v1.Node, error) {
	return cs.CoreV1().Nodes().Create(initNode(name, res, images))
}

// updateNodeStatus updates the status of node.
@@ -363,6 +380,44 @@ func createNodes(cs clientset.Interface, prefix string, res *v1.ResourceList, nu
	return nodes[:], nil
}
// nodeTainted returns a condition function that returns true if the given node contains
// the taints.
func nodeTainted(cs clientset.Interface, nodeName string, taints []v1.Taint) wait.ConditionFunc {
	return func() (bool, error) {
		node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}

		if len(taints) != len(node.Spec.Taints) {
			return false, nil
		}

		for _, taint := range taints {
			if !taintutils.TaintExists(node.Spec.Taints, &taint) {
				return false, nil
			}
		}

		return true, nil
	}
}

// waitForNodeTaints waits for a node to have the target taints and returns
// an error if it does not have taints within the given timeout.
func waitForNodeTaints(cs clientset.Interface, node *v1.Node, taints []v1.Taint) error {
	return wait.Poll(100*time.Millisecond, 30*time.Second, nodeTainted(cs, node.Name, taints))
}

// cleanupNodes deletes all nodes.
func cleanupNodes(cs clientset.Interface, t *testing.T) {
	err := cs.CoreV1().Nodes().DeleteCollection(
		metav1.NewDeleteOptions(0), metav1.ListOptions{})
	if err != nil {
		t.Errorf("error while deleting all nodes: %v", err)
	}
}
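waitForNodeTaints pairs naturally with the expectedTaints tables in taint_test.go; a minimal usage sketch, assuming a node created via createNode above:

	expected := []v1.Taint{{
		Key:    algorithm.TaintNodeNotReady,
		Effect: v1.TaintEffectNoSchedule,
	}}
	if err := waitForNodeTaints(cs, node, expected); err != nil {
		t.Errorf("node %s never reached taints %v: %v", node.Name, expected, err)
	}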
type pausePodConfig struct {
	Name      string
	Namespace string
@@ -451,6 +506,43 @@ func runPausePod(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error) {
	return pod, nil
}
type podWithContainersConfig struct {
	Name       string
	Namespace  string
	Containers []v1.Container
}

// initPodWithContainers initializes a pod API object from the given config. This is used primarily for generating
// pods with containers each having a specific image.
func initPodWithContainers(cs clientset.Interface, conf *podWithContainersConfig) *v1.Pod {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      conf.Name,
			Namespace: conf.Namespace,
		},
		Spec: v1.PodSpec{
			Containers: conf.Containers,
		},
	}
	return pod
}

// runPodWithContainers creates a pod with given config and containers and waits
// until it is scheduled. It returns its pointer and error status.
func runPodWithContainers(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error) {
	pod, err := cs.CoreV1().Pods(pod.Namespace).Create(pod)
	if err != nil {
		return nil, fmt.Errorf("Error creating pod-with-containers: %v", err)
	}
	if err = waitForPodToSchedule(cs, pod); err != nil {
		return pod, fmt.Errorf("Pod %v didn't schedule successfully. Error: %v", pod.Name, err)
	}
	if pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err != nil {
		return pod, fmt.Errorf("Error getting pod %v info: %v", pod.Name, err)
	}
	return pod, nil
}
// podDeleted returns true if a pod is not found in the given namespace.
func podDeleted(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
	return func() (bool, error) {
@@ -638,3 +730,15 @@ func cleanupPodsInNamespace(cs clientset.Interface, t *testing.T, ns string) {
		t.Errorf("error while waiting for pods in namespace %v: %v", ns, err)
	}
}

func waitForSchedulerCacheCleanup(sched *scheduler.Scheduler, t *testing.T) {
	schedulerCacheIsEmpty := func() (bool, error) {
		snapshot := sched.Cache().Snapshot()

		return len(snapshot.Nodes) == 0 && len(snapshot.AssumedPods) == 0, nil
	}

	if err := wait.Poll(time.Second, wait.ForeverTestTimeout, schedulerCacheIsEmpty); err != nil {
		t.Errorf("Failed to wait for scheduler cache cleanup: %v", err)
	}
}
432
vendor/k8s.io/kubernetes/test/integration/scheduler/volume_binding_test.go
generated
vendored
@@ -20,6 +20,7 @@ package scheduler

import (
	"fmt"
	"os"
	"strconv"
	"strings"
	"testing"
@@ -32,10 +33,17 @@ import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/rand"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
	persistentvolumeoptions "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/options"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
	imageutils "k8s.io/kubernetes/test/utils/image"
)
type testConfig struct {
@@ -55,14 +63,21 @@ var (

	classWait      = "wait"
	classImmediate = "immediate"
	classDynamic   = "dynamic"

	sharedClasses = map[storagev1.VolumeBindingMode]*storagev1.StorageClass{
		modeImmediate: makeStorageClass(classImmediate, &modeImmediate),
		modeWait:      makeStorageClass(classWait, &modeWait),
	}
)

const (
	node1                = "node-1"
	node2                = "node-2"
	podLimit             = 100
	volsPerPod           = 5
	nodeAffinityLabelKey = "kubernetes.io/hostname"
	node1                 = "node-1"
	node2                 = "node-2"
	podLimit              = 100
	volsPerPod            = 5
	nodeAffinityLabelKey  = "kubernetes.io/hostname"
	provisionerPluginName = "kubernetes.io/mock-provisioner"
)
type testPV struct {
@@ -79,7 +94,11 @@ type testPVC struct {
}

func TestVolumeBinding(t *testing.T) {
	config := setupCluster(t, "volume-scheduling", 2)
	features := map[string]bool{
		"VolumeScheduling":       true,
		"PersistentLocalVolumes": true,
	}
	config := setupCluster(t, "volume-scheduling-", 2, features, 0, 0, false)
	defer config.teardown()

	cases := map[string]struct {
@@ -246,26 +265,194 @@ func TestVolumeBinding(t *testing.T) {
	}
}

// TestVolumeBindingStress creates <podLimit> pods, each with <volsPerPod> unbound PVCs.
func TestVolumeBindingStress(t *testing.T) {
	config := setupCluster(t, "volume-binding-stress", 1)
// TestVolumeBindingRescheduling tests scheduler will retry scheduling when needed.
func TestVolumeBindingRescheduling(t *testing.T) {
	features := map[string]bool{
		"VolumeScheduling":       true,
		"PersistentLocalVolumes": true,
	}
	config := setupCluster(t, "volume-scheduling-", 2, features, 0, 0, false)
	defer config.teardown()

	storageClassName := "local-storage"

	cases := map[string]struct {
		pod        *v1.Pod
		pvcs       []*testPVC
		pvs        []*testPV
		trigger    func(config *testConfig)
		shouldFail bool
	}{
		"reschedule on WaitForFirstConsumer dynamic storage class add": {
			pod: makePod("pod-reschedule-onclassadd-dynamic", config.ns, []string{"pvc-reschedule-onclassadd-dynamic"}),
			pvcs: []*testPVC{
				{"pvc-reschedule-onclassadd-dynamic", "", ""},
			},
			trigger: func(config *testConfig) {
				sc := makeDynamicProvisionerStorageClass(storageClassName, &modeWait)
				if _, err := config.client.StorageV1().StorageClasses().Create(sc); err != nil {
					t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err)
				}
			},
			shouldFail: false,
		},
		"reschedule on WaitForFirstConsumer static storage class add": {
			pod: makePod("pod-reschedule-onclassadd-static", config.ns, []string{"pvc-reschedule-onclassadd-static"}),
			pvcs: []*testPVC{
				{"pvc-reschedule-onclassadd-static", "", ""},
			},
			trigger: func(config *testConfig) {
				sc := makeStorageClass(storageClassName, &modeWait)
				if _, err := config.client.StorageV1().StorageClasses().Create(sc); err != nil {
					t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err)
				}
				// Create pv for this class to mock static provisioner behavior.
				pv := makePV("pv-reschedule-onclassadd-static", storageClassName, "", "", node1)
				if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
					t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
				}
			},
			shouldFail: false,
		},
		"reschedule on delay binding PVC add": {
			pod: makePod("pod-reschedule-onpvcadd", config.ns, []string{"pvc-reschedule-onpvcadd"}),
			pvs: []*testPV{
				{
					name:   "pv-reschedule-onpvcadd",
					scMode: modeWait,
					node:   node1,
				},
			},
			trigger: func(config *testConfig) {
				pvc := makePVC("pvc-reschedule-onpvcadd", config.ns, &classWait, "")
				if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {
					t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err)
				}
			},
			shouldFail: false,
		},
	}
for name, test := range cases {
|
||||
glog.Infof("Running test %v", name)
|
||||
|
||||
if test.pod == nil {
|
||||
t.Fatal("pod is required for this test")
|
||||
}
|
||||
|
||||
// Create unbound pvc
|
||||
for _, pvcConfig := range test.pvcs {
|
||||
pvc := makePVC(pvcConfig.name, config.ns, &storageClassName, "")
|
||||
if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {
|
||||
t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create PVs
|
||||
for _, pvConfig := range test.pvs {
|
||||
pv := makePV(pvConfig.name, sharedClasses[pvConfig.scMode].Name, pvConfig.preboundPVC, config.ns, pvConfig.node)
|
||||
if _, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
|
||||
t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create pod
|
||||
if _, err := config.client.CoreV1().Pods(config.ns).Create(test.pod); err != nil {
|
||||
t.Fatalf("Failed to create Pod %q: %v", test.pod.Name, err)
|
||||
}
|
||||
|
||||
// Wait for pod is unschedulable.
|
||||
glog.Infof("Waiting for pod is unschedulable")
|
||||
if err := waitForPodUnschedulable(config.client, test.pod); err != nil {
|
||||
t.Errorf("Failed as Pod %s was not unschedulable: %v", test.pod.Name, err)
|
||||
}
|
||||
|
||||
// Trigger
|
||||
test.trigger(config)
|
||||
|
||||
// Wait for pod is scheduled or unscheduable.
|
||||
if !test.shouldFail {
|
||||
glog.Infof("Waiting for pod is scheduled")
|
||||
if err := waitForPodToSchedule(config.client, test.pod); err != nil {
|
||||
t.Errorf("Failed to schedule Pod %q: %v", test.pod.Name, err)
|
||||
}
|
||||
} else {
|
||||
glog.Infof("Waiting for pod is unschedulable")
|
||||
if err := waitForPodUnschedulable(config.client, test.pod); err != nil {
|
||||
t.Errorf("Failed as Pod %s was not unschedulable: %v", test.pod.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Force delete objects, but they still may not be immediately removed
|
||||
deleteTestObjects(config.client, config.ns, deleteOption)
|
||||
}
|
||||
}
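
The wait helpers used in the loop above (waitForPodToSchedule, waitForPodUnschedulable) poll the API server until the scheduler has acted on the pod. A minimal sketch of such a helper, assuming the wait, metav1, and clientset packages already imported by this file; the helper name and the condition it checks are assumptions, not necessarily what this file implements:

// waitForPodToScheduleSketch polls until the scheduler has assigned the pod
// to a node (hypothetical helper, not part of the commit).
func waitForPodToScheduleSketch(c clientset.Interface, pod *v1.Pod) error {
    return wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
        p, err := c.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        // The scheduler sets spec.nodeName once the pod is bound to a node.
        return p.Spec.NodeName != "", nil
    })
}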

// TestVolumeBindingStress creates <podLimit> pods, each with <volsPerPod> unbound PVCs.
// PVs are precreated.
func TestVolumeBindingStress(t *testing.T) {
    testVolumeBindingStress(t, 0, false, 0)
}

// Like TestVolumeBindingStress but with scheduler resync. In a real cluster,
// the scheduler will reschedule failed pods frequently due to various events,
// e.g. service/node update events.
// This is useful to detect possible race conditions.
func TestVolumeBindingStressWithSchedulerResync(t *testing.T) {
    testVolumeBindingStress(t, time.Second, false, 0)
}

// Like TestVolumeBindingStress but with fast dynamic provisioning
func TestVolumeBindingDynamicStressFast(t *testing.T) {
    testVolumeBindingStress(t, 0, true, 0)
}

// Like TestVolumeBindingStress but with slow dynamic provisioning
func TestVolumeBindingDynamicStressSlow(t *testing.T) {
    testVolumeBindingStress(t, 0, true, 30)
}
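
The four wrappers above differ only in the arguments they forward. A sketch (not part of this commit) of the same matrix in table-driven form, assuming the testVolumeBindingStress signature shown above; the case names are made up:

func TestVolumeBindingStressMatrix(t *testing.T) {
    cases := map[string]struct {
        resync    time.Duration
        dynamic   bool
        delaySecs int
    }{
        "precreated-pvs":   {0, false, 0},
        "scheduler-resync": {time.Second, false, 0},
        "dynamic-fast":     {0, true, 0},
        "dynamic-slow":     {0, true, 30},
    }
    for name, c := range cases {
        t.Run(name, func(t *testing.T) {
            testVolumeBindingStress(t, c.resync, c.dynamic, c.delaySecs)
        })
    }
}

Keeping them as separate top-level tests, as the commit does, lets each variant be run and reported independently with go test -run.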

func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration, dynamic bool, provisionDelaySeconds int) {
    features := map[string]bool{
        "VolumeScheduling":       true,
        "PersistentLocalVolumes": true,
    }
    config := setupCluster(t, "volume-binding-stress-", 1, features, schedulerResyncPeriod, provisionDelaySeconds, false)
    defer config.teardown()

    // Set max volume limit to the number of PVCs the test will create
    // TODO: remove when max volume limit allows setting through storageclass
    if err := os.Setenv(predicates.KubeMaxPDVols, fmt.Sprintf("%v", podLimit*volsPerPod)); err != nil {
        t.Fatalf("failed to set max pd limit: %v", err)
    }
    defer os.Unsetenv(predicates.KubeMaxPDVols)

    scName := &classWait
    if dynamic {
        scName = &classDynamic
        sc := makeDynamicProvisionerStorageClass(*scName, &modeWait)
        if _, err := config.client.StorageV1().StorageClasses().Create(sc); err != nil {
            t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err)
        }
    }

    // Create enough PVs and PVCs for all the pods
    pvs := []*v1.PersistentVolume{}
    pvcs := []*v1.PersistentVolumeClaim{}
    for i := 0; i < podLimit*volsPerPod; i++ {
        pv := makePV(fmt.Sprintf("pv-stress-%v", i), classWait, "", "", node1)
        pvc := makePVC(fmt.Sprintf("pvc-stress-%v", i), config.ns, &classWait, "")

        if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
            t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
        // Don't create pvs for dynamic provisioning test
        if !dynamic {
            pv := makePV(fmt.Sprintf("pv-stress-%v", i), *scName, "", "", node1)
            if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
                t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
            }
            pvs = append(pvs, pv)
        }

        pvc := makePVC(fmt.Sprintf("pvc-stress-%v", i), config.ns, scName, "")
        if pvc, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {
            t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err)
        }

        pvs = append(pvs, pv)
        pvcs = append(pvcs, pvc)
    }

@@ -277,7 +464,7 @@ func TestVolumeBindingStress(t *testing.T) {
            podPvcs = append(podPvcs, pvcs[j].Name)
        }

        pod := makePod(fmt.Sprintf("pod%v", i), config.ns, podPvcs)
        pod := makePod(fmt.Sprintf("pod%03d", i), config.ns, podPvcs)
        if pod, err := config.client.CoreV1().Pods(config.ns).Create(pod); err != nil {
            t.Fatalf("Failed to create Pod %q: %v", pod.Name, err)
        }
@@ -288,7 +475,7 @@ func TestVolumeBindingStress(t *testing.T) {
    for _, pod := range pods {
        // Use increased timeout for stress test because there is a higher chance of
        // PV sync error
        if err := waitForPodToScheduleWithTimeout(config.client, pod, 60*time.Second); err != nil {
        if err := waitForPodToScheduleWithTimeout(config.client, pod, 2*time.Minute); err != nil {
            t.Errorf("Failed to schedule Pod %q: %v", pod.Name, err)
        }
    }
@@ -302,8 +489,142 @@ func TestVolumeBindingStress(t *testing.T) {
    }
}

func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, numPVsFirstNode int) {
    features := map[string]bool{
        "VolumeScheduling":       true,
        "PersistentLocalVolumes": true,
    }
    // TODO: disable equivalence cache until kubernetes/kubernetes#67680 is fixed
    config := setupCluster(t, "volume-pod-affinity-", numNodes, features, 0, 0, true)
    defer config.teardown()

    pods := []*v1.Pod{}
    pvcs := []*v1.PersistentVolumeClaim{}
    pvs := []*v1.PersistentVolume{}

    // Create PVs for the first node
    for i := 0; i < numPVsFirstNode; i++ {
        pv := makePV(fmt.Sprintf("pv-node1-%v", i), classWait, "", "", node1)
        if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
            t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
        }
        pvs = append(pvs, pv)
    }

    // Create 1 PV per Node for the remaining nodes
    for i := 2; i <= numNodes; i++ {
        pv := makePV(fmt.Sprintf("pv-node%v-0", i), classWait, "", "", fmt.Sprintf("node-%v", i))
        if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
            t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
        }
        pvs = append(pvs, pv)
    }

    // Create pods
    for i := 0; i < numPods; i++ {
        // Create one pvc per pod
        pvc := makePVC(fmt.Sprintf("pvc-%v", i), config.ns, &classWait, "")
        if pvc, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {
            t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err)
        }
        pvcs = append(pvcs, pvc)

        // Create pod with pod affinity
        pod := makePod(fmt.Sprintf("pod%03d", i), config.ns, []string{pvc.Name})
        pod.Spec.Affinity = &v1.Affinity{}
        affinityTerms := []v1.PodAffinityTerm{
            {
                LabelSelector: &metav1.LabelSelector{
                    MatchExpressions: []metav1.LabelSelectorRequirement{
                        {
                            Key:      "app",
                            Operator: metav1.LabelSelectorOpIn,
                            Values:   []string{"volume-binding-test"},
                        },
                    },
                },
                TopologyKey: nodeAffinityLabelKey,
            },
        }
        if anti {
            pod.Spec.Affinity.PodAntiAffinity = &v1.PodAntiAffinity{
                RequiredDuringSchedulingIgnoredDuringExecution: affinityTerms,
            }
        } else {
            pod.Spec.Affinity.PodAffinity = &v1.PodAffinity{
                RequiredDuringSchedulingIgnoredDuringExecution: affinityTerms,
            }
        }

        if pod, err := config.client.CoreV1().Pods(config.ns).Create(pod); err != nil {
            t.Fatalf("Failed to create Pod %q: %v", pod.Name, err)
        }
        pods = append(pods, pod)
    }

    // Validate Pods scheduled
    scheduledNodes := sets.NewString()
    for _, pod := range pods {
        if err := waitForPodToSchedule(config.client, pod); err != nil {
            t.Errorf("Failed to schedule Pod %q: %v", pod.Name, err)
        } else {
            // Keep track of all the nodes that the Pods were scheduled on
            pod, err = config.client.CoreV1().Pods(config.ns).Get(pod.Name, metav1.GetOptions{})
            if err != nil {
                t.Fatalf("Failed to get Pod %q: %v", pod.Name, err)
            }
            if pod.Spec.NodeName == "" {
                t.Fatalf("Pod %q node name unset after scheduling", pod.Name)
            }
            scheduledNodes.Insert(pod.Spec.NodeName)
        }
    }

    // Validate the affinity policy
    if anti {
        // The pods should have been spread across different nodes
        if scheduledNodes.Len() != numPods {
            t.Errorf("Pods were scheduled across %v nodes instead of %v", scheduledNodes.Len(), numPods)
        }
    } else {
        // The pods should have been scheduled on 1 node
        if scheduledNodes.Len() != 1 {
            t.Errorf("Pods were scheduled across %v nodes instead of %v", scheduledNodes.Len(), 1)
        }
    }

    // Validate PVC binding
    for _, pvc := range pvcs {
        validatePVCPhase(t, config.client, pvc.Name, config.ns, v1.ClaimBound)
    }
}

func TestVolumeBindingWithAntiAffinity(t *testing.T) {
    numNodes := 10
    // Create as many pods as number of nodes
    numPods := numNodes
    // Create many more PVs on node1 to increase chance of selecting node1
    numPVsFirstNode := 10 * numNodes

    testVolumeBindingWithAffinity(t, true, numNodes, numPods, numPVsFirstNode)
}

func TestVolumeBindingWithAffinity(t *testing.T) {
    numPods := 10
    // Create many more nodes to increase chance of selecting a PV on a different node than node1
    numNodes := 10 * numPods
    // Create numPods PVs on the first node
    numPVsFirstNode := numPods

    testVolumeBindingWithAffinity(t, false, numNodes, numPods, numPVsFirstNode)
}

func TestPVAffinityConflict(t *testing.T) {
    config := setupCluster(t, "volume-scheduling", 3)
    features := map[string]bool{
        "VolumeScheduling":       true,
        "PersistentLocalVolumes": true,
    }
    config := setupCluster(t, "volume-scheduling-", 3, features, 0, 0, false)
    defer config.teardown()

    pv := makePV("local-pv", classImmediate, "", "", node1)
@@ -361,30 +682,52 @@ func TestPVAffinityConflict(t *testing.T) {
    }
}

func setupCluster(t *testing.T, nsName string, numberOfNodes int) *testConfig {
    // Enable feature gates
    utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true,PersistentLocalVolumes=true")
func setupCluster(t *testing.T, nsName string, numberOfNodes int, features map[string]bool, resyncPeriod time.Duration, provisionDelaySeconds int, disableEquivalenceCache bool) *testConfig {
    oldFeatures := make(map[string]bool, len(features))
    for feature := range features {
        oldFeatures[feature] = utilfeature.DefaultFeatureGate.Enabled(utilfeature.Feature(feature))
    }
    // Set feature gates
    utilfeature.DefaultFeatureGate.SetFromMap(features)

    controllerCh := make(chan struct{})

    context := initTestScheduler(t, initTestMaster(t, nsName, nil), controllerCh, false, nil)
    context := initTestSchedulerWithOptions(t, initTestMaster(t, nsName, nil), controllerCh, false, nil, false, disableEquivalenceCache, resyncPeriod)

    clientset := context.clientSet
    ns := context.ns.Name
    informers := context.informerFactory
    // Informer factory for controllers; we disable the resync period for testing.
    informerFactory := informers.NewSharedInformerFactory(context.clientSet, 0)

    // Start PV controller for volume binding.
    host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)
    plugin := &volumetest.FakeVolumePlugin{
        PluginName:             provisionerPluginName,
        Host:                   host,
        Config:                 volume.VolumeConfig{},
        LastProvisionerOptions: volume.VolumeOptions{},
        ProvisionDelaySeconds:  provisionDelaySeconds,
        NewAttacherCallCount:   0,
        NewDetacherCallCount:   0,
        Mounters:               nil,
        Unmounters:             nil,
        Attachers:              nil,
        Detachers:              nil,
    }
    plugins := []volume.VolumePlugin{plugin}

    controllerOptions := persistentvolumeoptions.NewPersistentVolumeControllerOptions()
    params := persistentvolume.ControllerParameters{
        KubeClient:    clientset,
        SyncPeriod:    time.Hour, // test shouldn't need to resync
        VolumePlugins: nil, // TODO: need later for dynamic provisioning
        SyncPeriod:    controllerOptions.PVClaimBinderSyncPeriod,
        VolumePlugins: plugins,
        Cloud:         nil,
        ClusterName:   "volume-test-cluster",
        VolumeInformer: informers.Core().V1().PersistentVolumes(),
        ClaimInformer:  informers.Core().V1().PersistentVolumeClaims(),
        ClassInformer:  informers.Storage().V1().StorageClasses(),
        PodInformer:    informers.Core().V1().Pods(),
        NodeInformer:   informers.Core().V1().Nodes(),
        VolumeInformer: informerFactory.Core().V1().PersistentVolumes(),
        ClaimInformer:  informerFactory.Core().V1().PersistentVolumeClaims(),
        ClassInformer:  informerFactory.Storage().V1().StorageClasses(),
        PodInformer:    informerFactory.Core().V1().Pods(),
        NodeInformer:   informerFactory.Core().V1().Nodes(),
        EnableDynamicProvisioning: true,
    }
    ctrl, err := persistentvolume.NewController(params)
@@ -392,6 +735,9 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int) *testConfig {
        t.Fatalf("Failed to create PV controller: %v", err)
    }
    go ctrl.Run(controllerCh)
    // Start informer factory after all controllers are configured and running.
    informerFactory.Start(controllerCh)
    informerFactory.WaitForCacheSync(controllerCh)

    // Create shared objects
    // Create nodes
@@ -422,11 +768,7 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int) *testConfig {
    }

    // Create SCs
    scs := []*storagev1.StorageClass{
        makeStorageClass(classImmediate, &modeImmediate),
        makeStorageClass(classWait, &modeWait),
    }
    for _, sc := range scs {
    for _, sc := range sharedClasses {
        if _, err := clientset.StorageV1().StorageClasses().Create(sc); err != nil {
            t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err)
        }
@@ -439,7 +781,8 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int) *testConfig {
        teardown: func() {
            deleteTestObjects(clientset, ns, nil)
            cleanupTest(t, context)
            utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false,LocalPersistentVolumes=false")
            // Restore feature gates
            utilfeature.DefaultFeatureGate.SetFromMap(oldFeatures)
        },
    }
}
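
The oldFeatures/SetFromMap bookkeeping above is a reusable pattern: record the current gate values, flip them for the test, and restore them in teardown so tests do not leak state into each other. A standalone sketch of the same pattern under the same assumptions (utilfeature from k8s.io/apiserver/pkg/util/feature); the helper name is hypothetical:

// withFeatureGates flips the given gates and returns a function that
// restores the previous values (hypothetical helper, not part of the commit).
func withFeatureGates(t *testing.T, features map[string]bool) (restore func()) {
    old := make(map[string]bool, len(features))
    for f := range features {
        old[f] = utilfeature.DefaultFeatureGate.Enabled(utilfeature.Feature(f))
    }
    if err := utilfeature.DefaultFeatureGate.SetFromMap(features); err != nil {
        t.Fatalf("failed to set feature gates: %v", err)
    }
    return func() {
        if err := utilfeature.DefaultFeatureGate.SetFromMap(old); err != nil {
            t.Fatalf("failed to restore feature gates: %v", err)
        }
    }
}

A test would call defer withFeatureGates(t, features)() before exercising gated behavior.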
@@ -461,6 +804,16 @@ func makeStorageClass(name string, mode *storagev1.VolumeBindingMode) *storagev1
    }
}

func makeDynamicProvisionerStorageClass(name string, mode *storagev1.VolumeBindingMode) *storagev1.StorageClass {
    return &storagev1.StorageClass{
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
        },
        Provisioner:       provisionerPluginName,
        VolumeBindingMode: mode,
    }
}

func makePV(name, scName, pvcName, ns, node string) *v1.PersistentVolume {
    pv := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{
@@ -543,12 +896,15 @@ func makePod(name, ns string, pvcs []string) *v1.Pod {
        ObjectMeta: metav1.ObjectMeta{
            Name:      name,
            Namespace: ns,
            Labels: map[string]string{
                "app": "volume-binding-test",
            },
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name:    "write-pod",
                    Image:   "k8s.gcr.io/busybox:1.24",
                    Image:   imageutils.GetE2EImage(imageutils.BusyBox),
                    Command: []string{"/bin/sh"},
                    Args:    []string{"-c", "while true; do sleep 1; done"},
                },

14
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/BUILD
generated
vendored
14
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/BUILD
generated
vendored
@@ -16,10 +16,10 @@ go_library(
    deps = [
        "//pkg/scheduler:go_default_library",
        "//pkg/scheduler/algorithmprovider:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//test/integration/util:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)

@@ -36,13 +36,13 @@ go_test(
    deps = [
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/scheduler:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//test/integration/framework:go_default_library",
        "//test/utils:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
    ],
)

6
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/scheduler_test.go
generated
vendored
6
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/scheduler_test.go
generated
vendored
@@ -162,8 +162,12 @@ func schedulePods(config *testConfig) int32 {
    // return the worst-case-scenario interval that was seen during this time.
    // Note this should never be low due to cold-start, so allow bake in sched time if necessary.
    if len(scheduled) >= config.numPods {
        consumed := int(time.Since(start) / time.Second)
        if consumed <= 0 {
            consumed = 1
        }
        fmt.Printf("Scheduled %v Pods in %v seconds (%v per second on average). min QPS was %v\n",
            config.numPods, int(time.Since(start)/time.Second), config.numPods/int(time.Since(start)/time.Second), minQps)
            config.numPods, consumed, config.numPods/consumed, minQps)
        return minQps
    }
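
The hunk above guards a division: when all pods are scheduled in under one second, int(time.Since(start) / time.Second) truncates to 0 and the old average computation would divide by zero. A self-contained illustration of the clamped arithmetic (the values are made up):

package main

import (
    "fmt"
    "time"
)

func main() {
    start := time.Now().Add(-500 * time.Millisecond) // pretend scheduling took 0.5s
    numPods := 100

    consumed := int(time.Since(start) / time.Second) // truncates to 0 here
    if consumed <= 0 {
        consumed = 1 // clamp so the average below cannot divide by zero
    }
    fmt.Printf("Scheduled %v Pods in %v seconds (%v per second on average)\n",
        numPods, consumed, numPods/consumed)
}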

10
vendor/k8s.io/kubernetes/test/integration/secrets/BUILD
generated
vendored
10
vendor/k8s.io/kubernetes/test/integration/secrets/BUILD
generated
vendored
@@ -14,13 +14,13 @@ go_test(
    ],
    tags = ["integration"],
    deps = [
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//test/integration:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)

35
vendor/k8s.io/kubernetes/test/integration/serviceaccount/BUILD
generated
vendored
35
vendor/k8s.io/kubernetes/test/integration/serviceaccount/BUILD
generated
vendored
@@ -14,30 +14,27 @@ go_test(
    ],
    tags = ["integration"],
    deps = [
        "//pkg/client/clientset_generated/internalclientset:go_default_library",
        "//pkg/client/informers/informers_generated/internalversion:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/controller/serviceaccount:go_default_library",
        "//pkg/serviceaccount:go_default_library",
        "//pkg/util/metrics:go_default_library",
        "//plugin/pkg/admission/serviceaccount:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/request/bearertoken:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/request/union:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/request/union:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)

22
vendor/k8s.io/kubernetes/test/integration/serviceaccount/service_account_test.go
generated
vendored
22
vendor/k8s.io/kubernetes/test/integration/serviceaccount/service_account_test.go
generated
vendored
@@ -43,14 +43,12 @@ import (
    "k8s.io/apiserver/pkg/authentication/user"
    "k8s.io/apiserver/pkg/authorization/authorizer"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes"
    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    internalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
    "k8s.io/kubernetes/pkg/controller"
    serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
    "k8s.io/kubernetes/pkg/serviceaccount"
    "k8s.io/kubernetes/pkg/util/metrics"
    serviceaccountadmission "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount"
    "k8s.io/kubernetes/test/integration/framework"
)
@@ -363,7 +361,7 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
    // Root client
    // TODO: remove rootClient after we refactor pkg/admission to use the clientset.
    rootClientset := clientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}, BearerToken: rootToken})
    internalRootClientset := internalclientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}, BearerToken: rootToken})
    externalRootClientset := kubernetes.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}, BearerToken: rootToken})
    // Set up two authenticators:
    // 1. A token authenticator that maps the rootToken to the "root" user
    // 2. A ServiceAccountToken authenticator that validates ServiceAccount tokens
@@ -418,9 +416,9 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie

    // Set up admission plugin to auto-assign serviceaccounts to pods
    serviceAccountAdmission := serviceaccountadmission.NewServiceAccount()
    serviceAccountAdmission.SetInternalKubeClientSet(internalRootClientset)
    internalInformers := internalinformers.NewSharedInformerFactory(internalRootClientset, controller.NoResyncPeriodFunc())
    serviceAccountAdmission.SetInternalKubeInformerFactory(internalInformers)
    serviceAccountAdmission.SetExternalKubeClientSet(externalRootClientset)
    externalInformers := informers.NewSharedInformerFactory(externalRootClientset, controller.NoResyncPeriodFunc())
    serviceAccountAdmission.SetExternalKubeInformerFactory(externalInformers)
    informers := informers.NewSharedInformerFactory(rootClientset, controller.NoResyncPeriodFunc())

    masterConfig := framework.NewMasterConfig()
@@ -437,19 +435,21 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
        apiServer.Close()
    }

    metrics.UnregisterMetricAndUntrackRateLimiterUsage("serviceaccount_tokens_controller")
    tokenGenerator, err := serviceaccount.JWTTokenGenerator(serviceaccount.LegacyIssuer, serviceAccountKey)
    if err != nil {
        return rootClientset, clientConfig, stop, err
    }
    tokenController, err := serviceaccountcontroller.NewTokensController(
        informers.Core().V1().ServiceAccounts(),
        informers.Core().V1().Secrets(),
        rootClientset,
        serviceaccountcontroller.TokensControllerOptions{TokenGenerator: serviceaccount.JWTTokenGenerator(serviceaccount.LegacyIssuer, serviceAccountKey)},
        serviceaccountcontroller.TokensControllerOptions{TokenGenerator: tokenGenerator},
    )
    if err != nil {
        return rootClientset, clientConfig, stop, err
    }
    go tokenController.Run(1, stopCh)

    metrics.UnregisterMetricAndUntrackRateLimiterUsage("serviceaccount_controller")
    serviceAccountController, err := serviceaccountcontroller.NewServiceAccountsController(
        informers.Core().V1().ServiceAccounts(),
        informers.Core().V1().Namespaces(),
@@ -460,7 +460,7 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
        return rootClientset, clientConfig, stop, err
    }
    informers.Start(stopCh)
    internalInformers.Start(stopCh)
    externalInformers.Start(stopCh)
    go serviceAccountController.Run(5, stopCh)

    return rootClientset, clientConfig, stop, nil
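
The import and controller changes above follow a signature change: the token generator constructor now returns a (generator, error) pair, so it can no longer be called inline inside the options literal. A self-contained sketch of the before/after calling shape; every type and function below is a stand-in, not the real serviceaccount API:

package main

import (
    "errors"
    "fmt"
)

type tokenGenerator struct{ key string }

// newTokenGenerator mirrors the two-value constructor shape: it can fail,
// so the error must be checked before the generator is wired into options.
func newTokenGenerator(key string) (*tokenGenerator, error) {
    if key == "" {
        return nil, errors.New("empty signing key")
    }
    return &tokenGenerator{key: key}, nil
}

type controllerOptions struct{ TokenGenerator *tokenGenerator }

func main() {
    // Before the change this could be written inline:
    //   controllerOptions{TokenGenerator: newTokenGenerator("test-key")}
    // which no longer compiles once the constructor also returns an error.
    gen, err := newTokenGenerator("test-key")
    if err != nil {
        fmt.Println("cannot build generator:", err)
        return
    }
    opts := controllerOptions{TokenGenerator: gen}
    fmt.Printf("options ready: %+v\n", opts)
}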

32
vendor/k8s.io/kubernetes/test/integration/statefulset/BUILD
generated
vendored
32
vendor/k8s.io/kubernetes/test/integration/statefulset/BUILD
generated
vendored
@@ -8,19 +8,19 @@ go_library(
    deps = [
        #"//pkg/api:go_default_library",
        "//pkg/controller/statefulset:go_default_library",
        "//staging/src/k8s.io/api/apps/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/util/retry:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/apps/v1beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/util/retry:go_default_library",
    ],
)

@@ -34,11 +34,11 @@ go_test(
    embed = [":go_default_library"],
    tags = ["integration"],
    deps = [
        "//staging/src/k8s.io/api/apps/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/apps/v1beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
    ],
)

14
vendor/k8s.io/kubernetes/test/integration/storageclasses/BUILD
generated
vendored
14
vendor/k8s.io/kubernetes/test/integration/storageclasses/BUILD
generated
vendored
@@ -14,14 +14,14 @@ go_test(
    ],
    tags = ["integration"],
    deps = [
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/storage/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)

16
vendor/k8s.io/kubernetes/test/integration/ttlcontroller/BUILD
generated
vendored
16
vendor/k8s.io/kubernetes/test/integration/ttlcontroller/BUILD
generated
vendored
@@ -18,15 +18,15 @@ go_test(
    ],
    deps = [
        "//pkg/controller/ttl:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)

2
vendor/k8s.io/kubernetes/test/integration/ttlcontroller/ttlcontroller_test.go
generated
vendored
2
vendor/k8s.io/kubernetes/test/integration/ttlcontroller/ttlcontroller_test.go
generated
vendored
@@ -75,7 +75,7 @@ func deleteNodes(t *testing.T, client *clientset.Clientset, startIndex, endIndex
            defer wg.Done()
            name := fmt.Sprintf("node-%d", idx)
            if err := client.Core().Nodes().Delete(name, &metav1.DeleteOptions{}); err != nil {
                t.Fatalf("Failed to create node: %v", err)
                t.Fatalf("Failed to delete node: %v", err)
            }
        }(i)
    }

13
vendor/k8s.io/kubernetes/test/integration/util/BUILD
generated
vendored
13
vendor/k8s.io/kubernetes/test/integration/util/BUILD
generated
vendored
@@ -18,16 +18,17 @@ go_library(
        "//pkg/cloudprovider/providers/gce/cloud:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/scheduler:go_default_library",
        "//pkg/scheduler/api:go_default_library",
        "//pkg/scheduler/factory:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/golang.org/x/oauth2:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)

36
vendor/k8s.io/kubernetes/test/integration/util/util.go
generated
vendored
36
vendor/k8s.io/kubernetes/test/integration/util/util.go
generated
vendored
@@ -30,6 +30,7 @@ import (
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/scheduler"
    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
    "k8s.io/kubernetes/pkg/scheduler/factory"
    "k8s.io/kubernetes/test/integration/framework"
)
@@ -97,21 +98,22 @@ func createSchedulerConfigurator(
    // Enable EnableEquivalenceClassCache for all integration tests.
    utilfeature.DefaultFeatureGate.Set("EnableEquivalenceClassCache=true")

    return factory.NewConfigFactory(
        v1.DefaultSchedulerName,
        clientSet,
        informerFactory.Core().V1().Nodes(),
        informerFactory.Core().V1().Pods(),
        informerFactory.Core().V1().PersistentVolumes(),
        informerFactory.Core().V1().PersistentVolumeClaims(),
        informerFactory.Core().V1().ReplicationControllers(),
        informerFactory.Extensions().V1beta1().ReplicaSets(),
        informerFactory.Apps().V1beta1().StatefulSets(),
        informerFactory.Core().V1().Services(),
        informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
        informerFactory.Storage().V1().StorageClasses(),
        v1.DefaultHardPodAffinitySymmetricWeight,
        utilfeature.DefaultFeatureGate.Enabled(features.EnableEquivalenceClassCache),
        false,
    )
    return factory.NewConfigFactory(&factory.ConfigFactoryArgs{
        SchedulerName:                  v1.DefaultSchedulerName,
        Client:                         clientSet,
        NodeInformer:                   informerFactory.Core().V1().Nodes(),
        PodInformer:                    informerFactory.Core().V1().Pods(),
        PvInformer:                     informerFactory.Core().V1().PersistentVolumes(),
        PvcInformer:                    informerFactory.Core().V1().PersistentVolumeClaims(),
        ReplicationControllerInformer:  informerFactory.Core().V1().ReplicationControllers(),
        ReplicaSetInformer:             informerFactory.Apps().V1().ReplicaSets(),
        StatefulSetInformer:            informerFactory.Apps().V1().StatefulSets(),
        ServiceInformer:                informerFactory.Core().V1().Services(),
        PdbInformer:                    informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
        StorageClassInformer:           informerFactory.Storage().V1().StorageClasses(),
        HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
        EnableEquivalenceClassCache:    utilfeature.DefaultFeatureGate.Enabled(features.EnableEquivalenceClassCache),
        DisablePreemption:              false,
        PercentageOfNodesToScore:       schedulerapi.DefaultPercentageOfNodesToScore,
    })
}
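
The rewrite above replaces a sixteen-argument positional call with a single ConfigFactoryArgs struct: call sites become self-describing, and new fields such as DisablePreemption and PercentageOfNodesToScore can be added without breaking existing callers. A minimal, self-contained sketch of the pattern; the types below are hypothetical stand-ins, not the scheduler's:

package main

import "fmt"

// factoryArgs stands in for factory.ConfigFactoryArgs (assumption).
type factoryArgs struct {
    SchedulerName            string
    HardPodAffinityWeight    int
    DisablePreemption        bool
    PercentageOfNodesToScore int
}

// newFactory takes one args struct instead of many positional parameters,
// so adding a field later does not break existing call sites.
func newFactory(args *factoryArgs) string {
    return fmt.Sprintf("%s: affinity weight %d, preemption disabled %v, scoring %d%% of nodes",
        args.SchedulerName, args.HardPodAffinityWeight,
        args.DisablePreemption, args.PercentageOfNodesToScore)
}

func main() {
    fmt.Println(newFactory(&factoryArgs{
        SchedulerName:            "default-scheduler",
        HardPodAffinityWeight:    1,
        DisablePreemption:        false,
        PercentageOfNodesToScore: 50,
    }))
}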

25
vendor/k8s.io/kubernetes/test/integration/volume/BUILD
generated
vendored
25
vendor/k8s.io/kubernetes/test/integration/volume/BUILD
generated
vendored
@@ -20,23 +20,24 @@ go_test(
        "//pkg/controller/volume/attachdetach:go_default_library",
        "//pkg/controller/volume/attachdetach/cache:go_default_library",
        "//pkg/controller/volume/persistentvolume:go_default_library",
        "//pkg/controller/volume/persistentvolume/options:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/client-go/tools/reference:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/storage/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/tools/reference:go_default_library",
    ],
)

191
vendor/k8s.io/kubernetes/test/integration/volume/attach_detach_test.go
generated
vendored
191
vendor/k8s.io/kubernetes/test/integration/volume/attach_detach_test.go
generated
vendored
@@ -17,11 +17,13 @@ limitations under the License.
package volume

import (
    "fmt"
    "net/http/httptest"
    "testing"
    "time"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/util/wait"
@@ -32,6 +34,8 @@ import (
    fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
    "k8s.io/kubernetes/pkg/controller/volume/attachdetach"
    volumecache "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
    "k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
    persistentvolumeoptions "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/options"
    "k8s.io/kubernetes/pkg/volume"
    volumetest "k8s.io/kubernetes/pkg/volume/testing"
    "k8s.io/kubernetes/pkg/volume/util"
@@ -73,8 +77,68 @@ func fakePodWithVol(namespace string) *v1.Pod {
    return fakePod
}

func fakePodWithPVC(name, pvcName, namespace string) (*v1.Pod, *v1.PersistentVolumeClaim) {
    fakePod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: namespace,
            Name:      name,
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name:  "fake-container",
                    Image: "nginx",
                    VolumeMounts: []v1.VolumeMount{
                        {
                            Name:      "fake-mount",
                            MountPath: "/var/www/html",
                        },
                    },
                },
            },
            Volumes: []v1.Volume{
                {
                    Name: "fake-mount",
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: pvcName,
                        },
                    },
                },
            },
            NodeName: "node-sandbox",
        },
    }
    class := "fake-sc"
    fakePVC := &v1.PersistentVolumeClaim{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: namespace,
            Name:      pvcName,
        },
        Spec: v1.PersistentVolumeClaimSpec{
            AccessModes: []v1.PersistentVolumeAccessMode{
                v1.ReadWriteOnce,
            },
            Resources: v1.ResourceRequirements{
                Requests: v1.ResourceList{
                    v1.ResourceName(v1.ResourceStorage): resource.MustParse("5Gi"),
                },
            },
            StorageClassName: &class,
        },
    }
    return fakePod, fakePVC
}

type podCountFunc func(int) bool

var defaultTimerConfig = attachdetach.TimerConfig{
    ReconcilerLoopPeriod:                              100 * time.Millisecond,
    ReconcilerMaxWaitForUnmountDuration:               6 * time.Second,
    DesiredStateOfWorldPopulatorLoopSleepPeriod:       1 * time.Second,
    DesiredStateOfWorldPopulatorListPodsRetryDuration: 3 * time.Second,
}

// Via this integration test we can verify that if a pod delete
// event is somehow missed by the AttachDetach controller, it still
// gets cleaned up by the Desired State of World populator.
@@ -94,7 +158,7 @@ func TestPodDeletionWithDswp(t *testing.T) {
    ns := framework.CreateTestingNamespace(namespaceName, server, t)
    defer framework.DeleteTestingNamespace(ns, server, t)

    testClient, ctrl, informers := createAdClients(ns, t, server, defaultSyncPeriod)
    testClient, ctrl, _, informers := createAdClients(ns, t, server, defaultSyncPeriod, defaultTimerConfig)
    pod := fakePodWithVol(namespaceName)
    podStopCh := make(chan struct{})

@@ -160,7 +224,7 @@ func TestPodUpdateWithWithADC(t *testing.T) {
    ns := framework.CreateTestingNamespace(namespaceName, server, t)
    defer framework.DeleteTestingNamespace(ns, server, t)

    testClient, ctrl, informers := createAdClients(ns, t, server, defaultSyncPeriod)
    testClient, ctrl, _, informers := createAdClients(ns, t, server, defaultSyncPeriod, defaultTimerConfig)

    pod := fakePodWithVol(namespaceName)
    podStopCh := make(chan struct{})
@@ -228,7 +292,7 @@ func TestPodUpdateWithKeepTerminatedPodVolumes(t *testing.T) {
    ns := framework.CreateTestingNamespace(namespaceName, server, t)
    defer framework.DeleteTestingNamespace(ns, server, t)

    testClient, ctrl, informers := createAdClients(ns, t, server, defaultSyncPeriod)
    testClient, ctrl, _, informers := createAdClients(ns, t, server, defaultSyncPeriod, defaultTimerConfig)

    pod := fakePodWithVol(namespaceName)
    podStopCh := make(chan struct{})
@@ -320,7 +384,7 @@ func waitForPodFuncInDSWP(t *testing.T, dswp volumecache.DesiredStateOfWorld, ch
    }
}

func createAdClients(ns *v1.Namespace, t *testing.T, server *httptest.Server, syncPeriod time.Duration) (*clientset.Clientset, attachdetach.AttachDetachController, informers.SharedInformerFactory) {
func createAdClients(ns *v1.Namespace, t *testing.T, server *httptest.Server, syncPeriod time.Duration, timers attachdetach.TimerConfig) (*clientset.Clientset, attachdetach.AttachDetachController, *persistentvolume.PersistentVolumeController, informers.SharedInformerFactory) {
    config := restclient.Config{
        Host:          server.URL,
        ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}},
@@ -346,14 +410,9 @@ func createAdClients(ns *v1.Namespace, t *testing.T, server *httptest.Server, sy
    plugins := []volume.VolumePlugin{plugin}
    cloud := &fakecloud.FakeCloud{}
    informers := informers.NewSharedInformerFactory(testClient, resyncPeriod)
    timers := attachdetach.TimerConfig{
        ReconcilerLoopPeriod:                              100 * time.Millisecond,
        ReconcilerMaxWaitForUnmountDuration:               6 * time.Second,
        DesiredStateOfWorldPopulatorLoopSleepPeriod:       1 * time.Second,
        DesiredStateOfWorldPopulatorListPodsRetryDuration: 3 * time.Second,
    }
    ctrl, err := attachdetach.NewAttachDetachController(
        testClient,
        nil, /* csiClient */
        informers.Core().V1().Pods(),
        informers.Core().V1().Nodes(),
        informers.Core().V1().PersistentVolumeClaims(),
@@ -368,7 +427,27 @@ func createAdClients(ns *v1.Namespace, t *testing.T, server *httptest.Server, sy
    if err != nil {
        t.Fatalf("Error creating AttachDetach: %v", err)
    }
    return testClient, ctrl, informers

    // create pv controller
    controllerOptions := persistentvolumeoptions.NewPersistentVolumeControllerOptions()
    params := persistentvolume.ControllerParameters{
        KubeClient:                testClient,
        SyncPeriod:                controllerOptions.PVClaimBinderSyncPeriod,
        VolumePlugins:             plugins,
        Cloud:                     nil,
        ClusterName:               "volume-test-cluster",
        VolumeInformer:            informers.Core().V1().PersistentVolumes(),
        ClaimInformer:             informers.Core().V1().PersistentVolumeClaims(),
        ClassInformer:             informers.Storage().V1().StorageClasses(),
        PodInformer:               informers.Core().V1().Pods(),
        NodeInformer:              informers.Core().V1().Nodes(),
        EnableDynamicProvisioning: false,
    }
    pvCtrl, err := persistentvolume.NewController(params)
    if err != nil {
        t.Fatalf("Failed to create PV controller: %v", err)
    }
    return testClient, ctrl, pvCtrl, informers
}

// Via this integration test we can verify that if a pod add
@@ -391,7 +470,7 @@ func TestPodAddedByDswp(t *testing.T) {
    ns := framework.CreateTestingNamespace(namespaceName, server, t)
    defer framework.DeleteTestingNamespace(ns, server, t)

    testClient, ctrl, informers := createAdClients(ns, t, server, defaultSyncPeriod)
    testClient, ctrl, _, informers := createAdClients(ns, t, server, defaultSyncPeriod, defaultTimerConfig)

    pod := fakePodWithVol(namespaceName)
    podStopCh := make(chan struct{})
@@ -446,3 +525,91 @@ func TestPodAddedByDswp(t *testing.T) {

    close(stopCh)
}

func TestPVCBoundWithADC(t *testing.T) {
    _, server, closeFn := framework.RunAMaster(framework.NewIntegrationTestMasterConfig())
    defer closeFn()
    namespaceName := "test-pod-deletion"

    ns := framework.CreateTestingNamespace(namespaceName, server, t)
    defer framework.DeleteTestingNamespace(ns, server, t)

    testClient, ctrl, pvCtrl, informers := createAdClients(ns, t, server, defaultSyncPeriod, attachdetach.TimerConfig{
        ReconcilerLoopPeriod:                        100 * time.Millisecond,
        ReconcilerMaxWaitForUnmountDuration:         6 * time.Second,
        DesiredStateOfWorldPopulatorLoopSleepPeriod: 24 * time.Hour,
        // Use a high duration to disable the DesiredStateOfWorldPopulator.findAndAddActivePods loop in this test.
        DesiredStateOfWorldPopulatorListPodsRetryDuration: 24 * time.Hour,
    })

    node := &v1.Node{
        ObjectMeta: metav1.ObjectMeta{
            Name: "node-sandbox",
            Annotations: map[string]string{
                util.ControllerManagedAttachAnnotation: "true",
            },
        },
    }
    if _, err := testClient.Core().Nodes().Create(node); err != nil {
        t.Fatalf("Failed to create node: %v", err)
    }

    // pods with pvc not bound
    pvcs := []*v1.PersistentVolumeClaim{}
    for i := 0; i < 3; i++ {
        pod, pvc := fakePodWithPVC(fmt.Sprintf("fakepod-pvcnotbound-%d", i), fmt.Sprintf("fakepvc-%d", i), namespaceName)
        if _, err := testClient.Core().Pods(pod.Namespace).Create(pod); err != nil {
            t.Errorf("Failed to create pod: %v", err)
        }
        if _, err := testClient.Core().PersistentVolumeClaims(pvc.Namespace).Create(pvc); err != nil {
            t.Errorf("Failed to create pvc: %v", err)
        }
        pvcs = append(pvcs, pvc)
    }
    // pod with no pvc
    podNew := fakePodWithVol(namespaceName)
    podNew.SetName("fakepod")
    if _, err := testClient.Core().Pods(podNew.Namespace).Create(podNew); err != nil {
        t.Errorf("Failed to create pod: %v", err)
    }

    // start controller loop
    stopCh := make(chan struct{})
    informers.Start(stopCh)
    informers.WaitForCacheSync(stopCh)
    go ctrl.Run(stopCh)
    go pvCtrl.Run(stopCh)

    waitToObservePods(t, informers.Core().V1().Pods().Informer(), 4)
    // Give the attachdetach controller enough time to populate pods into the DSWP.
    time.Sleep(10 * time.Second)
    waitForPodFuncInDSWP(t, ctrl.GetDesiredStateOfWorld(), 60*time.Second, "expected 1 pod in dsw", 1)
    for _, pvc := range pvcs {
        createPVForPVC(t, testClient, pvc)
    }
    waitForPodFuncInDSWP(t, ctrl.GetDesiredStateOfWorld(), 60*time.Second, "expected 4 pods in dsw after PVCs are bound", 4)
    close(stopCh)
}

// Create a PV for the PVC; the pv controller will bind them together.
func createPVForPVC(t *testing.T, testClient *clientset.Clientset, pvc *v1.PersistentVolumeClaim) {
    pv := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{
            Name: fmt.Sprintf("fakepv-%s", pvc.Name),
        },
        Spec: v1.PersistentVolumeSpec{
            Capacity:    pvc.Spec.Resources.Requests,
            AccessModes: pvc.Spec.AccessModes,
            PersistentVolumeSource: v1.PersistentVolumeSource{
                HostPath: &v1.HostPathVolumeSource{
                    Path: "/var/www/html",
                },
            },
            ClaimRef:         &v1.ObjectReference{Name: pvc.Name, Namespace: pvc.Namespace},
            StorageClassName: *pvc.Spec.StorageClassName,
        },
    }
    if _, err := testClient.Core().PersistentVolumes().Create(pv); err != nil {
        t.Errorf("Failed to create pv: %v", err)
    }
}
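
createPVForPVC only creates the PV; the binding itself happens asynchronously once the PV controller matches the ClaimRef. A test that needs to assert that the binding completed could poll the claim's phase. A minimal sketch, assuming the wait, metav1, v1, and clientset packages already imported by this file; the helper name is hypothetical:

// waitForPVCBound polls until the claim reports phase Bound (hypothetical helper).
func waitForPVCBound(c *clientset.Clientset, ns, name string) error {
    return wait.Poll(100*time.Millisecond, 60*time.Second, func() (bool, error) {
        pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        return pvc.Status.Phase == v1.ClaimBound, nil
    })
}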