Bumping k8s dependencies to 1.13
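Vendored kubelet test changes pulled in with this bump: the cadvisor mock (the fakeCadvisor field and the chainMock helper) is replaced by cadvisortest.Fake, heartbeatClient is assigned the clientset directly instead of its CoreV1() interface, the unused testNodeLister type and its List method are dropped, individual tests now set up kl.dnsConfigurer via dns.NewConfigurer, and a forEachFeatureGate test helper is added.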
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_test.go | 145 (generated, vendored)
@@ -25,18 +25,17 @@ import (
 	"time"

 	cadvisorapi "github.com/google/cadvisor/info/v1"
-	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/clock"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/flowcontrol"
@@ -51,6 +50,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/images"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/kubelet/logs"
+	"k8s.io/kubernetes/pkg/kubelet/network/dns"
 	"k8s.io/kubernetes/pkg/kubelet/pleg"
 	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
 	podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
@@ -104,7 +104,6 @@ func (f *fakeImageGCManager) GetImageList() ([]kubecontainer.Image, error) {
 type TestKubelet struct {
 	kubelet          *Kubelet
 	fakeRuntime      *containertest.FakeRuntime
-	fakeCadvisor     *cadvisortest.Mock
 	fakeKubeClient   *fake.Clientset
 	fakeMirrorClient *podtest.FakeMirrorClient
 	fakeClock        *clock.FakeClock
@@ -118,11 +117,6 @@ func (tk *TestKubelet) Cleanup() {
 	}
 }

-func (tk *TestKubelet) chainMock() {
-	tk.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
-	tk.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
-}
-
 // newTestKubelet returns test kubelet with two images.
 func newTestKubelet(t *testing.T, controllerAttachDetachEnabled bool) *TestKubelet {
 	imageList := []kubecontainer.Image{
@@ -178,7 +172,7 @@ func newTestKubeletWithImageList(
 	kubelet := &Kubelet{}
 	kubelet.recorder = fakeRecorder
 	kubelet.kubeClient = fakeKubeClient
-	kubelet.heartbeatClient = fakeKubeClient.CoreV1()
+	kubelet.heartbeatClient = fakeKubeClient
 	kubelet.os = &containertest.FakeOS{}
 	kubelet.mounter = &mount.FakeMounter{}

@@ -186,7 +180,7 @@ func newTestKubeletWithImageList(
 	kubelet.nodeName = types.NodeName(testKubeletHostname)
 	kubelet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
 	kubelet.runtimeState.setNetworkState(nil)
-	if tempDir, err := ioutil.TempDir("/tmp", "kubelet_test."); err != nil {
+	if tempDir, err := ioutil.TempDir("", "kubelet_test."); err != nil {
 		t.Fatalf("can't make a temp rootdir: %v", err)
 	} else {
 		kubelet.rootDirectory = tempDir
@@ -228,8 +222,9 @@ func newTestKubeletWithImageList(
 	}
 	kubelet.daemonEndpoints = &v1.NodeDaemonEndpoints{}

-	mockCadvisor := &cadvisortest.Mock{}
-	kubelet.cadvisor = mockCadvisor
+	kubelet.cadvisor = &cadvisortest.Fake{}
+	machineInfo, _ := kubelet.cadvisor.MachineInfo()
+	kubelet.machineInfo = machineInfo

 	fakeMirrorClient := podtest.NewFakeMirrorClient()
 	secretManager := secret.NewSimpleSecretManager(kubelet.kubeClient)
@@ -298,7 +293,6 @@ func newTestKubeletWithImageList(
 	// Relist period does not affect the tests.
 	kubelet.pleg = pleg.NewGenericPLEG(fakeRuntime, 100, time.Hour, nil, clock.RealClock{})
 	kubelet.clock = fakeClock
-	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()

 	nodeRef := &v1.ObjectReference{
 		Kind:      "Node",
@@ -344,13 +338,15 @@ func newTestKubeletWithImageList(
 		false, /* experimentalCheckNodeCapabilitiesBeforeMount*/
 		false /* keepTerminatedPodVolumes */)

+	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
+
 	// enable active deadline handler
 	activeDeadlineHandler, err := newActiveDeadlineHandler(kubelet.statusManager, kubelet.recorder, kubelet.clock)
 	require.NoError(t, err, "Can't initialize active deadline handler")

 	kubelet.AddPodSyncLoopHandler(activeDeadlineHandler)
 	kubelet.AddPodSyncHandler(activeDeadlineHandler)
-	return &TestKubelet{kubelet, fakeRuntime, mockCadvisor, fakeKubeClient, fakeMirrorClient, fakeClock, nil, plug}
+	return &TestKubelet{kubelet, fakeRuntime, fakeKubeClient, fakeMirrorClient, fakeClock, nil, plug}
 }

 func newTestPods(count int) []*v1.Pod {
@@ -436,10 +432,6 @@ func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
 	fakeRuntime.AssertKilledPods([]string{"12345678"})
 }

-type testNodeLister struct {
-	nodes []*v1.Node
-}
-
 type testNodeInfo struct {
 	nodes []*v1.Node
 }
@@ -453,10 +445,6 @@ func (ls testNodeInfo) GetNodeInfo(id string) (*v1.Node, error) {
 	return nil, fmt.Errorf("Node with name: %s does not exist", id)
 }

-func (ls testNodeLister) List(selector labels.Selector) ([]*v1.Node, error) {
-	return ls.nodes, nil
-}
-
 func checkPodStatus(t *testing.T, kl *Kubelet, pod *v1.Pod, phase v1.PodPhase) {
 	status, found := kl.statusManager.GetPodStatus(pod.UID)
 	require.True(t, found, "Status of pod %q is not found in the status map", pod.UID)
@@ -467,7 +455,6 @@ func checkPodStatus(t *testing.T, kl *Kubelet, pod *v1.Pod, phase v1.PodPhase) {
 func TestHandlePortConflicts(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()
 	kl := testKubelet.kubelet

 	kl.nodeInfo = testNodeInfo{nodes: []*v1.Node{
@@ -481,6 +468,16 @@ func TestHandlePortConflicts(t *testing.T) {
 		},
 	}}

+	recorder := record.NewFakeRecorder(20)
+	nodeRef := &v1.ObjectReference{
+		Kind:      "Node",
+		Name:      string("testNode"),
+		UID:       types.UID("testNode"),
+		Namespace: "",
+	}
+	testClusterDNSDomain := "TEST"
+	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
+
 	spec := v1.PodSpec{NodeName: string(kl.nodeName), Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}
 	pods := []*v1.Pod{
 		podWithUIDNameNsSpec("123456789", "newpod", "foo", spec),
@@ -504,7 +501,6 @@ func TestHandlePortConflicts(t *testing.T) {
 func TestHandleHostNameConflicts(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()
 	kl := testKubelet.kubelet

 	kl.nodeInfo = testNodeInfo{nodes: []*v1.Node{
@@ -518,6 +514,16 @@ func TestHandleHostNameConflicts(t *testing.T) {
 		},
 	}}

+	recorder := record.NewFakeRecorder(20)
+	nodeRef := &v1.ObjectReference{
+		Kind:      "Node",
+		Name:      string("testNode"),
+		UID:       types.UID("testNode"),
+		Namespace: "",
+	}
+	testClusterDNSDomain := "TEST"
+	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
+
 	// default NodeName in test is 127.0.0.1
 	pods := []*v1.Pod{
 		podWithUIDNameNsSpec("123456789", "notfittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.2"}),
@@ -538,7 +544,6 @@ func TestHandleHostNameConflicts(t *testing.T) {
 func TestHandleNodeSelector(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()
 	kl := testKubelet.kubelet
 	nodes := []*v1.Node{
 		{
@@ -551,6 +556,17 @@ func TestHandleNodeSelector(t *testing.T) {
 		},
 	}
 	kl.nodeInfo = testNodeInfo{nodes: nodes}
+
+	recorder := record.NewFakeRecorder(20)
+	nodeRef := &v1.ObjectReference{
+		Kind:      "Node",
+		Name:      string("testNode"),
+		UID:       types.UID("testNode"),
+		Namespace: "",
+	}
+	testClusterDNSDomain := "TEST"
+	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
+
 	pods := []*v1.Pod{
 		podWithUIDNameNsSpec("123456789", "podA", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "A"}}),
 		podWithUIDNameNsSpec("987654321", "podB", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "B"}}),
@@ -570,7 +586,6 @@ func TestHandleNodeSelector(t *testing.T) {
 func TestHandleMemExceeded(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()
 	kl := testKubelet.kubelet
 	nodes := []*v1.Node{
 		{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
@@ -582,6 +597,16 @@ func TestHandleMemExceeded(t *testing.T) {
 	}
 	kl.nodeInfo = testNodeInfo{nodes: nodes}

+	recorder := record.NewFakeRecorder(20)
+	nodeRef := &v1.ObjectReference{
+		Kind:      "Node",
+		Name:      string("testNode"),
+		UID:       types.UID("testNode"),
+		Namespace: "",
+	}
+	testClusterDNSDomain := "TEST"
+	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
+
 	spec := v1.PodSpec{NodeName: string(kl.nodeName),
 		Containers: []v1.Container{{Resources: v1.ResourceRequirements{
 			Requests: v1.ResourceList{
@@ -612,7 +637,6 @@ func TestHandleMemExceeded(t *testing.T) {
 func TestHandlePluginResources(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()
 	kl := testKubelet.kubelet

 	adjustedResource := v1.ResourceName("domain1.com/adjustedResource")
@@ -668,6 +692,16 @@ func TestHandlePluginResources(t *testing.T) {
 	kl.admitHandlers = lifecycle.PodAdmitHandlers{}
 	kl.admitHandlers.AddPodAdmitHandler(lifecycle.NewPredicateAdmitHandler(kl.getNodeAnyWay, lifecycle.NewAdmissionFailureHandlerStub(), updatePluginResourcesFunc))

+	recorder := record.NewFakeRecorder(20)
+	nodeRef := &v1.ObjectReference{
+		Kind:      "Node",
+		Name:      string("testNode"),
+		UID:       types.UID("testNode"),
+		Namespace: "",
+	}
+	testClusterDNSDomain := "TEST"
+	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
+
 	// pod requiring adjustedResource can be successfully allocated because updatePluginResourcesFunc
 	// adjusts node.allocatableResource for this resource to a sufficient value.
 	fittingPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
@@ -737,13 +771,6 @@ func TestHandlePluginResources(t *testing.T) {
 func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()
-	versionInfo := &cadvisorapi.VersionInfo{
-		KernelVersion:      "3.16.0-0.bpo.4-amd64",
-		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
-		DockerVersion:      "1.5.0",
-	}
-	testKubelet.fakeCadvisor.On("VersionInfo").Return(versionInfo, nil)

 	kl := testKubelet.kubelet
 	pods := []*v1.Pod{
@@ -917,7 +944,6 @@ func TestCreateMirrorPod(t *testing.T) {
 	for _, updateType := range []kubetypes.SyncPodType{kubetypes.SyncPodCreate, kubetypes.SyncPodUpdate} {
 		testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 		defer testKubelet.Cleanup()
-		testKubelet.chainMock()

 		kl := testKubelet.kubelet
 		manager := testKubelet.fakeMirrorClient
@@ -940,7 +966,6 @@ func TestCreateMirrorPod(t *testing.T) {
 func TestDeleteOutdatedMirrorPod(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()

 	kl := testKubelet.kubelet
 	manager := testKubelet.fakeMirrorClient
@@ -979,7 +1004,6 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) {
 func TestDeleteOrphanedMirrorPods(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()

 	kl := testKubelet.kubelet
 	manager := testKubelet.fakeMirrorClient
@@ -1058,20 +1082,10 @@ func TestGetContainerInfoForMirrorPods(t *testing.T) {
 		},
 	}

-	containerID := "ab2cdf"
-	containerPath := fmt.Sprintf("/docker/%v", containerID)
-	containerInfo := cadvisorapi.ContainerInfo{
-		ContainerReference: cadvisorapi.ContainerReference{
-			Name: containerPath,
-		},
-	}
-
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
 	fakeRuntime := testKubelet.fakeRuntime
-	mockCadvisor := testKubelet.fakeCadvisor
 	cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
-	mockCadvisor.On("DockerContainer", containerID, cadvisorReq).Return(containerInfo, nil)
 	kubelet := testKubelet.kubelet

 	fakeRuntime.PodList = []*containertest.FakePod{
@@ -1082,7 +1096,7 @@ func TestGetContainerInfoForMirrorPods(t *testing.T) {
 			Containers: []*kubecontainer.Container{
 				{
 					Name: "foo",
-					ID:   kubecontainer.ContainerID{Type: "test", ID: containerID},
+					ID:   kubecontainer.ContainerID{Type: "test", ID: "ab2cdf"},
 				},
 			},
 		}},
@@ -1093,13 +1107,11 @@ func TestGetContainerInfoForMirrorPods(t *testing.T) {
 	stats, err := kubelet.GetContainerInfo("qux_ns", "5678", "foo", cadvisorReq)
 	assert.NoError(t, err)
 	require.NotNil(t, stats)
-	mockCadvisor.AssertExpectations(t)
 }

 func TestHostNetworkAllowed(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()

 	kubelet := testKubelet.kubelet

@@ -1128,7 +1140,6 @@ func TestHostNetworkAllowed(t *testing.T) {
 func TestHostNetworkDisallowed(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()

 	kubelet := testKubelet.kubelet

@@ -1156,7 +1167,6 @@ func TestHostNetworkDisallowed(t *testing.T) {
 func TestHostPIDAllowed(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()

 	kubelet := testKubelet.kubelet

@@ -1185,7 +1195,6 @@ func TestHostPIDAllowed(t *testing.T) {
 func TestHostPIDDisallowed(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()

 	kubelet := testKubelet.kubelet

@@ -1213,7 +1222,6 @@ func TestHostPIDDisallowed(t *testing.T) {
 func TestHostIPCAllowed(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()

 	kubelet := testKubelet.kubelet

@@ -1242,7 +1250,6 @@ func TestHostIPCAllowed(t *testing.T) {
 func TestHostIPCDisallowed(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()

 	kubelet := testKubelet.kubelet

@@ -1270,7 +1277,6 @@ func TestHostIPCDisallowed(t *testing.T) {
 func TestPrivilegeContainerAllowed(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()

 	kubelet := testKubelet.kubelet

@@ -1296,7 +1302,6 @@ func TestPrivilegeContainerAllowed(t *testing.T) {
 func TestPrivilegedContainerDisallowed(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()
 	kubelet := testKubelet.kubelet

 	capabilities.SetForTests(capabilities.Capabilities{
@@ -1320,7 +1325,6 @@ func TestPrivilegedContainerDisallowed(t *testing.T) {
 func TestNetworkErrorsWithoutHostNetwork(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()
 	kubelet := testKubelet.kubelet

 	kubelet.runtimeState.setNetworkState(fmt.Errorf("simulated network error"))
@@ -1387,7 +1391,6 @@ func TestSyncPodsSetStatusToFailedForPodsThatRunTooLong(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
 	fakeRuntime := testKubelet.fakeRuntime
-	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
 	kubelet := testKubelet.kubelet

 	now := metav1.Now()
@@ -1435,7 +1438,6 @@ func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
 	fakeRuntime := testKubelet.fakeRuntime
-	testKubelet.chainMock()

 	kubelet := testKubelet.kubelet

@@ -1500,7 +1502,6 @@ func podWithUIDNameNsSpec(uid types.UID, name, namespace string, spec v1.PodSpec
 func TestDeletePodDirsForDeletedPods(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()
 	kl := testKubelet.kubelet
 	pods := []*v1.Pod{
 		podWithUIDNameNs("12345678", "pod1", "ns"),
@@ -1536,7 +1537,6 @@ func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*v1.Pod,
 func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()
 	kl := testKubelet.kubelet
 	pods := []*v1.Pod{
 		podWithUIDNameNs("12345678", "pod1", "ns"),
@@ -1555,7 +1555,6 @@ func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
 func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()
 	runningPod := &kubecontainer.Pod{
 		ID:   "12345678",
 		Name: "pod1",
@@ -1615,7 +1614,6 @@ func TestGetPodsToSync(t *testing.T) {
 func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()
 	kubelet := testKubelet.kubelet
 	numContainers := 10
 	expectedOrder := []string{}
@@ -1675,7 +1673,6 @@ func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
 	emptyContainerID := (&kubecontainer.ContainerID{}).String()
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()
 	kubelet := testKubelet.kubelet
 	pod := podWithUIDNameNs("12345678", "foo", "new")
 	pod.Spec = v1.PodSpec{RestartPolicy: v1.RestartPolicyOnFailure}
@@ -1862,7 +1859,6 @@ func TestGenerateAPIPodStatusWithDifferentRestartPolicies(t *testing.T) {
 	emptyContainerID := (&kubecontainer.ContainerID{}).String()
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()
 	kubelet := testKubelet.kubelet
 	pod := podWithUIDNameNs("12345678", "foo", "new")
 	containers := []v1.Container{{Name: "succeed"}, {Name: "failed"}}
@@ -2026,7 +2022,6 @@ func (a *testPodAdmitHandler) Admit(attrs *lifecycle.PodAdmitAttributes) lifecyc
 func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
-	testKubelet.chainMock()
 	kl := testKubelet.kubelet
 	kl.nodeInfo = testNodeInfo{nodes: []*v1.Node{
 		{
@@ -2255,6 +2250,22 @@ func runVolumeManager(kubelet *Kubelet) chan struct{} {
 	return stopCh
 }

+func forEachFeatureGate(t *testing.T, fs []utilfeature.Feature, tf func(t *testing.T)) {
+	for _, fg := range fs {
+		func() {
+			enabled := utilfeature.DefaultFeatureGate.Enabled(fg)
+			defer func() {
+				utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled))
+			}()
+
+			for _, f := range []bool{true, false} {
+				utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f))
+				t.Run(fmt.Sprintf("%v(%t)", fg, f), tf)
+			}
+		}()
+	}
+}
+
 // Sort pods by UID.
 type podsByUID []*v1.Pod