Bumping k8s dependencies to 1.13

Cheng Xing
2018-11-16 14:08:25 -08:00
parent 305407125c
commit b4c0b68ec7
8002 changed files with 884099 additions and 276228 deletions


@@ -11,6 +11,7 @@ filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//test/cmd:all-srcs",
"//test/conformance:all-srcs",
"//test/e2e:all-srcs",
"//test/e2e_kubeadm:all-srcs",


@@ -56,3 +56,5 @@ approvers:
- timothysc
- zmerlynn
- vishh
labels:
- sig/testing

vendor/k8s.io/kubernetes/test/cmd/BUILD generated vendored Normal file (20 lines changed)

@@ -0,0 +1,20 @@
sh_library(
name = "legacy-script",
srcs = glob(["*.sh"]),
data = ["//pkg/kubectl/validation:testdata/v1/validPod.yaml"],
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

vendor/k8s.io/kubernetes/test/cmd/OWNERS generated vendored Normal file (4 lines changed)

@@ -0,0 +1,4 @@
approvers:
- sig-cli-maintainers
reviewers:
- sig-cli

vendor/k8s.io/kubernetes/test/cmd/apply.sh generated vendored Executable file (198 lines changed)

@@ -0,0 +1,198 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# Runs tests related to kubectl apply.
run_kubectl_apply_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl apply"
## kubectl apply should create the resource that doesn't exist yet
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: apply a pod "test-pod" (doesn't exist) should create this pod
kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is created
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete pods test-pod "${kube_flags[@]}"
## kubectl apply should be able to clear defaulted fields.
# Pre-Condition: no deployment exists
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: apply a deployment "test-deployment-retainkeys" (doesn't exist) should create this deployment
kubectl apply -f hack/testdata/retainKeys/deployment/deployment-before.yaml "${kube_flags[@]}"
# Post-Condition: deployment "test-deployment-retainkeys" created
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}{{end}}" 'test-deployment-retainkeys'
# Post-Condition: deployment "test-deployment-retainkeys" has defaulted fields
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep RollingUpdate)" ]]
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep maxSurge)" ]]
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep maxUnavailable)" ]]
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep emptyDir)" ]]
# Command: apply a deployment "test-deployment-retainkeys" should clear
# defaulted fields and successfully update the deployment
[[ "$(kubectl apply -f hack/testdata/retainKeys/deployment/deployment-after.yaml "${kube_flags[@]}")" ]]
# Post-Condition: deployment "test-deployment-retainkeys" has updated fields
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep Recreate)" ]]
! [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep RollingUpdate)" ]]
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep hostPath)" ]]
! [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep emptyDir)" ]]
# Clean up
kubectl delete deployments test-deployment-retainkeys "${kube_flags[@]}"
## kubectl apply -f with label selector should only apply matching objects
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply
kubectl apply -l unique-label=bingbang -f hack/testdata/filter "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field}.name}}" 'selector-test-pod'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "selector-test-pod-dont-apply" not found'
# cleanup
kubectl delete pods selector-test-pod
## kubectl apply --server-dry-run
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply dry-run
kubectl apply --server-dry-run -f hack/testdata/pod.yaml "${kube_flags[@]}"
# No pod exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply non dry-run creates the pod
kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]}"
# apply changes
kubectl apply --server-dry-run -f hack/testdata/pod-apply.yaml "${kube_flags[@]}"
# Post-Condition: label still has initial value
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
# clean-up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
## kubectl apply --prune
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply a
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "b" not found'
# apply b
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods a 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "a" not found'
# cleanup
kubectl delete pods b
# same thing without prune for a sanity check
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply a
kubectl apply -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "b" not found'
# apply b
kubectl apply -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]}"
# check both pods exist
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
# check wrong pod doesn't exist
# cleanup
kubectl delete pod/a pod/b
## kubectl apply --prune requires a --all flag to select everything
output_message=$(! kubectl apply --prune -f hack/testdata/prune 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" \
'all resources selected for prune without explicitly passing --all'
# should apply everything
kubectl apply --all --prune -f hack/testdata/prune
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
kubectl delete pod/a pod/b
## kubectl apply --prune should fall back to delete for non-reapable types
kubectl apply --all --prune -f hack/testdata/prune-reap/a.yml 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'pvc a-pvc' "{{${id_field}}}" 'a-pvc'
kubectl apply --all --prune -f hack/testdata/prune-reap/b.yml 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'pvc b-pvc' "{{${id_field}}}" 'b-pvc'
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl delete pvc b-pvc 2>&1 "${kube_flags[@]}"
## kubectl apply --prune --prune-whitelist
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply pod a
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# apply svc and don't prune pod a by overwriting whitelist
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml --prune-whitelist core/v1/Service 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'service prune-svc' "{{${id_field}}}" 'prune-svc'
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# apply svc and prune pod a with default whitelist
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'service prune-svc' "{{${id_field}}}" 'prune-svc'
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# cleanup
kubectl delete svc prune-svc 2>&1 "${kube_flags[@]}"
## kubectl apply -f some.yml --force
# Pre-condition: no service exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
# apply service a
kubectl apply -f hack/testdata/service-revision1.yaml "${kube_flags[@]}"
# check right service exists
kube::test::get_object_assert 'services a' "{{${id_field}}}" 'a'
# change immutable field and apply service a
output_message=$(! kubectl apply -f hack/testdata/service-revision2.yaml 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'field is immutable'
# apply --force to recreate resources for immutable fields
kubectl apply -f hack/testdata/service-revision2.yaml --force "${kube_flags[@]}"
# check immutable field exists
kube::test::get_object_assert 'services a' "{{.spec.clusterIP}}" '10.0.0.12'
# cleanup
kubectl delete -f hack/testdata/service-revision2.yaml "${kube_flags[@]}"
set +o nounset
set +o errexit
}
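The apply tests above, and every script in this diff, lean on the kube::test::get_object_assert helper, which renders a resource with a Go template and compares the result to an expected value. Its real definition lives in hack/lib/test.sh and is not part of this diff; the sketch below is only an illustration of the calling convention, under the assumption that a single, plain string comparison is enough (the upstream helper adds extra formatting and options).

# Hypothetical, simplified sketch of the assertion helper used throughout these scripts.
kube::test::get_object_assert() {
  local object=$1      # resource and optional name, e.g. 'pods test-pod'
  local request=$2     # Go template to render, e.g. '{{.metadata.name}}'
  local expected=$3    # expected rendered value
  local res
  # kube_flags is assumed to be set by the test harness, as in the scripts above.
  # shellcheck disable=SC2086  # intentional word splitting of ${object}
  res=$(kubectl get ${object} "${kube_flags[@]}" -o go-template="${request}")
  if [[ "${res}" == "${expected}" ]]; then
    echo "Successful get ${object} ${request}: ${res}"
  else
    echo "FAIL! get ${object} ${request} returned '${res}', expected '${expected}'" >&2
    return 1
  fi
}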

vendor/k8s.io/kubernetes/test/cmd/apps.sh generated vendored Executable file (654 lines changed)

@@ -0,0 +1,654 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
run_daemonset_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:daemonsets)"
### Create a rolling update DaemonSet
# Pre-condition: no DaemonSet exists
kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
# Template Generation should be 1
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
# Template Generation should stay 1
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
# Test set commands
kubectl set image daemonsets/bind "${kube_flags[@]}" *=k8s.gcr.io/pause:test-cmd
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '2'
kubectl set env daemonsets/bind "${kube_flags[@]}" foo=bar
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '3'
kubectl set resources daemonsets/bind "${kube_flags[@]}" --limits=cpu=200m,memory=512Mi
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '4'
# Clean up
kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
set +o nounset
set +o errexit
}
run_daemonset_history_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:daemonsets, v1:controllerrevisions)"
### Test rolling back a DaemonSet
# Pre-condition: no DaemonSet or its pods exist
kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create a DaemonSet (revision 1)
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml --record "${kube_flags[@]}"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset.yaml --record.*"
# Rollback to revision 1 - should be no-op
kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]}"
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
# Update the DaemonSet (revision 2)
kubectl apply -f hack/testdata/rollingupdate-daemonset-rv2.yaml --record "${kube_flags[@]}"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset-rv2.yaml --record.*"
# Rollback to revision 1 with dry-run - should be no-op
kubectl rollout undo daemonset --dry-run=true "${kube_flags[@]}"
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
# Rollback to revision 1
kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]}"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
# Rollback to revision 1000000 - should fail
output_message=$(! kubectl rollout undo daemonset --to-revision=1000000 "${kube_flags[@]}" 2>&1)
kube::test::if_has_string "${output_message}" "unable to find specified revision"
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
# Rollback to last revision
kubectl rollout undo daemonset "${kube_flags[@]}"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
# Clean up
kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
set +o nounset
set +o errexit
}
run_kubectl_apply_deployments_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl apply deployments"
## kubectl apply should propagate user defined null values
# Pre-Condition: no Deployments, ReplicaSets, Pods exist
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply base deployment
kubectl apply -f hack/testdata/null-propagation/deployment-l1.yaml "${kube_flags[@]}"
# check right deployment exists
kube::test::get_object_assert 'deployments my-depl' "{{${id_field}}}" 'my-depl'
# check right labels exists
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" 'l1'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" 'l1'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" 'l1'
# apply new deployment with new template labels
kubectl apply -f hack/testdata/null-propagation/deployment-l2.yaml "${kube_flags[@]}"
# check right labels exists
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l2}}" 'l2'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l2}}" 'l2'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l2}}" 'l2'
# cleanup
# need to explicitly remove replicasets and pods because we changed the deployment selector and orphaned things
kubectl delete deployments,rs,pods --all --cascade=false --grace-period=0
# Post-Condition: no Deployments, ReplicaSets, Pods exist
kube::test::wait_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::wait_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# kubectl apply deployment --overwrite=true --force=true
# Pre-Condition: no deployment exists
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
# apply deployment nginx
kubectl apply -f hack/testdata/deployment-label-change1.yaml "${kube_flags[@]}"
# check right deployment exists
kube::test::get_object_assert 'deployment nginx' "{{${id_field}}}" 'nginx'
# apply deployment with new labels and a conflicting resourceVersion
output_message=$(! kubectl apply -f hack/testdata/deployment-label-change2.yaml 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'Error from server (Conflict)'
# apply deployment with --force and --overwrite will succeed
kubectl apply -f hack/testdata/deployment-label-change2.yaml --overwrite=true --force=true --grace-period=10
# check the changed deployment
output_message=$(kubectl apply view-last-applied deploy/nginx -o json 2>&1 "${kube_flags[@]}" |grep nginx2)
kube::test::if_has_string "${output_message}" '"name": "nginx2"'
# applying a resource (with --force) that is both conflicting and invalid will
# cause the server to only return a "Conflict" error when we attempt to patch.
# This means that we will delete the existing resource after receiving 5 conflict
# errors in a row from the server, and will attempt to create the modified
# resource that we are passing to "apply". Since the modified resource is also
# invalid, we will receive an invalid error when we attempt to create it, after
# having deleted the old resource. Ensure that when this case is reached, the
# old resource is restored once again, and the validation error is printed.
output_message=$(! kubectl apply -f hack/testdata/deployment-label-change3.yaml --force 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'Invalid value'
# Ensure that the old object has been restored
kube::test::get_object_assert 'deployment nginx' "{{${template_labels}}}" 'nginx2'
# cleanup
kubectl delete deployments --all --grace-period=10
set +o nounset
set +o errexit
}
run_deployment_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing deployments"
# Test kubectl create deployment (using default - old generator)
kubectl create deployment test-nginx-extensions --image=k8s.gcr.io/nginx:test-cmd
# Post-Condition: Deployment "nginx" is created.
kube::test::get_object_assert 'deploy test-nginx-extensions' "{{$container_name_field}}" 'nginx'
# and the old generator was used, i.e. old defaults are applied
output_message=$(kubectl get deployment.extensions/test-nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_not_string "${output_message}" '2'
# Ensure we can interact with deployments through extensions and apps endpoints
output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'apps/v1'
# Clean up
kubectl delete deployment test-nginx-extensions "${kube_flags[@]}"
# Test kubectl create deployment
kubectl create deployment test-nginx-apps --image=k8s.gcr.io/nginx:test-cmd --generator=deployment-basic/apps.v1beta1
# Post-Condition: Deployment "nginx" is created.
kube::test::get_object_assert 'deploy test-nginx-apps' "{{$container_name_field}}" 'nginx'
# and the new generator was used, i.e. new defaults are applied
output_message=$(kubectl get deployment/test-nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_string "${output_message}" '2'
# Ensure we can interact with deployments through extensions and apps endpoints
output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'apps/v1'
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Controlled By" "Replicas:" "Pods Status:" "Volumes:"
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By"
# Clean up
kubectl delete deployment test-nginx-apps "${kube_flags[@]}"
### Test kubectl create deployment with image and command
# Pre-Condition: No deployment exists.
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create deployment nginx-with-command --image=k8s.gcr.io/nginx:test-cmd -- /bin/sleep infinity
# Post-Condition: Deployment "nginx" is created.
kube::test::get_object_assert 'deploy nginx-with-command' "{{$container_name_field}}" 'nginx'
# Clean up
kubectl delete deployment nginx-with-command "${kube_flags[@]}"
### Test kubectl create deployment should not fail validation
# Pre-Condition: No deployment exists.
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/deployment-with-UnixUserID.yaml "${kube_flags[@]}"
# Post-Condition: Deployment "deployment-with-unixuserid" is created.
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'deployment-with-unixuserid:'
# Clean up
kubectl delete deployment deployment-with-unixuserid "${kube_flags[@]}"
### Test cascading deletion
## Test that rs is deleted when deployment is deleted.
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create deployment
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
# Wait for rs to come up.
kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '3'
# Deleting the deployment should delete the rs.
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
## Test that rs is not deleted when deployment is deleted with cascade set to false.
# Pre-condition: no deployment and rs exist
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Create deployment
kubectl create deployment nginx-deployment --image=k8s.gcr.io/nginx:test-cmd
# Wait for rs to come up.
kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
# Delete the deployment with cascade set to false.
kubectl delete deployment nginx-deployment "${kube_flags[@]}" --cascade=false
# Wait for the deployment to be deleted and then verify that rs is not
# deleted.
kube::test::wait_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
# Cleanup
# Find the name of the rs to be deleted.
output_message=$(kubectl get rs "${kube_flags[@]}" -o template --template={{range.items}}{{$id_field}}{{end}})
kubectl delete rs ${output_message} "${kube_flags[@]}"
### Auto scale deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
# autoscale 2~3 pods, no CPU utilization specified
kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]}" --min=2 --max=3
kube::test::get_object_assert 'hpa nginx-deployment' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
# Clean up
# Note that we should delete hpa first, otherwise it may fight with the deployment reaper.
kubectl delete hpa nginx-deployment "${kube_flags[@]}"
kubectl delete deployment.extensions nginx-deployment "${kube_flags[@]}"
### Rollback a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create a deployment (revision 1)
kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to revision 1 - should be no-op
kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Update the deployment (revision 2)
kubectl apply -f hack/testdata/deployment-revision2.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment.extensions "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Rollback to revision 1 with dry-run - should be no-op
kubectl rollout undo deployment nginx --dry-run=true "${kube_flags[@]}" | grep "test-cmd"
kube::test::get_object_assert deployment.extensions "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Rollback to revision 1
kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}"
sleep 1
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to revision 1000000 - should be no-op
kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to last revision
kubectl rollout undo deployment nginx "${kube_flags[@]}"
sleep 1
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Pause the deployment
kubectl-with-retry rollout pause deployment nginx "${kube_flags[@]}"
# A paused deployment cannot be rolled back
! kubectl rollout undo deployment nginx "${kube_flags[@]}"
# Resume the deployment
kubectl-with-retry rollout resume deployment nginx "${kube_flags[@]}"
# The resumed deployment can now be rolled back
kubectl rollout undo deployment nginx "${kube_flags[@]}"
# Check that the new replica set has all old revisions stored in an annotation
newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')"
kubectl get rs "${newrs}" -o yaml | grep "deployment.kubernetes.io/revision-history: 1,3"
# Check that trying to watch the status of a superseded revision returns an error
! kubectl rollout status deployment/nginx --revision=3
cat hack/testdata/deployment-revision1.yaml | ${SED} "s/name: nginx$/name: nginx2/" | kubectl create -f - "${kube_flags[@]}"
# Deletion of both deployments should not be blocked
kubectl delete deployment nginx2 "${kube_flags[@]}"
# Clean up
kubectl delete deployment nginx "${kube_flags[@]}"
### Set image of a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create a deployment
kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set the deployment's image
kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set non-existing container should fail
! kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]}"
# Set image of deployments without specifying name
kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set image of a deployment specified by file
kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set image of a local file without talking to the server
kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}" --local -o yaml
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set image of all containers of the deployment
kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Set image of all containers of the deployment again when the image does not change
kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Clean up
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
### Set env of a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create a deployment
kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/configmap.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/secret.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
kube::test::get_object_assert configmap "{{range.items}}{{$id_field}}:{{end}}" 'test-set-env-config:'
kube::test::get_object_assert secret "{{range.items}}{{$id_field}}:{{end}}" 'test-set-env-secret:'
# Set env of deployments by configmap from keys
kubectl set env deployment nginx-deployment --keys=key-2 --from=configmap/test-set-env-config "${kube_flags[@]}"
# Assert correct value in deployment env
kube::test::get_object_assert 'deploy nginx-deployment' "{{ (index (index .spec.template.spec.containers 0).env 0).name}}" 'KEY_2'
# Assert single value in deployment env
kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '1'
# Set env of deployments by configmap
kubectl set env deployment nginx-deployment --from=configmap/test-set-env-config "${kube_flags[@]}"
# Assert all values in deployment env
kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '2'
# Set env of deployments for all containers
kubectl set env deployment nginx-deployment env=prod "${kube_flags[@]}"
# Set env of deployments for specific container
kubectl set env deployment nginx-deployment superenv=superprod -c=nginx "${kube_flags[@]}"
# Set env of deployments by secret from keys
kubectl set env deployment nginx-deployment --keys=username --from=secret/test-set-env-secret "${kube_flags[@]}"
# Set env of deployments by secret
kubectl set env deployment nginx-deployment --from=secret/test-set-env-secret "${kube_flags[@]}"
# Remove specific env of deployment
kubectl set env deployment nginx-deployment env-
# Clean up
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
kubectl delete configmap test-set-env-config "${kube_flags[@]}"
kubectl delete secret test-set-env-secret "${kube_flags[@]}"
set +o nounset
set +o errexit
}
run_statefulset_history_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:statefulsets, v1:controllerrevisions)"
### Test rolling back a StatefulSet
# Pre-condition: no statefulset or its pods exist
kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create a StatefulSet (revision 1)
kubectl apply -f hack/testdata/rollingupdate-statefulset.yaml --record "${kube_flags[@]}"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset.yaml --record.*"
# Rollback to revision 1 - should be no-op
kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]}"
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
# Update the statefulset (revision 2)
kubectl apply -f hack/testdata/rollingupdate-statefulset-rv2.yaml --record "${kube_flags[@]}"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset-rv2.yaml --record.*"
# Rollback to revision 1 with dry-run - should be no-op
kubectl rollout undo statefulset --dry-run=true "${kube_flags[@]}"
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
# Rollback to revision 1
kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]}"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
# Rollback to revision 1000000 - should fail
output_message=$(! kubectl rollout undo statefulset --to-revision=1000000 "${kube_flags[@]}" 2>&1)
kube::test::if_has_string "${output_message}" "unable to find specified revision"
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
# Rollback to last revision
kubectl rollout undo statefulset "${kube_flags[@]}"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
# Clean up - delete newest configuration
kubectl delete -f hack/testdata/rollingupdate-statefulset-rv2.yaml "${kube_flags[@]}"
# Post-condition: no pods from statefulset controller
wait-for-pods-with-label "app=nginx-statefulset" ""
set +o nounset
set +o errexit
}
run_stateful_set_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:statefulsets)"
### Create and stop statefulset, make sure it doesn't leak pods
# Pre-condition: no statefulset exists
kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create statefulset
kubectl create -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]}"
### Scale statefulset test with current-replicas and replicas
# Pre-condition: 0 replicas
kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '0'
kube::test::wait_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '1'
# Command: Scale up
kubectl scale --current-replicas=0 --replicas=1 statefulset nginx "${kube_flags[@]}"
# Post-condition: 1 replica, named nginx-0
kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '1'
kube::test::wait_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '2'
# Typically we'd wait and confirm that N>1 replicas are up, but this framework
# doesn't start the scheduler, so pet-0 will block all others.
# TODO: test robust scaling in an e2e.
wait-for-pods-with-label "app=nginx-statefulset" "nginx-0"
### Clean up
kubectl delete -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]}"
# Post-condition: no pods from statefulset controller
wait-for-pods-with-label "app=nginx-statefulset" ""
set +o nounset
set +o errexit
}
run_rs_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:replicasets)"
### Create and stop a replica set, make sure it doesn't leak pods
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kube::log::status "Deleting rs"
kubectl delete rs frontend "${kube_flags[@]}"
# Post-condition: no pods from frontend replica set
kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''
### Create and then delete a replica set with cascade=false, make sure it doesn't delete pods.
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
# wait for all 3 pods to be set up
kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
kube::log::status "Deleting rs"
kubectl delete rs frontend "${kube_flags[@]}" --cascade=false
# Wait for the rs to be deleted.
kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Post-condition: All 3 pods still remain from frontend replica set
kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
# Cleanup
kubectl delete pods -l "tier=frontend" "${kube_flags[@]}"
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create replica set frontend from YAML
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
# Post-condition: frontend replica set is created
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Describe command should print detailed information
kube::test::describe_object_assert rs 'frontend' "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
# Describe command should print events information by default
kube::test::describe_object_events_assert rs 'frontend'
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert rs 'frontend' false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert rs 'frontend' true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert rs
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert rs false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert rs true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By"
### Scale replica set frontend with current-replicas and replicas
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
# Command
kubectl scale --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '2'
# Set up three deployments; two of them have the same label
kubectl create -f hack/testdata/scale-deploy-1.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/scale-deploy-2.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/scale-deploy-3.yaml "${kube_flags[@]}"
kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '1'
kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '1'
kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
# Test kubectl scale --selector
kubectl scale deploy --replicas=2 -l run=hello
kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '2'
kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '2'
kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
# Test kubectl scale --all
kubectl scale deploy --replicas=3 --all
kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '3'
kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '3'
kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '3'
# Clean-up
kubectl delete rs frontend "${kube_flags[@]}"
kubectl delete deploy scale-1 scale-2 scale-3 "${kube_flags[@]}"
### Expose replica set as service
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
# Command
kubectl expose rs frontend --port=80 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" '<no value> 80'
# Create a service using service/v1 generator
kubectl expose rs frontend --port=80 --name=frontend-2 --generator=service/v1 "${kube_flags[@]}"
# Post-condition: service exists and the port is named default.
kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" 'default 80'
# Cleanup services
kubectl delete service frontend{,-2} "${kube_flags[@]}"
# Test set commands
# Pre-condition: frontend replica set exists at generation 1
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '1'
kubectl set image rs/frontend "${kube_flags[@]}" *=k8s.gcr.io/pause:test-cmd
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '2'
kubectl set env rs/frontend "${kube_flags[@]}" foo=bar
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '3'
kubectl set resources rs/frontend "${kube_flags[@]}" --limits=cpu=200m,memory=512Mi
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '4'
### Delete replica set with id
# Pre-condition: frontend replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Command
kubectl delete rs frontend "${kube_flags[@]}"
# Post-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
### Create two replica sets
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/redis-slave-replicaset.yaml "${kube_flags[@]}"
# Post-condition: frontend and redis-slave
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
### Delete multiple replica sets at once
# Pre-condition: frontend and redis-slave
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
# Command
kubectl delete rs frontend redis-slave "${kube_flags[@]}" # delete multiple replica sets at once
# Post-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
if kube::test::if_supports_resource "${horizontalpodautoscalers}" ; then
### Auto scale replica set
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# autoscale 1~2 pods, CPU utilization 70%, replica set specified by file
kubectl autoscale -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale 2~3 pods, no CPU utilization specified, replica set specified by name
kubectl autoscale rs frontend "${kube_flags[@]}" --min=2 --max=3
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale without specifying --max should fail
! kubectl autoscale rs frontend "${kube_flags[@]}"
# Clean up
kubectl delete rs frontend "${kube_flags[@]}"
fi
set +o nounset
set +o errexit
}
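apps.sh also makes heavy use of kube::test::wait_object_assert, the polling counterpart of the assertion sketched earlier, for conditions that only become true after a controller reconciles (note the expected values above can be regex patterns such as ".*--record.*"). Again, the real helper ships in hack/lib/test.sh outside this diff; what follows is only a hedged sketch assuming a fixed-interval retry loop and anchored regex matching.

# Hypothetical, simplified sketch of the polling assertion helper.
kube::test::wait_object_assert() {
  local object=$1 request=$2 expected=$3
  local tries=0 max_tries=10 res=""
  while (( tries < max_tries )); do
    # kube_flags is assumed to be set by the test harness, as in the scripts above.
    # shellcheck disable=SC2086  # intentional word splitting of ${object}
    res=$(kubectl get ${object} "${kube_flags[@]}" -o go-template="${request}" 2>/dev/null || true)
    # expected may be a literal string or a regex pattern, so match it as an anchored regex.
    if [[ "${res}" =~ ^${expected}$ ]]; then
      echo "Successful wait for ${object} ${request}: ${res}"
      return 0
    fi
    tries=$((tries + 1))
    sleep 1
  done
  echo "FAIL! wait for ${object} ${request} last returned '${res}', expected '${expected}'" >&2
  return 1
}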

vendor/k8s.io/kubernetes/test/cmd/authorization.sh generated vendored Executable file (81 lines changed)

@@ -0,0 +1,81 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
run_authorization_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing authorization"
# check remote authorization endpoint, kubectl doesn't actually display the returned object so this isn't super useful
# but it proves that it works
kubectl create -f test/fixtures/pkg/kubectl/cmd/create/sar-v1.json --validate=false
kubectl create -f test/fixtures/pkg/kubectl/cmd/create/sar-v1beta1.json --validate=false
SAR_RESULT_FILE="${KUBE_TEMP}/sar-result.json"
curl -k -H "Content-Type:" http://localhost:8080/apis/authorization.k8s.io/v1beta1/subjectaccessreviews -XPOST -d @test/fixtures/pkg/kubectl/cmd/create/sar-v1beta1.json > "${SAR_RESULT_FILE}"
if grep -q '"allowed": true' "${SAR_RESULT_FILE}"; then
kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" returns as expected: $(cat "${SAR_RESULT_FILE}")"
else
kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" does not return as expected: $(cat "${SAR_RESULT_FILE}")"
exit 1
fi
rm "${SAR_RESULT_FILE}"
SAR_RESULT_FILE="${KUBE_TEMP}/sar-result.json"
curl -k -H "Content-Type:" http://localhost:8080/apis/authorization.k8s.io/v1/subjectaccessreviews -XPOST -d @test/fixtures/pkg/kubectl/cmd/create/sar-v1.json > "${SAR_RESULT_FILE}"
if grep -q '"allowed": true' "${SAR_RESULT_FILE}"; then
kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" returns as expected: $(cat "${SAR_RESULT_FILE}")"
else
kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" does not return as expected: $(cat "${SAR_RESULT_FILE}")"
exit 1
fi
rm "${SAR_RESULT_FILE}"
set +o nounset
set +o errexit
}
run_impersonation_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing impersonation"
output_message=$(! kubectl get pods "${kube_flags_with_token[@]}" --as-group=foo 2>&1)
kube::test::if_has_string "${output_message}" 'without impersonating a user'
if kube::test::if_supports_resource "${csr}" ; then
# --as
kubectl create -f hack/testdata/csr.yml "${kube_flags_with_token[@]}" --as=user1
kube::test::get_object_assert 'csr/foo' '{{.spec.username}}' 'user1'
kube::test::get_object_assert 'csr/foo' '{{range .spec.groups}}{{.}}{{end}}' 'system:authenticated'
kubectl delete -f hack/testdata/csr.yml "${kube_flags_with_token[@]}"
# --as-group
kubectl create -f hack/testdata/csr.yml "${kube_flags_with_token[@]}" --as=user1 --as-group=group2 --as-group=group1 --as-group=,,,chameleon
kube::test::get_object_assert 'csr/foo' '{{len .spec.groups}}' '3'
kube::test::get_object_assert 'csr/foo' '{{range .spec.groups}}{{.}} {{end}}' 'group2 group1 ,,,chameleon '
kubectl delete -f hack/testdata/csr.yml "${kube_flags_with_token[@]}"
fi
set +o nounset
set +o errexit
}

vendor/k8s.io/kubernetes/test/cmd/batch.sh generated vendored Executable file (63 lines changed)

@@ -0,0 +1,63 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
run_job_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing job"
### Create a new namespace
# Pre-condition: the test-jobs namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-jobs\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace test-jobs
# Post-condition: namespace 'test-jobs' is created.
kube::test::get_object_assert 'namespaces/test-jobs' "{{$id_field}}" 'test-jobs'
### Create a cronjob in a specific namespace
kubectl run pi --schedule="59 23 31 2 *" --namespace=test-jobs --generator=cronjob/v1beta1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
# Post-Condition: assertion object exists
kube::test::get_object_assert 'cronjob/pi --namespace=test-jobs' "{{$id_field}}" 'pi'
kubectl get cronjob/pi --namespace=test-jobs
kubectl describe cronjob/pi --namespace=test-jobs
### Create a job in dry-run mode
output_message=$(kubectl create job test-job --from=cronjob/pi --dry-run=true --namespace=test-jobs -o name)
# Post-condition: The text 'job.batch/test-job' should be part of the output
kube::test::if_has_string "${output_message}" 'job.batch/test-job'
# Post-condition: The test-job was not actually created
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}{{end}}" ''
### Create a job in a specific namespace
kubectl create job test-job --from=cronjob/pi --namespace=test-jobs
# Post-Condition: assertion object exists
kube::test::get_object_assert 'job/test-job --namespace=test-jobs' "{{$id_field}}" 'test-job'
kubectl get job/test-job --namespace=test-jobs
kubectl describe job/test-job --namespace=test-jobs
#Clean up
kubectl delete job test-job --namespace=test-jobs
kubectl delete cronjob pi --namespace=test-jobs
kubectl delete namespace test-jobs
set +o nounset
set +o errexit
}

vendor/k8s.io/kubernetes/test/cmd/certificate.sh generated vendored Executable file (63 lines changed)

@@ -0,0 +1,63 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
run_certificates_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing certificates"
# approve
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
kubectl certificate approve foo "${kube_flags[@]}"
kubectl get csr "${kube_flags[@]}" -o json
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Approved'
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
kubectl certificate approve -f hack/testdata/csr.yml "${kube_flags[@]}"
kubectl get csr "${kube_flags[@]}" -o json
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Approved'
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
# deny
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
kubectl certificate deny foo "${kube_flags[@]}"
kubectl get csr "${kube_flags[@]}" -o json
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Denied'
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
kubectl certificate deny -f hack/testdata/csr.yml "${kube_flags[@]}"
kubectl get csr "${kube_flags[@]}" -o json
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Denied'
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
set +o nounset
set +o errexit
}
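# Illustrative only, not used by run_certificates_tests: a sketch of the rough
# shape a CSR manifest such as hack/testdata/csr.yml takes in the
# certificates.k8s.io/v1beta1 API. The spec.request value must be a
# base64-encoded PEM PKCS#10 CSR; the placeholder below is not valid as-is.
example_csr_manifest() {
  cat <<'EOF'
apiVersion: certificates.k8s.io/v1beta1
kind: CertificateSigningRequest
metadata:
  name: foo
spec:
  request: BASE64_ENCODED_PKCS10_CSR
  usages:
  - digital signature
  - key encipherment
  - server auth
EOF
}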

1383
vendor/k8s.io/kubernetes/test/cmd/core.sh generated vendored Executable file

File diff suppressed because it is too large

469
vendor/k8s.io/kubernetes/test/cmd/crd.sh generated vendored Executable file

@@ -0,0 +1,469 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
run_crd_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl crd"
kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
{
"kind": "CustomResourceDefinition",
"apiVersion": "apiextensions.k8s.io/v1beta1",
"metadata": {
"name": "foos.company.com"
},
"spec": {
"group": "company.com",
"version": "v1",
"scope": "Namespaced",
"names": {
"plural": "foos",
"kind": "Foo"
}
}
}
__EOF__
# Post-Condition: assertion object exists
kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{if eq $id_field \\\"foos.company.com\\\"}}{{$id_field}}:{{end}}{{end}}" 'foos.company.com:'
kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
{
"kind": "CustomResourceDefinition",
"apiVersion": "apiextensions.k8s.io/v1beta1",
"metadata": {
"name": "bars.company.com"
},
"spec": {
"group": "company.com",
"version": "v1",
"scope": "Namespaced",
"names": {
"plural": "bars",
"kind": "Bar"
}
}
}
__EOF__
# Post-Condition: assertion object exists
kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{if eq $id_field \\\"foos.company.com\\\" \\\"bars.company.com\\\"}}{{$id_field}}:{{end}}{{end}}" 'bars.company.com:foos.company.com:'
# This test ensures that the name printer is able to output a resource
# in the proper "kind.group/resource_name" format, and that the
# resource builder is able to resolve a GVK when a kind.group pair is given.
kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
{
"kind": "CustomResourceDefinition",
"apiVersion": "apiextensions.k8s.io/v1beta1",
"metadata": {
"name": "resources.mygroup.example.com"
},
"spec": {
"group": "mygroup.example.com",
"version": "v1alpha1",
"scope": "Namespaced",
"names": {
"plural": "resources",
"singular": "resource",
"kind": "Kind",
"listKind": "KindList"
}
}
}
__EOF__
# Post-Condition: assertion crd with non-matching kind and resource exists
kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{if eq $id_field \\\"foos.company.com\\\" \\\"bars.company.com\\\" \\\"resources.mygroup.example.com\\\"}}{{$id_field}}:{{end}}{{end}}" 'bars.company.com:foos.company.com:resources.mygroup.example.com:'
# This test ensures that we can create complex validation without client-side validation complaining
kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
{
"kind": "CustomResourceDefinition",
"apiVersion": "apiextensions.k8s.io/v1beta1",
"metadata": {
"name": "validfoos.company.com"
},
"spec": {
"group": "company.com",
"version": "v1",
"scope": "Namespaced",
"names": {
"plural": "validfoos",
"kind": "ValidFoo"
},
"validation": {
"openAPIV3Schema": {
"properties": {
"spec": {
"type": "array",
"items": {
"type": "number"
}
}
}
}
}
}
}
__EOF__
# Post-Condition: assertion crd with complex validation exists
kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{if eq $id_field \\\"foos.company.com\\\" \\\"bars.company.com\\\" \\\"resources.mygroup.example.com\\\" \\\"validfoos.company.com\\\"}}{{$id_field}}:{{end}}{{end}}" 'bars.company.com:foos.company.com:resources.mygroup.example.com:validfoos.company.com:'
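# For reference, a minimal object satisfying the validation schema above would
# look like the following (illustrative only; this test only creates the CRD
# and never instantiates a ValidFoo):
#   apiVersion: company.com/v1
#   kind: ValidFoo
#   metadata:
#     name: example
#   spec: [1, 2.5, 3]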
run_non_native_resource_tests
# teardown
kubectl delete customresourcedefinitions/foos.company.com "${kube_flags_with_token[@]}"
kubectl delete customresourcedefinitions/bars.company.com "${kube_flags_with_token[@]}"
kubectl delete customresourcedefinitions/resources.mygroup.example.com "${kube_flags_with_token[@]}"
kubectl delete customresourcedefinitions/validfoos.company.com "${kube_flags_with_token[@]}"
set +o nounset
set +o errexit
}
kube::util::non_native_resources() {
local times
local wait
local failed
times=30
wait=10
local i
for i in $(seq 1 $times); do
failed=""
kubectl "${kube_flags[@]}" get --raw '/apis/company.com/v1' || failed=true
kubectl "${kube_flags[@]}" get --raw '/apis/company.com/v1/foos' || failed=true
kubectl "${kube_flags[@]}" get --raw '/apis/company.com/v1/bars' || failed=true
if [ -z "${failed}" ]; then
return 0
fi
sleep ${wait}
done
kube::log::error "Timed out waiting for non-native-resources; tried ${times} waiting ${wait}s between each"
return 1
}
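# The helper above is called at the start of run_non_native_resource_tests to
# make sure discovery information for company.com/v1 (and the foos/bars
# resources) has propagated before any custom-resource commands run.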
run_non_native_resource_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl non-native resources"
kube::util::non_native_resources
# Test that we can list this new CustomResource (foos)
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can list this new CustomResource (bars)
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can list this new CustomResource (resources)
kube::test::get_object_assert resources "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create a new resource of type Kind
kubectl "${kube_flags[@]}" create -f hack/testdata/CRD/resource.yaml "${kube_flags[@]}"
# Test that -o name returns kind.group/resourcename
output_message=$(kubectl "${kube_flags[@]}" get resource/myobj -o name)
kube::test::if_has_string "${output_message}" 'kind.mygroup.example.com/myobj'
output_message=$(kubectl "${kube_flags[@]}" get resources/myobj -o name)
kube::test::if_has_string "${output_message}" 'kind.mygroup.example.com/myobj'
output_message=$(kubectl "${kube_flags[@]}" get kind.mygroup.example.com/myobj -o name)
kube::test::if_has_string "${output_message}" 'kind.mygroup.example.com/myobj'
# Delete the resource with cascade.
kubectl "${kube_flags[@]}" delete resources myobj --cascade=true
# Make sure it's gone
kube::test::wait_object_assert resources "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create a new resource of type Foo
kubectl "${kube_flags[@]}" create -f hack/testdata/CRD/foo.yaml "${kube_flags[@]}"
# Test that we can list this new custom resource
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Test alternate forms
kube::test::get_object_assert foo "{{range.items}}{{$id_field}}:{{end}}" 'test:'
kube::test::get_object_assert foos.company.com "{{range.items}}{{$id_field}}:{{end}}" 'test:'
kube::test::get_object_assert foos.v1.company.com "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Test all printers, with lists and individual items
kube::log::status "Testing CustomResource printing"
kubectl "${kube_flags[@]}" get foos
kubectl "${kube_flags[@]}" get foos/test
kubectl "${kube_flags[@]}" get foos -o name
kubectl "${kube_flags[@]}" get foos/test -o name
kubectl "${kube_flags[@]}" get foos -o wide
kubectl "${kube_flags[@]}" get foos/test -o wide
kubectl "${kube_flags[@]}" get foos -o json
kubectl "${kube_flags[@]}" get foos/test -o json
kubectl "${kube_flags[@]}" get foos -o yaml
kubectl "${kube_flags[@]}" get foos/test -o yaml
kubectl "${kube_flags[@]}" get foos -o "jsonpath={.items[*].someField}" --allow-missing-template-keys=false
kubectl "${kube_flags[@]}" get foos/test -o "jsonpath={.someField}" --allow-missing-template-keys=false
kubectl "${kube_flags[@]}" get foos -o "go-template={{range .items}}{{.someField}}{{end}}" --allow-missing-template-keys=false
kubectl "${kube_flags[@]}" get foos/test -o "go-template={{.someField}}" --allow-missing-template-keys=false
output_message=$(kubectl "${kube_flags[@]}" get foos/test -o name)
kube::test::if_has_string "${output_message}" 'foo.company.com/test'
# Test patching
kube::log::status "Testing CustomResource patching"
kubectl "${kube_flags[@]}" patch foos/test -p '{"patched":"value1"}' --type=merge
kube::test::get_object_assert foos/test "{{.patched}}" 'value1'
kubectl "${kube_flags[@]}" patch foos/test -p '{"patched":"value2"}' --type=merge --record
kube::test::get_object_assert foos/test "{{.patched}}" 'value2'
kubectl "${kube_flags[@]}" patch foos/test -p '{"patched":null}' --type=merge --record
kube::test::get_object_assert foos/test "{{.patched}}" '<no value>'
# Get local version
CRD_RESOURCE_FILE="${KUBE_TEMP}/crd-foos-test.json"
kubectl "${kube_flags[@]}" get foos/test -o json > "${CRD_RESOURCE_FILE}"
# cannot apply strategic patch locally
CRD_PATCH_ERROR_FILE="${KUBE_TEMP}/crd-foos-test-error"
! kubectl "${kube_flags[@]}" patch --local -f "${CRD_RESOURCE_FILE}" -p '{"patched":"value3"}' 2> "${CRD_PATCH_ERROR_FILE}"
if grep -q "try --type merge" "${CRD_PATCH_ERROR_FILE}"; then
kube::log::status "\"kubectl patch --local\" returns error as expected for CustomResource: $(cat ${CRD_PATCH_ERROR_FILE})"
else
kube::log::status "\"kubectl patch --local\" returns unexpected error or non-error: $(cat ${CRD_PATCH_ERROR_FILE})"
exit 1
fi
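# Strategic merge patch depends on patch metadata carried by the built-in Go
# types; custom resources have no such schema, so only JSON merge patches
# (--type=merge) can be applied to them, whether locally or against the server.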
# can apply merge patch locally
kubectl "${kube_flags[@]}" patch --local -f "${CRD_RESOURCE_FILE}" -p '{"patched":"value3"}' --type=merge -o json
# can apply merge patch remotely
kubectl "${kube_flags[@]}" patch --record -f "${CRD_RESOURCE_FILE}" -p '{"patched":"value3"}' --type=merge -o json
kube::test::get_object_assert foos/test "{{.patched}}" 'value3'
rm "${CRD_RESOURCE_FILE}"
rm "${CRD_PATCH_ERROR_FILE}"
# Test labeling
kube::log::status "Testing CustomResource labeling"
kubectl "${kube_flags[@]}" label foos --all listlabel=true
kubectl "${kube_flags[@]}" label foo/test itemlabel=true
# Test annotating
kube::log::status "Testing CustomResource annotating"
kubectl "${kube_flags[@]}" annotate foos --all listannotation=true
kubectl "${kube_flags[@]}" annotate foo/test itemannotation=true
# Test describing
kube::log::status "Testing CustomResource describing"
kubectl "${kube_flags[@]}" describe foos
kubectl "${kube_flags[@]}" describe foos/test
kubectl "${kube_flags[@]}" describe foos | grep listlabel=true
kubectl "${kube_flags[@]}" describe foos | grep itemlabel=true
# Delete the resource with cascade.
kubectl "${kube_flags[@]}" delete foos test --cascade=true
# Make sure it's gone
kube::test::wait_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create a new resource of type Bar
kubectl "${kube_flags[@]}" create -f hack/testdata/CRD/bar.yaml "${kube_flags[@]}"
# Test that we can list this new custom resource
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Test that we can watch the resource.
# Start watcher in background with process substitution,
# so we can read from stdout asynchronously.
kube::log::status "Testing CustomResource watching"
exec 3< <(kubectl "${kube_flags[@]}" get bars --request-timeout=1m --watch-only -o name & echo $! ; wait)
local watch_pid
read <&3 watch_pid
# We can't be sure when the watch gets established,
# so keep triggering events (in the background) until something comes through.
local tries=0
while [ ${tries} -lt 10 ]; do
tries=$((tries+1))
kubectl "${kube_flags[@]}" patch bars/test -p "{\"patched\":\"${tries}\"}" --type=merge
sleep 1
done &
local patch_pid=$!
# Wait up to 30s for a complete line of output.
local watch_output
read <&3 -t 30 watch_output
# Stop the watcher and the patch loop.
kill -9 ${watch_pid}
kill -9 ${patch_pid}
kube::test::if_has_string "${watch_output}" 'bar.company.com/test'
# Delete the resource without cascade.
kubectl "${kube_flags[@]}" delete bars test --cascade=false
# Make sure it's gone
kube::test::wait_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create single item via apply
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/foo.yaml
# Test that we have created a foo named test
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Test that the field has the expected value
kube::test::get_object_assert foos/test '{{.someField}}' 'field1'
# Test that applying an empty patch doesn't change fields
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/foo.yaml
# Test that the field has the same value after re-apply
kube::test::get_object_assert foos/test '{{.someField}}' 'field1'
# Test that the nested subfield still has its value from the original apply
kube::test::get_object_assert foos/test '{{.nestedField.someSubfield}}' 'subfield1'
# Update a subfield and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/foo-updated-subfield.yaml
# Test that apply has updated the subfield
kube::test::get_object_assert foos/test '{{.nestedField.someSubfield}}' 'modifiedSubfield'
# Test that the field has the expected value
kube::test::get_object_assert foos/test '{{.nestedField.otherSubfield}}' 'subfield2'
# Delete a subfield and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/foo-deleted-subfield.yaml
# Test that apply has deleted the field
kube::test::get_object_assert foos/test '{{.nestedField.otherSubfield}}' '<no value>'
# Test that the field does not exist
kube::test::get_object_assert foos/test '{{.nestedField.newSubfield}}' '<no value>'
# Add a field and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/foo-added-subfield.yaml
# Test that apply has added the field
kube::test::get_object_assert foos/test '{{.nestedField.newSubfield}}' 'subfield3'
# Delete the resource
kubectl "${kube_flags[@]}" delete -f hack/testdata/CRD/foo.yaml
# Make sure it's gone
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create list via apply
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/multi-crd-list.yaml
# Test that we have created a foo and a bar from a list
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test-list:'
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test-list:'
# Test that the field has the expected value
kube::test::get_object_assert foos/test-list '{{.someField}}' 'field1'
kube::test::get_object_assert bars/test-list '{{.someField}}' 'field1'
# Test that re-applying a list doesn't change anything
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/multi-crd-list.yaml
# Test that the field has the same value after re-apply
kube::test::get_object_assert foos/test-list '{{.someField}}' 'field1'
kube::test::get_object_assert bars/test-list '{{.someField}}' 'field1'
# Test that the fields have the expected value
kube::test::get_object_assert foos/test-list '{{.someField}}' 'field1'
kube::test::get_object_assert bars/test-list '{{.someField}}' 'field1'
# Update fields and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/multi-crd-list-updated-field.yaml
# Test that apply has updated the fields
kube::test::get_object_assert foos/test-list '{{.someField}}' 'modifiedField'
kube::test::get_object_assert bars/test-list '{{.someField}}' 'modifiedField'
# Test that the field has the expected value
kube::test::get_object_assert foos/test-list '{{.otherField}}' 'field2'
kube::test::get_object_assert bars/test-list '{{.otherField}}' 'field2'
# Delete fields and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/multi-crd-list-deleted-field.yaml
# Test that apply has deleted the fields
kube::test::get_object_assert foos/test-list '{{.otherField}}' '<no value>'
kube::test::get_object_assert bars/test-list '{{.otherField}}' '<no value>'
# Test that the fields do not exist
kube::test::get_object_assert foos/test-list '{{.newField}}' '<no value>'
kube::test::get_object_assert bars/test-list '{{.newField}}' '<no value>'
# Add a field and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/multi-crd-list-added-field.yaml
# Test that apply has added the field
kube::test::get_object_assert foos/test-list '{{.newField}}' 'field3'
kube::test::get_object_assert bars/test-list '{{.newField}}' 'field3'
# Delete the resource
kubectl "${kube_flags[@]}" delete -f hack/testdata/CRD/multi-crd-list.yaml
# Make sure it's gone
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
## kubectl apply --prune
# Test that no foo or bar exist
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# apply --prune on foo.yaml that has foo/test
kubectl apply --prune -l pruneGroup=true -f hack/testdata/CRD/foo.yaml "${kube_flags[@]}" --prune-whitelist=company.com/v1/Foo --prune-whitelist=company.com/v1/Bar
# check that the right custom resources exist
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# apply --prune on bar.yaml that has bar/test
kubectl apply --prune -l pruneGroup=true -f hack/testdata/CRD/bar.yaml "${kube_flags[@]}" --prune-whitelist=company.com/v1/Foo --prune-whitelist=company.com/v1/Bar
# check that the right custom resources exist
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test:'
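# Note on the prune runs above: objects previously applied with the matching
# label (pruneGroup=true) but absent from the current input are pruned, and
# --prune-whitelist limits pruning to the listed group/version/kind tuples,
# in this case the Foo and Bar custom resources.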
# Delete the resource
kubectl "${kube_flags[@]}" delete -f hack/testdata/CRD/bar.yaml
# Make sure it's gone
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# Test 'kubectl create' with namespace, and namespace cleanup.
kubectl "${kube_flags[@]}" create namespace non-native-resources
kubectl "${kube_flags[@]}" create -f hack/testdata/CRD/bar.yaml --namespace=non-native-resources
kube::test::get_object_assert bars '{{len .items}}' '1' --namespace=non-native-resources
kubectl "${kube_flags[@]}" delete namespace non-native-resources
# Make sure objects go away.
kube::test::wait_object_assert bars '{{len .items}}' '0' --namespace=non-native-resources
# Make sure namespace goes away.
local tries=0
while kubectl "${kube_flags[@]}" get namespace non-native-resources && [ ${tries} -lt 10 ]; do
tries=$((tries+1))
sleep ${tries}
done
set +o nounset
set +o errexit
}

110
vendor/k8s.io/kubernetes/test/cmd/create.sh generated vendored Executable file

@@ -0,0 +1,110 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# Runs tests related to kubectl create --filename(-f) --selector(-l).
run_kubectl_create_filter_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl create filter"
## kubectl create -f with label selector should only create matching objects
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# create
kubectl create -l unique-label=bingbang -f hack/testdata/filter "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field}.name}}" 'selector-test-pod'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "selector-test-pod-dont-apply" not found'
# cleanup
kubectl delete pods selector-test-pod
set +o nounset
set +o errexit
}
run_kubectl_create_error_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl create with error"
# Passing no arguments to create is an error
! kubectl create
## kubectl create should not panic on empty string lists in a template
ERROR_FILE="${KUBE_TEMP}/validation-error"
kubectl create -f hack/testdata/invalid-rc-with-empty-args.yaml "${kube_flags[@]}" 2> "${ERROR_FILE}" || true
# Post-condition: should get an error reporting the empty string
if grep -q "unknown object type \"nil\" in ReplicationController" "${ERROR_FILE}"; then
kube::log::status "\"kubectl create with empty string list returns error as expected: $(cat ${ERROR_FILE})"
else
kube::log::status "\"kubectl create with empty string list returns unexpected error or non-error: $(cat ${ERROR_FILE})"
exit 1
fi
rm "${ERROR_FILE}"
# Posting a pod to namespaces should fail. Also tests --raw forcing the post location
[ "$( kubectl convert -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -o json | kubectl create "${kube_flags[@]}" --raw /api/v1/namespaces -f - --v=8 2>&1 | grep 'cannot be handled as a Namespace: converting (v1.Pod)')" ]
[ "$( kubectl create "${kube_flags[@]}" --raw /api/v1/namespaces -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml --edit 2>&1 | grep 'raw and --edit are mutually exclusive')" ]
set +o nounset
set +o errexit
}
# Runs kubectl create job tests
run_create_job_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
# Test kubectl create job
kubectl create job test-job --image=k8s.gcr.io/nginx:test-cmd
# Post-Condition: job test-job is created
kube::test::get_object_assert 'job test-job' "{{$image_field0}}" 'k8s.gcr.io/nginx:test-cmd'
# Clean up
kubectl delete job test-job "${kube_flags[@]}"
# Test kubectl create job with command
kubectl create job test-job-pi "--image=$IMAGE_PERL" -- perl -Mbignum=bpi -wle 'print bpi(20)'
kube::test::get_object_assert 'job test-job-pi' "{{$image_field0}}" "${IMAGE_PERL}"
# Clean up
kubectl delete job test-job-pi
# Test kubectl create job from cronjob
# Pre-Condition: create a cronjob
kubectl run test-pi --schedule="* */5 * * *" --generator=cronjob/v1beta1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(10)'
kubectl create job my-pi --from=cronjob/test-pi
# Post-condition: container args contain expected command
output_message=$(kubectl get job my-pi -o go-template='{{(index .spec.template.spec.containers 0).args}}' "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "perl -Mbignum=bpi -wle print bpi(10)"
# Clean up
kubectl delete job my-pi
kubectl delete cronjob test-pi
set +o nounset
set +o errexit
}
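# Illustrative helper, not invoked by the tests above: prints the Job that
# "kubectl create job --from=cronjob/<name>" would generate without creating
# it. Assumes the named cronjob already exists in the current namespace and
# that this kubectl supports --dry-run together with -o on create subcommands.
preview_job_from_cronjob() {
  local cronjob=$1
  kubectl create job "preview-${cronjob}" --from="cronjob/${cronjob}" --dry-run -o yaml
}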

56
vendor/k8s.io/kubernetes/test/cmd/diff.sh generated vendored Executable file

@@ -0,0 +1,56 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# Runs tests for kubectl alpha diff
run_kubectl_diff_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl alpha diff"
# Test that it works when the live object doesn't exist
output_message=$(kubectl alpha diff LOCAL LIVE -f hack/testdata/pod.yaml)
kube::test::if_has_string "${output_message}" 'test-pod'
kubectl apply -f hack/testdata/pod.yaml
# Ensure that selfLink has been added, and shown in the diff
output_message=$(kubectl alpha diff -f hack/testdata/pod.yaml)
kube::test::if_has_string "${output_message}" 'selfLink'
output_message=$(kubectl alpha diff LOCAL LIVE -f hack/testdata/pod.yaml)
kube::test::if_has_string "${output_message}" 'selfLink'
output_message=$(kubectl alpha diff LOCAL MERGED -f hack/testdata/pod.yaml)
kube::test::if_has_string "${output_message}" 'selfLink'
output_message=$(kubectl alpha diff MERGED MERGED -f hack/testdata/pod.yaml)
kube::test::if_empty_string "${output_message}"
output_message=$(kubectl alpha diff LIVE LIVE -f hack/testdata/pod.yaml)
kube::test::if_empty_string "${output_message}"
output_message=$(kubectl alpha diff LAST LAST -f hack/testdata/pod.yaml)
kube::test::if_empty_string "${output_message}"
output_message=$(kubectl alpha diff LOCAL LOCAL -f hack/testdata/pod.yaml)
kube::test::if_empty_string "${output_message}"
kubectl delete -f hack/testdata/pod.yaml
set +o nounset
set +o errexit
}
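# For context: the positional arguments to this alpha "kubectl diff" name the
# object versions being compared: LOCAL (the manifest on disk), LIVE (the
# object in the cluster), LAST (the last-applied-configuration annotation) and
# MERGED (the result of merging LOCAL into LIVE). That is why diffing any
# version against itself above is expected to produce empty output.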

130
vendor/k8s.io/kubernetes/test/cmd/discovery.sh generated vendored Executable file

@@ -0,0 +1,130 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
run_RESTMapper_evaluation_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing RESTMapper"
RESTMAPPER_ERROR_FILE="${KUBE_TEMP}/restmapper-error"
### Non-existent resource type should give a recognizable error
# Pre-condition: None
# Command
kubectl get "${kube_flags[@]}" unknownresourcetype 2>${RESTMAPPER_ERROR_FILE} || true
if grep -q "the server doesn't have a resource type" "${RESTMAPPER_ERROR_FILE}"; then
kube::log::status "\"kubectl get unknownresourcetype\" returns error as expected: $(cat ${RESTMAPPER_ERROR_FILE})"
else
kube::log::status "\"kubectl get unknownresourcetype\" returns unexpected error or non-error: $(cat ${RESTMAPPER_ERROR_FILE})"
exit 1
fi
rm "${RESTMAPPER_ERROR_FILE}"
# Post-condition: None
set +o nounset
set +o errexit
}
run_assert_short_name_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing assert short name"
kube::log::status "Testing propagation of short names for resources"
output_message=$(kubectl get --raw=/api/v1)
## test if a short name is exported during discovery
kube::test::if_has_string "${output_message}" '{"name":"configmaps","singularName":"","namespaced":true,"kind":"ConfigMap","verbs":\["create","delete","deletecollection","get","list","patch","update","watch"\],"shortNames":\["cm"\]}'
set +o nounset
set +o errexit
}
run_assert_categories_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing propagation of categories for resources"
output_message=$(kubectl get --raw=/api/v1 | grep -o '"name":"pods"[^}]*}')
kube::test::if_has_string "${output_message}" '"categories":\["all"\]'
set +o nounset
set +o errexit
}
run_resource_aliasing_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing resource aliasing"
kubectl create -f test/e2e/testing-manifests/statefulset/cassandra/controller.yaml "${kube_flags[@]}"
kubectl create -f test/e2e/testing-manifests/statefulset/cassandra/service.yaml "${kube_flags[@]}"
object="all -l'app=cassandra'"
request="{{range.items}}{{range .metadata.labels}}{{.}}:{{end}}{{end}}"
# all 4 cassandra resources might not be in the response immediately...
kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:cassandra:cassandra:' || \
kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:cassandra:' || \
kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:'
kubectl delete all -l app=cassandra "${kube_flags[@]}"
set +o nounset
set +o errexit
}
run_kubectl_explain_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl(v1:explain)"
kubectl explain pods
# shortcuts work
kubectl explain po
kubectl explain po.status.message
# cronjob works
kubectl explain cronjob
set +o nounset
set +o errexit
}
run_swagger_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing swagger"
# Verify schema
file="${KUBE_TEMP}/schema-v1.json"
curl -s "http://127.0.0.1:${API_PORT}/swaggerapi/api/v1" > "${file}"
[[ "$(grep "list of returned" "${file}")" ]]
[[ "$(grep "List of services" "${file}")" ]]
[[ "$(grep "Watch for changes to the described resources" "${file}")" ]]
set +o nounset
set +o errexit
}

472
vendor/k8s.io/kubernetes/test/cmd/generic-resources.sh generated vendored Executable file

@@ -0,0 +1,472 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
run_multi_resources_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:multiple resources)"
FILES="hack/testdata/multi-resource-yaml
hack/testdata/multi-resource-list
hack/testdata/multi-resource-json
hack/testdata/multi-resource-rclist
hack/testdata/multi-resource-svclist"
YAML=".yaml"
JSON=".json"
for file in $FILES; do
if [ -f $file$YAML ]
then
file=$file$YAML
replace_file="${file%.yaml}-modify.yaml"
else
file=$file$JSON
replace_file="${file%.json}-modify.json"
fi
has_svc=true
has_rc=true
two_rcs=false
two_svcs=false
if [[ "${file}" == *rclist* ]]; then
has_svc=false
two_rcs=true
fi
if [[ "${file}" == *svclist* ]]; then
has_rc=false
two_svcs=true
fi
### Create, get, describe, replace, label, annotate, and then delete service mock and replication controller mock from 5 types of files:
### 1) YAML, separated by ---; 2) JSON, with a List type; 3) JSON, with JSON object concatenation
### 4) JSON, with a ReplicationControllerList type; 5) JSON, with a ServiceList type
echo "Testing with file ${file} and replace with file ${replace_file}"
# Pre-condition: no service (other than default kubernetes services) or replication controller exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f "${file}" "${kube_flags[@]}"
# Post-condition: mock service (and mock2) exists
if [ "$has_svc" = true ]; then
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'mock:mock2:'
else
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
fi
fi
# Post-condition: mock rc (and mock2) exists
if [ "$has_rc" = true ]; then
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:mock2:'
else
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
fi
fi
# Command
kubectl get -f "${file}" "${kube_flags[@]}"
# Command: watching multiple resources should return "not supported" error
WATCH_ERROR_FILE="${KUBE_TEMP}/kubectl-watch-error"
kubectl get -f "${file}" "${kube_flags[@]}" "--watch" 2> ${WATCH_ERROR_FILE} || true
if ! grep -q "watch is only supported on individual resources and resource collections" "${WATCH_ERROR_FILE}"; then
kube::log::error_exit "kubectl watch multiple resource returns unexpected error or non-error: $(cat ${WATCH_ERROR_FILE})" "1"
fi
kubectl describe -f "${file}" "${kube_flags[@]}"
# Command
kubectl replace -f $replace_file --force --cascade "${kube_flags[@]}"
# Post-condition: mock service (and mock2) and mock rc (and mock2) are replaced
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${labels_field}.status}}" 'replaced'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${labels_field}.status}}" 'replaced'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${labels_field}.status}}" 'replaced'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${labels_field}.status}}" 'replaced'
fi
fi
# Command: kubectl edit multiple resources
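# The fake editor below is a one-line script that non-interactively rewrites
# "status: replaced" to "status: edited" in whatever buffer kubectl edit hands
# it, so the edit can run unattended.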
temp_editor="${KUBE_TEMP}/tmp-editor.sh"
echo -e "#!/usr/bin/env bash\n${SED} -i \"s/status\:\ replaced/status\:\ edited/g\" \$@" > "${temp_editor}"
chmod +x "${temp_editor}"
EDITOR="${temp_editor}" kubectl edit "${kube_flags[@]}" -f "${file}"
# Post-condition: mock service (and mock2) and mock rc (and mock2) are edited
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${labels_field}.status}}" 'edited'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${labels_field}.status}}" 'edited'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${labels_field}.status}}" 'edited'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${labels_field}.status}}" 'edited'
fi
fi
# cleaning
rm "${temp_editor}"
# Command
# We need to set --overwrite, because otherwise, if the first attempt to run "kubectl label"
# fails on some, but not all, of the resources, retries will fail because it tries to modify
# existing labels.
kubectl-with-retry label -f $file labeled=true --overwrite "${kube_flags[@]}"
# Post-condition: mock service and mock rc (and mock2) are labeled
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${labels_field}.labeled}}" 'true'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${labels_field}.labeled}}" 'true'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${labels_field}.labeled}}" 'true'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${labels_field}.labeled}}" 'true'
fi
fi
# Command
# We need to set --overwrite, because otherwise, if the first attempt to run "kubectl annotate"
# fails on some, but not all, of the resources, retries will fail because it tries to modify
# existing annotations.
kubectl-with-retry annotate -f $file annotated=true --overwrite "${kube_flags[@]}"
# Post-condition: mock service (and mock2) and mock rc (and mock2) are annotated
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${annotations_field}.annotated}}" 'true'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${annotations_field}.annotated}}" 'true'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${annotations_field}.annotated}}" 'true'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${annotations_field}.annotated}}" 'true'
fi
fi
# Cleanup resources created
kubectl delete -f "${file}" "${kube_flags[@]}"
done
#############################
# Multiple Resources via URL#
#############################
# Pre-condition: no service (other than default kubernetes services) or replication controller exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/testdata/multi-resource-yaml.yaml "${kube_flags[@]}"
# Post-condition: service(mock) and rc(mock) exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
# Clean up
kubectl delete -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/testdata/multi-resource-yaml.yaml "${kube_flags[@]}"
# Post-condition: no service (other than default kubernetes services) or replication controller exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
set +o nounset
set +o errexit
}
run_recursive_resources_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing recursive resources"
### Create multiple busybox PODs recursively from directory of YAML files
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(! kubectl create -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are created, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
## Edit multiple busybox PODs by updating the image field of multiple PODs recursively from a directory. tmp-editor.sh is a fake editor
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
echo -e '#!/usr/bin/env bash\nsed -i "s/image: busybox/image: prom\/busybox/g" $1' > /tmp/tmp-editor.sh
chmod +x /tmp/tmp-editor.sh
output_message=$(! EDITOR=/tmp/tmp-editor.sh kubectl edit -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are not edited, and since busybox2 is malformed, it should error
# busybox0 & busybox1 PODs are not edited because the editor tries to load all
# objects in the list at once, and since the list contains invalid objects, it never opens.
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'busybox:busybox:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
# cleaning
rm /tmp/tmp-editor.sh
## Replace multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl replace -f hack/testdata/recursive/pod-modify --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are replaced, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.status}}:{{end}}" 'replaced:replaced:'
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
## Describe multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl describe -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are described, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" "app=busybox0"
kube::test::if_has_string "${output_message}" "app=busybox1"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Annotate multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl annotate -f hack/testdata/recursive/pod annotatekey='annotatevalue' --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are annotated, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{${annotations_field}.annotatekey}}:{{end}}" 'annotatevalue:annotatevalue:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Apply multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl apply -f hack/testdata/recursive/pod-modify --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are updated, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.status}}:{{end}}" 'replaced:replaced:'
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
### Convert deployment YAML file locally without affecting the live deployment.
# Pre-condition: no deployments exist
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create a deployment (revision 1)
kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Command
output_message=$(kubectl convert --local -f hack/testdata/deployment-revision1.yaml --output-version=apps/v1beta1 -o yaml "${kube_flags[@]}")
# Post-condition: apiVersion is still extensions/v1beta1 in the live deployment, but command output is the new value
kube::test::get_object_assert 'deployment nginx' "{{ .apiVersion }}" 'extensions/v1beta1'
kube::test::if_has_string "${output_message}" "apps/v1beta1"
# Clean up
kubectl delete deployment nginx "${kube_flags[@]}"
## Convert multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl convert -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are converted, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Get multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl get -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}" -o go-template="{{range.items}}{{$id_field}}:{{end}}")
# Post-condition: busybox0 & busybox1 PODs are retrieved, but because busybox2 is malformed, it should not show up
kube::test::if_has_string "${output_message}" "busybox0:busybox1:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Label multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl label -f hack/testdata/recursive/pod mylabel='myvalue' --recursive 2>&1 "${kube_flags[@]}")
echo "${output_message}"
# Post-condition: busybox0 & busybox1 PODs are labeled, but because busybox2 is malformed, it should not show up
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.mylabel}}:{{end}}" 'myvalue:myvalue:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Patch multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl patch -f hack/testdata/recursive/pod -p='{"spec":{"containers":[{"name":"busybox","image":"prom/busybox"}]}}' --recursive 2>&1 "${kube_flags[@]}")
echo "${output_message}"
# Post-condition: busybox0 & busybox1 PODs are patched, but because busybox2 is malformed, it should not show up
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'prom/busybox:prom/busybox:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Delete multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl delete -f hack/testdata/recursive/pod --recursive --grace-period=0 --force 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are deleted, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Create replication controller recursively from directory of YAML files
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}"
# Post-condition: busybox0 & busybox1 replication controllers are created
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
### Autoscale multiple replication controllers recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1
# replica each
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
# Command
output_message=$(! kubectl autoscale --min=1 --max=2 -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 replication controllers are autoscaled
# with min. of 1 replica & max of 2 replicas, and since busybox2 is malformed, it should error
kube::test::get_object_assert 'hpa busybox0' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 80'
kube::test::get_object_assert 'hpa busybox1' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 80'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
kubectl delete hpa busybox0 "${kube_flags[@]}"
kubectl delete hpa busybox1 "${kube_flags[@]}"
### Expose multiple replication controllers as service recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1
# replica each
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
# Command
output_message=$(! kubectl expose -f hack/testdata/recursive/rc --recursive --port=80 2>&1 "${kube_flags[@]}")
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service busybox0' "{{$port_name}} {{$port_field}}" '<no value> 80'
kube::test::get_object_assert 'service busybox1' "{{$port_name}} {{$port_field}}" '<no value> 80'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Scale multiple replication controllers recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1
# replica each
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
# Command
output_message=$(! kubectl scale --current-replicas=1 --replicas=2 -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 replication controllers are scaled to 2 replicas, and since busybox2 is malformed, it should error
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '2'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '2'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Delete multiple busybox replication controllers recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl delete -f hack/testdata/recursive/rc --recursive --grace-period=0 --force 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 replication controllers are deleted, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Rollout on multiple deployments recursively
# Pre-condition: no deployments exist
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create deployments (revision 1) recursively from directory of YAML files
! kubectl create -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx0-deployment:nginx1-deployment:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_NGINX}:${IMAGE_NGINX}:"
## Rollback the deployments to revision 1 recursively
output_message=$(! kubectl rollout undo -f hack/testdata/recursive/deployment --recursive --to-revision=1 2>&1 "${kube_flags[@]}")
# Post-condition: nginx0 & nginx1 should be a no-op, and since nginx2 is malformed, it should error
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_NGINX}:${IMAGE_NGINX}:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Pause the deployments recursively
PRESERVE_ERR_FILE=true
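# PRESERVE_ERR_FILE tells the kubectl-with-retry wrapper to keep its stderr
# capture (ERROR_FILE) around, so the partial failure caused by the malformed
# nginx2 manifest can be asserted on below.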
kubectl-with-retry rollout pause -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
output_message=$(cat ${ERROR_FILE})
# Post-condition: nginx0 & nginx1 should both have paused set to true, and since nginx2 is malformed, it should error
kube::test::get_object_assert deployment "{{range.items}}{{.spec.paused}}:{{end}}" "true:true:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Resume the deployments recursively
kubectl-with-retry rollout resume -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
output_message=$(cat ${ERROR_FILE})
# Post-condition: nginx0 & nginx1 should both have paused set to nothing, and since nginx2 is malformed, it should error
kube::test::get_object_assert deployment "{{range.items}}{{.spec.paused}}:{{end}}" "<no value>:<no value>:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Retrieve the rollout history of the deployments recursively
output_message=$(! kubectl rollout history -f hack/testdata/recursive/deployment --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: nginx0 & nginx1 should both have a history, and since nginx2 is malformed, it should error
kube::test::if_has_string "${output_message}" "nginx0-deployment"
kube::test::if_has_string "${output_message}" "nginx1-deployment"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
# Clean up
unset PRESERVE_ERR_FILE
rm "${ERROR_FILE}"
! kubectl delete -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}" --grace-period=0 --force
sleep 1
### Rollout on multiple replication controllers recursively - these tests ensure that rollouts cannot be performed on resources that don't support it
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create replication controllers recursively from directory of YAML files
! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}"
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
## Attempt to rollback the replication controllers to revision 1 recursively
output_message=$(! kubectl rollout undo -f hack/testdata/recursive/rc --recursive --to-revision=1 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 should error as they are RCs, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" 'no rollbacker has been implemented for {"" "ReplicationController"}'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Attempt to pause the replication controllers recursively
output_message=$(! kubectl rollout pause -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 should error as they are RCs, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox0" pausing is not supported'
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox1" pausing is not supported'
## Attempt to resume the replication controllers recursively
output_message=$(! kubectl rollout resume -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 should error as they are RCs, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox0" resuming is not supported'
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox0" resuming is not supported'
# Clean up
! kubectl delete -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}" --grace-period=0 --force
sleep 1
set +o nounset
set +o errexit
}
run_lists_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:lists)"
### Create a List with objects from multiple versions
# Command
kubectl create -f hack/testdata/list.yaml "${kube_flags[@]}"
### Delete the List with objects from multiple versions
# Command
kubectl delete service/list-service-test deployment/list-deployment-test
set +o nounset
set +o errexit
}

359
vendor/k8s.io/kubernetes/test/cmd/get.sh generated vendored Executable file

@@ -0,0 +1,359 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
run_kubectl_get_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl get"
### Test retrieval of non-existing pods
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}")
# Post-condition: POD abc should error since it doesn't exist
kube::test::if_has_string "${output_message}" 'pods "abc" not found'
### Test retrieval of non-existing POD with output flag specified
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}" -o name)
# Post-condition: POD abc should error since it doesn't exist
kube::test::if_has_string "${output_message}" 'pods "abc" not found'
### Test retrieval of pods when none exist with non-human readable output format flag specified
# Pre-condition: no pods exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o json)
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o yaml)
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o name)
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o jsonpath='{.items}')
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o go-template='{{.items}}')
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o custom-columns=NAME:.metadata.name)
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
### Test retrieval of pods when none exist, with human-readable output format flag specified
# Pre-condition: no pods exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(! kubectl get foobar 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should not be part of the output when an error occurs
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods --ignore-not-found 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o wide)
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
### Test retrieval of non-existing POD with json output flag specified
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}" -o json)
# Post-condition: POD abc should error since it doesn't exist
kube::test::if_has_string "${output_message}" 'pods "abc" not found'
# Post-condition: make sure we don't display an empty List
if kube::test::if_has_string "${output_message}" 'List'; then
echo 'Unexpected List output'
echo "${LINENO} $(basename $0)"
exit 1
fi
### Test kubectl get all
output_message=$(kubectl --v=6 --namespace default get all --chunk-size=0 2>&1 "${kube_flags[@]}")
# Post-condition: Check if we get 200 OK from all the url(s)
kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/pods 200 OK"
kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/replicationcontrollers 200 OK"
kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/services 200 OK"
kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/daemonsets 200 OK"
kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/deployments 200 OK"
kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/replicasets 200 OK"
kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/statefulsets 200 OK"
kube::test::if_has_string "${output_message}" "/apis/autoscaling/v1/namespaces/default/horizontalpodautoscalers 200"
kube::test::if_has_string "${output_message}" "/apis/batch/v1/namespaces/default/jobs 200 OK"
kube::test::if_has_not_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/daemonsets 200 OK"
kube::test::if_has_not_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/deployments 200 OK"
kube::test::if_has_not_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/replicasets 200 OK"
### Test kubectl get chunk size
output_message=$(kubectl --v=6 get clusterrole --chunk-size=10 2>&1 "${kube_flags[@]}")
# Post-condition: Check if we get a limit and continue
kube::test::if_has_string "${output_message}" "/clusterroles?limit=10 200 OK"
kube::test::if_has_string "${output_message}" "/v1/clusterroles?continue="
### Test kubectl get chunk size defaults to 500
output_message=$(kubectl --v=6 get clusterrole 2>&1 "${kube_flags[@]}")
# Post-condition: Check if we get a limit and continue
kube::test::if_has_string "${output_message}" "/clusterroles?limit=500 200 OK"
### Test kubectl get chunk size does not result in a --watch error when resource list is served in multiple chunks
# Pre-condition: no ConfigMaps exist
kube::test::get_object_assert configmap "{{range.items}}{{$id_field}}:{{end}}" ''
# Create three configmaps and ensure that we can --watch them with a --chunk-size of 1
kubectl create cm one "${kube_flags[@]}"
kubectl create cm two "${kube_flags[@]}"
kubectl create cm three "${kube_flags[@]}"
output_message=$(kubectl get configmap --chunk-size=1 --watch --request-timeout=1s 2>&1 "${kube_flags[@]}")
kube::test::if_has_not_string "${output_message}" "watch is only supported on individual resources"
output_message=$(kubectl get configmap --chunk-size=1 --watch-only --request-timeout=1s 2>&1 "${kube_flags[@]}")
kube::test::if_has_not_string "${output_message}" "watch is only supported on individual resources"
### Test --allow-missing-template-keys
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kubectl get "${kube_flags[@]}" pods -o json
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
## check --allow-missing-template-keys defaults to true for jsonpath templates
kubectl get "${kube_flags[@]}" pod valid-pod -o jsonpath='{.missing}'
## check --allow-missing-template-keys defaults to true for go templates
kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{.missing}}'
## check --template flag causes go-template to be printed, even when no --output value is provided
output_message=$(kubectl get "${kube_flags[@]}" pod valid-pod --template="{{$id_field}}:")
kube::test::if_has_string "${output_message}" 'valid-pod:'
## check --allow-missing-template-keys=false results in an error for a missing key with jsonpath
output_message=$(! kubectl get pod valid-pod --allow-missing-template-keys=false -o jsonpath='{.missing}' "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'missing is not found'
## check --allow-missing-template-keys=false results in an error for a missing key with go
output_message=$(! kubectl get pod valid-pod --allow-missing-template-keys=false -o go-template='{{.missing}}' "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'map has no entry for key "missing"'
### Test kubectl get watch
output_message=$(kubectl get pods -w --request-timeout=1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'STATUS' # headers
kube::test::if_has_string "${output_message}" 'valid-pod' # pod details
output_message=$(kubectl get pods/valid-pod -o name -w --request-timeout=1 "${kube_flags[@]}")
kube::test::if_has_not_string "${output_message}" 'STATUS' # no headers
kube::test::if_has_string "${output_message}" 'pod/valid-pod' # resource name
output_message=$(kubectl get pods/valid-pod -o yaml -w --request-timeout=1 "${kube_flags[@]}")
kube::test::if_has_not_string "${output_message}" 'STATUS' # no headers
kube::test::if_has_string "${output_message}" 'name: valid-pod' # yaml
output_message=$(! kubectl get pods/invalid-pod -w --request-timeout=1 "${kube_flags[@]}" 2>&1)
kube::test::if_has_string "${output_message}" '"invalid-pod" not found'
# cleanup
kubectl delete pods valid-pod "${kube_flags[@]}"
### Test 'kubectl get -f <file> -o <non default printer>' prints all the items in the file's list
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml "${kube_flags[@]}"
# Post-condition: PODs redis-master and valid-pod exist
# Check that all items in the list are printed
output_message=$(kubectl get -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml -o jsonpath="{..metadata.name}" "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "redis-master valid-pod"
# cleanup
kubectl delete pods redis-master valid-pod "${kube_flags[@]}"
set +o nounset
set +o errexit
}
run_retrieve_multiple_tests() {
set -o nounset
set -o errexit
# switch back to the default namespace
kubectl config set-context "${CONTEXT}" --namespace=""
kube::log::status "Testing kubectl(v1:multiget)"
kube::test::get_object_assert 'nodes/127.0.0.1 service/kubernetes' "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:kubernetes:'
set +o nounset
set +o errexit
}
run_kubectl_sort_by_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl --sort-by"
### sort-by should not panic if no pod exists
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl get pods --sort-by="{metadata.name}"
kubectl get pods --sort-by="{metadata.creationTimestamp}"
### sort-by should work if a pod exists
# Create POD
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Check output of sort-by
output_message=$(kubectl get pods --sort-by="{metadata.name}")
kube::test::if_has_string "${output_message}" "valid-pod"
# ensure sort-by receives objects as Table
output_message=$(kubectl get pods --v=8 --sort-by="{metadata.name}" 2>&1)
kube::test::if_has_string "${output_message}" "as=Table"
# ensure sort-by requests the full object
kube::test::if_has_string "${output_message}" "includeObject=Object"
### Clean up
# Pre-condition: valid-pod exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete "${kube_flags[@]}" pod valid-pod --grace-period=0 --force
# Post-condition: valid-pod doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### sort-by should work when sorting by name
# Create three PODs
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f hack/testdata/sorted-pods/sorted-pod1.yaml
# Post-condition: sorted-pod1 is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:'
# Command
kubectl create "${kube_flags[@]}" -f hack/testdata/sorted-pods/sorted-pod2.yaml
# Post-condition: sorted-pod2 is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:sorted-pod2:'
# Command
kubectl create "${kube_flags[@]}" -f hack/testdata/sorted-pods/sorted-pod3.yaml
# Post-condition: sorted-pod3 is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:sorted-pod2:sorted-pod3:'
# Check output of sort-by '{metadata.name}'
output_message=$(kubectl get pods --sort-by="{metadata.name}")
kube::test::if_sort_by_has_correct_order "${output_message}" "sorted-pod1:sorted-pod2:sorted-pod3:"
# Check output of sort-by '{metadata.labels.name}'
output_message=$(kubectl get pods --sort-by="{metadata.labels.name}")
kube::test::if_sort_by_has_correct_order "${output_message}" "sorted-pod3:sorted-pod2:sorted-pod1:"
# if sorting, we should be able to use any field in our objects
output_message=$(kubectl get pods --sort-by="{spec.containers[0].name}")
kube::test::if_sort_by_has_correct_order "${output_message}" "sorted-pod2:sorted-pod1:sorted-pod3:"
# ensure sorting by creation timestamps works
output_message=$(kubectl get pods --sort-by="{metadata.creationTimestamp}")
kube::test::if_sort_by_has_correct_order "${output_message}" "sorted-pod1:sorted-pod2:sorted-pod3:"
# ensure sorting using fallback codepath still works
output_message=$(kubectl get pods --sort-by="{spec.containers[0].name}" --server-print=false --v=8 2>&1)
kube::test::if_sort_by_has_correct_order "${output_message}" "sorted-pod2:sorted-pod1:sorted-pod3:"
kube::test::if_has_not_string "${output_message}" "Table"
### Clean up
# Pre-condition: the three sorted-pods exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:sorted-pod2:sorted-pod3:'
# Command
kubectl delete "${kube_flags[@]}" pod --grace-period=0 --force --all
# Post-condition: no pods exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
set +o nounset
set +o errexit
}
run_kubectl_all_namespace_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl --all-namespace"
# Pre-condition: the "default" namespace exists
kube::test::get_object_assert namespaces "{{range.items}}{{if eq $id_field \\\"default\\\"}}{{$id_field}}:{{end}}{{end}}" 'default:'
### Create POD
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Verify a specific namespace is ignored when all-namespaces is provided
# Command
kubectl get pods --all-namespaces --namespace=default
### Check --all-namespaces option shows namespaces
# Create objects in multiple namespaces
kubectl create "${kube_flags[@]}" namespace all-ns-test-1
kubectl create "${kube_flags[@]}" serviceaccount test -n all-ns-test-1
kubectl create "${kube_flags[@]}" namespace all-ns-test-2
kubectl create "${kube_flags[@]}" serviceaccount test -n all-ns-test-2
# Ensure listing across namespaces displays the namespace
output_message=$(kubectl get serviceaccounts --all-namespaces "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "all-ns-test-1"
kube::test::if_has_string "${output_message}" "all-ns-test-2"
# Clean up
kubectl delete "${kube_flags[@]}" namespace all-ns-test-1
kubectl delete "${kube_flags[@]}" namespace all-ns-test-2
### Clean up
# Pre-condition: valid-pod exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete "${kube_flags[@]}" pod valid-pod --grace-period=0 --force
# Post-condition: valid-pod doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Verify flag all-namespaces is ignored for rootScoped resources
# Pre-condition: node exists
kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
# Command
output_message=$(kubectl get nodes --all-namespaces 2>&1)
# Post-condition: output with no NAMESPACE field
kube::test::if_has_not_string "${output_message}" "NAMESPACE"
set +o nounset
set +o errexit
}

285
vendor/k8s.io/kubernetes/test/cmd/initializers.sh generated vendored Executable file
View File

@@ -0,0 +1,285 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
run_initializer_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing --include-uninitialized"
### Create a deployment
kubectl create --request-timeout=1 -f hack/testdata/initializer-deployments.yaml 2>&1 "${kube_flags[@]}" || true
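# For context (illustrative sketch, not the literal fixture contents): an uninitialized
# object carries a pending initializer in its metadata, e.g.
#   metadata:
#     initializers:
#       pending:
#       - name: example.initializer.kubernetes.io   # hypothetical initializer name
# which is why a plain "kubectl get deployments" below reports no resources.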
### Test kubectl get --include-uninitialized
# Command
output_message=$(kubectl get deployments 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get deployments --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get deployments --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: I assume "web" is the deployment name
kube::test::if_has_string "${output_message}" 'web'
# Command
output_message=$(kubectl get deployments web 2>&1 "${kube_flags[@]}")
# Post-condition: I assume "web" is the deployment name
kube::test::if_has_string "${output_message}" 'web'
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
### Test kubectl describe --include-uninitialized
# Command
output_message=$(kubectl describe deployments 2>&1 "${kube_flags[@]}")
# Post-condition: The text "run=web" should be part of the output
kube::test::if_has_string "${output_message}" 'run=web'
# Command
output_message=$(kubectl describe deployments --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: The text "run=web" should be part of the output
kube::test::if_has_string "${output_message}" 'run=web'
# Command
output_message=$(kubectl describe deployments --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl describe deployments web --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: The text "run=web" should be part of the output
kube::test::if_has_string "${output_message}" 'run=web'
# Command
output_message=$(kubectl describe deployments web --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The text "run=web" should be part of the output
kube::test::if_has_string "${output_message}" 'run=web'
### Test kubectl label --include-uninitialized
# Command
output_message=$(kubectl label deployments labelkey1=labelvalue1 --all 2>&1 "${kube_flags[@]}")
# Post-condition: web is labelled
kube::test::if_has_string "${output_message}" 'deployment "web" labeled'
kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey1}}" 'labelvalue1'
# Command
output_message=$(kubectl label deployments labelkey2=labelvalue2 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl label deployments labelkey3=labelvalue3 -l run=web 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl label deployments labelkey4=labelvalue4 -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: web is labelled
kube::test::if_has_string "${output_message}" 'deployment "web" labeled'
kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey4}}" 'labelvalue4'
# Command
output_message=$(kubectl label deployments labelkey5=labelvalue5 -l run=web --all 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl label deployments labelkey6=labelvalue6 -l run=web --all --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: web is labelled
kube::test::if_has_string "${output_message}" 'deployment "web" labeled'
kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey6}}" 'labelvalue6'
# Command
output_message=$(kubectl label deployments web labelkey7=labelvalue7 2>&1 "${kube_flags[@]}")
# Post-condition: web is labelled
kube::test::if_has_string "${output_message}" 'deployment "web" labeled'
kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey7}}" 'labelvalue7'
# Post-condition: all of the labels applied above are present
kube::test::get_object_assert 'deployments web' "{{${labels_field}}}" 'map[labelkey1:labelvalue1 labelkey4:labelvalue4 labelkey6:labelvalue6 labelkey7:labelvalue7 run:web]'
### Test kubectl annotate --include-uninitialized
# Command
output_message=$(kubectl annotate deployments annotatekey1=annotatevalue1 --all 2>&1 "${kube_flags[@]}")
# Post-condition: DEPLOYMENT has annotation
kube::test::if_has_string "${output_message}" 'deployment "web" annotated'
kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey1}}" 'annotatevalue1'
# Command
output_message=$(kubectl annotate deployments annotatekey2=annotatevalue2 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl annotate deployments annotatekey3=annotatevalue3 -l run=web 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl annotate deployments annotatekey4=annotatevalue4 -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: DEPLOYMENT has annotation
kube::test::if_has_string "${output_message}" 'deployment "web" annotated'
kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey4}}" 'annotatevalue4'
# Command
output_message=$(kubectl annotate deployments annotatekey5=annotatevalue5 -l run=web --all 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl annotate deployments annotatekey6=annotatevalue6 -l run=web --all --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: DEPLOYMENT has annotation
kube::test::if_has_string "${output_message}" 'deployment "web" annotated'
kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey6}}" 'annotatevalue6'
# Command
output_message=$(kubectl annotate deployments web annotatekey7=annotatevalue7 2>&1 "${kube_flags[@]}")
# Post-condition: web DEPLOYMENT has annotation
kube::test::if_has_string "${output_message}" 'deployment "web" annotated'
kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey7}}" 'annotatevalue7'
### Test kubectl edit --include-uninitialized
[ "$(EDITOR=cat kubectl edit deployments 2>&1 "${kube_flags[@]}" | grep 'edit cancelled, no objects found')" ]
[ "$(EDITOR=cat kubectl edit deployments --include-uninitialized 2>&1 "${kube_flags[@]}" | grep 'Edit cancelled, no changes made.')" ]
### Test kubectl set image --include-uninitialized
# Command
output_message=$(kubectl set image deployments *=nginx:1.11 --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "image updated" should be part of the output
kube::test::if_has_string "${output_message}" 'image updated'
# Command
output_message=$(kubectl set image deployments *=nginx:1.11 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl set image deployments *=nginx:1.11 -l run=web 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl set image deployments *=nginx:1.12 -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: The text "image updated" should be part of the output
kube::test::if_has_string "${output_message}" 'image updated'
# Command
output_message=$(kubectl set image deployments *=nginx:1.13 -l run=web --include-uninitialized --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "image updated" should be part of the output
kube::test::if_has_string "${output_message}" 'image updated'
### Test kubectl set resources --include-uninitialized
# Command
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "resource requirements updated" should be part of the output
kube::test::if_has_string "${output_message}" 'resource requirements updated'
# Command
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi -l run=web 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=200m,memory=256Mi -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: The text "resource requirements updated" should be part of the output
kube::test::if_has_string "${output_message}" 'resource requirements updated'
# Command
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=512Mi -l run=web --include-uninitialized --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "resource requirements updated" should be part of the output
kube::test::if_has_string "${output_message}" 'resource requirements updated'
### Test kubectl set selector --include-uninitialized
# Create a service with an initializer
kubectl create --request-timeout=1 -f hack/testdata/initializer-redis-master-service.yaml 2>&1 "${kube_flags[@]}" || true
# Command
output_message=$(kubectl set selector services role=padawan --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "selector updated" should be part of the output
kube::test::if_has_string "${output_message}" 'selector updated'
# Command
output_message=$(kubectl set selector services role=padawan --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
### Test kubectl set subject --include-uninitialized
# Create a clusterrolebinding with an initializer
kubectl create --request-timeout=1 -f hack/testdata/initializer-clusterrolebinding.yaml 2>&1 "${kube_flags[@]}" || true
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
# Command
output_message=$(kubectl set subject clusterrolebinding --user=foo --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "subjects updated" should be part of the output
kube::test::if_has_string "${output_message}" 'subjects updated'
# Command
output_message=$(kubectl set subject clusterrolebinding --user=foo --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl set subject clusterrolebinding --user=foo -l clusterrolebinding=super 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl set subject clusterrolebinding --user=foo -l clusterrolebinding=super --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: The text "subjects updated" should be part of the output
kube::test::if_has_string "${output_message}" 'subjects updated'
# Command
output_message=$(kubectl set subject clusterrolebinding --user=foo -l clusterrolebinding=super --include-uninitialized --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "subjects updated" should be part of the output
kube::test::if_has_string "${output_message}" 'subjects updated'
### Test kubectl set serviceaccount --include-uninitialized
# Command
output_message=$(kubectl set serviceaccount deployment serviceaccount1 --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "serviceaccount updated" should be part of the output
kube::test::if_has_string "${output_message}" 'serviceaccount updated'
# Command
output_message=$(kubectl set serviceaccount deployment serviceaccount1 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
### Test kubectl delete --include-uninitialized
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
# Command
output_message=$(kubectl delete clusterrolebinding --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl delete clusterrolebinding --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "deleted" should be part of the output
kube::test::if_has_string "${output_message}" 'deleted'
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.items}}{{$id_field}}:{{end}}" ''
### Test kubectl apply --include-uninitialized
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply pod a
kubectl apply --prune --request-timeout=20 --include-uninitialized=false --all -f hack/testdata/prune/a.yaml "${kube_flags[@]}" 2>&1
# check right pod exists
kube::test::get_object_assert pods/a "{{${id_field}}}" 'a'
# Post-condition: Other uninitialized resources should not be pruned
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" 'web'
kube::test::get_object_assert services/redis-master "{{range.items}}{{$id_field}}:{{end}}" 'redis-master'
# cleanup
kubectl delete pod a
# apply pod a and prune uninitialized deployments web
kubectl apply --prune --request-timeout=20 --all -f hack/testdata/prune/a.yaml "${kube_flags[@]}" 2>&1
# check right pod exists
kube::test::get_object_assert pods/a "{{${id_field}}}" 'a'
# Post-condition: Other uninitialized resources should not be pruned
kube::test::get_object_assert deployments/web "{{range.items}}{{$id_field}}:{{end}}" 'web'
kube::test::get_object_assert services/redis-master "{{range.items}}{{$id_field}}:{{end}}" 'redis-master'
# cleanup
kubectl delete pod a
# apply pod a and prune uninitialized deployments web
kubectl apply --prune --request-timeout=20 --include-uninitialized --all -f hack/testdata/prune/a.yaml "${kube_flags[@]}" 2>&1
# check right pod exists
kube::test::get_object_assert pods/a "{{${id_field}}}" 'a'
# Post-condition: Other uninitialized resources should not be pruned
kube::test::get_object_assert deployments/web "{{range.items}}{{$id_field}}:{{end}}" 'web'
kube::test::get_object_assert services/redis-master "{{range.items}}{{$id_field}}:{{end}}" 'redis-master'
# cleanup
kubectl delete pod a
kubectl delete --request-timeout=1 deploy web
kubectl delete --request-timeout=1 service redis-master
set +o nounset
set +o errexit
}

96
vendor/k8s.io/kubernetes/test/cmd/kubeconfig.sh generated vendored Executable file
View File

@@ -0,0 +1,96 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
run_kubectl_config_set_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:config set)"
kubectl config set-cluster test-cluster --server="https://does-not-work"
# Get the api cert and add a comment to avoid flag parsing problems
cert_data=$(echo "#Comment" && cat "${TMPDIR:-/tmp}/apiserver.crt")
kubectl config set clusters.test-cluster.certificate-authority-data "$cert_data" --set-raw-bytes
r_written=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}')
encoded=$(echo -n "$cert_data" | base64)
kubectl config set clusters.test-cluster.certificate-authority-data "$encoded"
e_written=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}')
test "$e_written" == "$r_written"
set +o nounset
set +o errexit
}
run_client_config_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing client config"
# Command
# Pre-condition: kubeconfig "missing" is not a file or directory
output_message=$(! kubectl get pod --context="" --kubeconfig=missing 2>&1)
kube::test::if_has_string "${output_message}" "missing: no such file or directory"
# Pre-condition: kubeconfig "missing" is not a file or directory
# Command
output_message=$(! kubectl get pod --user="" --kubeconfig=missing 2>&1)
# Post-condition: --user contains a valid / empty value, missing config file returns error
kube::test::if_has_string "${output_message}" "missing: no such file or directory"
# Command
output_message=$(! kubectl get pod --cluster="" --kubeconfig=missing 2>&1)
# Post-condition: --cluster contains a "valid" value, missing config file returns error
kube::test::if_has_string "${output_message}" "missing: no such file or directory"
# Pre-condition: context "missing-context" does not exist
# Command
output_message=$(! kubectl get pod --context="missing-context" 2>&1)
kube::test::if_has_string "${output_message}" 'context was not found for specified context: missing-context'
# Post-condition: invalid or missing context returns error
# Pre-condition: cluster "missing-cluster" does not exist
# Command
output_message=$(! kubectl get pod --cluster="missing-cluster" 2>&1)
kube::test::if_has_string "${output_message}" 'no server found for cluster "missing-cluster"'
# Post-condition: invalid or missing cluster returns error
# Pre-condition: user "missing-user" does not exist
# Command
output_message=$(! kubectl get pod --user="missing-user" 2>&1)
kube::test::if_has_string "${output_message}" 'auth info "missing-user" does not exist'
# Post-condition: invalid or missing user returns error
# test invalid config
kubectl config view | sed -E "s/apiVersion: .*/apiVersion: v-1/g" > "${TMPDIR:-/tmp}"/newconfig.yaml
output_message=$(! "${KUBE_OUTPUT_HOSTBIN}/kubectl" get pods --context="" --user="" --kubeconfig="${TMPDIR:-/tmp}"/newconfig.yaml 2>&1)
kube::test::if_has_string "${output_message}" "Error loading config file"
output_message=$(! kubectl get pod --kubeconfig=missing-config 2>&1)
kube::test::if_has_string "${output_message}" 'no such file or directory'
set +o nounset
set +o errexit
}

841
vendor/k8s.io/kubernetes/test/cmd/legacy-script.sh generated vendored Executable file
View File

@@ -0,0 +1,841 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This contains util code for testing kubectl.
set -o errexit
set -o nounset
set -o pipefail
# Set locale to ensure english responses from kubectl commands
export LANG=C
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
# Expects the following to have already been done by whichever script sources this one:
# source "${KUBE_ROOT}/hack/lib/init.sh"
# source "${KUBE_ROOT}/hack/lib/test.sh"
source "${KUBE_ROOT}/test/cmd/apply.sh"
source "${KUBE_ROOT}/test/cmd/apps.sh"
source "${KUBE_ROOT}/test/cmd/authorization.sh"
source "${KUBE_ROOT}/test/cmd/batch.sh"
source "${KUBE_ROOT}/test/cmd/certificate.sh"
source "${KUBE_ROOT}/test/cmd/core.sh"
source "${KUBE_ROOT}/test/cmd/crd.sh"
source "${KUBE_ROOT}/test/cmd/create.sh"
source "${KUBE_ROOT}/test/cmd/diff.sh"
source "${KUBE_ROOT}/test/cmd/discovery.sh"
source "${KUBE_ROOT}/test/cmd/generic-resources.sh"
source "${KUBE_ROOT}/test/cmd/get.sh"
source "${KUBE_ROOT}/test/cmd/initializers.sh"
source "${KUBE_ROOT}/test/cmd/kubeconfig.sh"
source "${KUBE_ROOT}/test/cmd/node-management.sh"
source "${KUBE_ROOT}/test/cmd/old-print.sh"
source "${KUBE_ROOT}/test/cmd/plugins.sh"
source "${KUBE_ROOT}/test/cmd/proxy.sh"
source "${KUBE_ROOT}/test/cmd/rbac.sh"
source "${KUBE_ROOT}/test/cmd/request-timeout.sh"
source "${KUBE_ROOT}/test/cmd/run.sh"
source "${KUBE_ROOT}/test/cmd/save-config.sh"
source "${KUBE_ROOT}/test/cmd/storage.sh"
source "${KUBE_ROOT}/test/cmd/template-output.sh"
source "${KUBE_ROOT}/test/cmd/version.sh"
ETCD_HOST=${ETCD_HOST:-127.0.0.1}
ETCD_PORT=${ETCD_PORT:-2379}
API_PORT=${API_PORT:-8080}
SECURE_API_PORT=${SECURE_API_PORT:-6443}
API_HOST=${API_HOST:-127.0.0.1}
KUBELET_HEALTHZ_PORT=${KUBELET_HEALTHZ_PORT:-10248}
CTLRMGR_PORT=${CTLRMGR_PORT:-10252}
PROXY_HOST=127.0.0.1 # kubectl only serves on localhost.
IMAGE_NGINX="k8s.gcr.io/nginx:1.7.9"
IMAGE_DEPLOYMENT_R1="k8s.gcr.io/nginx:test-cmd" # deployment-revision1.yaml
IMAGE_DEPLOYMENT_R2="$IMAGE_NGINX" # deployment-revision2.yaml
IMAGE_PERL="k8s.gcr.io/perl"
IMAGE_PAUSE_V2="k8s.gcr.io/pause:2.0"
IMAGE_DAEMONSET_R2="k8s.gcr.io/pause:latest"
IMAGE_DAEMONSET_R2_2="k8s.gcr.io/nginx:test-cmd" # rollingupdate-daemonset-rv2.yaml
IMAGE_STATEFULSET_R1="k8s.gcr.io/nginx-slim:0.7"
IMAGE_STATEFULSET_R2="k8s.gcr.io/nginx-slim:0.8"
# Expose kubectl directly for readability
PATH="${KUBE_OUTPUT_HOSTBIN}":$PATH
# Define variables for resource types to prevent typos.
clusterroles="clusterroles"
configmaps="configmaps"
csr="csr"
deployments="deployments"
horizontalpodautoscalers="horizontalpodautoscalers"
metrics="metrics"
namespaces="namespaces"
nodes="nodes"
persistentvolumeclaims="persistentvolumeclaims"
persistentvolumes="persistentvolumes"
pods="pods"
podtemplates="podtemplates"
replicasets="replicasets"
replicationcontrollers="replicationcontrollers"
roles="roles"
secrets="secrets"
serviceaccounts="serviceaccounts"
services="services"
statefulsets="statefulsets"
static="static"
storageclass="storageclass"
subjectaccessreviews="subjectaccessreviews"
selfsubjectaccessreviews="selfsubjectaccessreviews"
customresourcedefinitions="customresourcedefinitions"
daemonsets="daemonsets"
controllerrevisions="controllerrevisions"
job="jobs"
# include shell2junit library
sh2ju="${KUBE_ROOT}/third_party/forked/shell2junit/sh2ju.sh"
if [[ -f "${sh2ju}" ]]; then
source "${sh2ju}"
else
echo "failed to find third_party/forked/shell2junit/sh2ju.sh"
exit 1
fi
# record_command runs the command and records its output/error messages in junit format
# it expects the first argument to be the name of the command
# Example:
# record_command run_kubectl_tests
#
# WARNING: Variable changes in the command will NOT be effective after record_command returns.
# This is because the command runs in a subshell.
function record_command() {
set +o nounset
set +o errexit
local name="$1"
local output="${KUBE_JUNIT_REPORT_DIR:-/tmp/junit-results}"
echo "Recording: ${name}"
echo "Running command: $@"
juLog -output="${output}" -class="test-cmd" -name="${name}" "$@"
if [[ $? -ne 0 ]]; then
echo "Error when running ${name}"
foundError="${foundError}""${name}"", "
fi
set -o nounset
set -o errexit
}
# Stops the running kubectl proxy, if there is one.
function stop-proxy()
{
[[ -n "${PROXY_PORT-}" ]] && kube::log::status "Stopping proxy on port ${PROXY_PORT}"
[[ -n "${PROXY_PID-}" ]] && kill "${PROXY_PID}" 1>&2 2>/dev/null
[[ -n "${PROXY_PORT_FILE-}" ]] && rm -f ${PROXY_PORT_FILE}
PROXY_PID=
PROXY_PORT=
PROXY_PORT_FILE=
}
# Starts "kubect proxy" to test the client proxy. $1: api_prefix
function start-proxy()
{
stop-proxy
PROXY_PORT_FILE=$(mktemp proxy-port.out.XXXXX)
kube::log::status "Starting kubectl proxy on random port; output file in ${PROXY_PORT_FILE}; args: ${1-}"
if [ $# -eq 0 ]; then
kubectl proxy --port=0 --www=. 1>${PROXY_PORT_FILE} 2>&1 &
else
kubectl proxy --port=0 --www=. --api-prefix="$1" 1>${PROXY_PORT_FILE} 2>&1 &
fi
PROXY_PID=$!
PROXY_PORT=
local attempts=0
while [[ -z ${PROXY_PORT} ]]; do
if (( ${attempts} > 9 )); then
kill "${PROXY_PID}"
kube::log::error_exit "Couldn't start proxy. Failed to read port after ${attempts} tries. Got: $(cat ${PROXY_PORT_FILE})"
fi
sleep .5
kube::log::status "Attempt ${attempts} to read ${PROXY_PORT_FILE}..."
PROXY_PORT=$(sed 's/.*Starting to serve on 127.0.0.1:\([0-9]*\)$/\1/'< ${PROXY_PORT_FILE})
attempts=$((attempts+1))
done
kube::log::status "kubectl proxy running on port ${PROXY_PORT}"
# We try checking kubectl proxy 30 times with 1s delays to avoid occasional
# failures.
if [ $# -eq 0 ]; then
kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/healthz" "kubectl proxy"
else
kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/$1/healthz" "kubectl proxy --api-prefix=$1"
fi
}
function cleanup()
{
[[ -n "${APISERVER_PID-}" ]] && kill "${APISERVER_PID}" 1>&2 2>/dev/null
[[ -n "${CTLRMGR_PID-}" ]] && kill "${CTLRMGR_PID}" 1>&2 2>/dev/null
[[ -n "${KUBELET_PID-}" ]] && kill "${KUBELET_PID}" 1>&2 2>/dev/null
stop-proxy
kube::etcd::cleanup
rm -rf "${KUBE_TEMP}"
local junit_dir="${KUBE_JUNIT_REPORT_DIR:-/tmp/junit-results}"
echo "junit report dir:" ${junit_dir}
kube::log::status "Clean up complete"
}
# Executes curl against the proxy. $1 is the path to use, $2 is the desired
# return code. Prints a helpful message on failure.
function check-curl-proxy-code()
{
local status
local -r address=$1
local -r desired=$2
local -r full_address="${PROXY_HOST}:${PROXY_PORT}${address}"
status=$(curl -w "%{http_code}" --silent --output /dev/null "${full_address}")
if [ "${status}" == "${desired}" ]; then
return 0
fi
echo "For address ${full_address}, got ${status} but wanted ${desired}"
return 1
}
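# Example invocation (illustrative):
#   check-curl-proxy-code /api/v1/namespaces/default/pods 200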
# TODO: Remove this function when we do the retry inside the kubectl commands. See #15333.
function kubectl-with-retry()
{
ERROR_FILE="${KUBE_TEMP}/kubectl-error"
preserve_err_file=${PRESERVE_ERR_FILE-false}
for count in {0..3}; do
kubectl "$@" 2> ${ERROR_FILE} || true
if grep -q "the object has been modified" "${ERROR_FILE}"; then
kube::log::status "retry $1, error: $(cat ${ERROR_FILE})"
rm "${ERROR_FILE}"
sleep $((2**count))
else
if [ "$preserve_err_file" != true ] ; then
rm "${ERROR_FILE}"
fi
break
fi
done
}
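# Example invocation (illustrative): retry an update that may race with a controller
# and hit a "the object has been modified" conflict:
#   kubectl-with-retry patch pod valid-pod -p '{"metadata":{"labels":{"new":"label"}}}' "${kube_flags[@]}"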
# Waits for the pods with the given label to match the list of names. Don't call
# this function unless you know the exact pod names, or expect no pods.
# $1: label to match
# $2: list of pod names sorted by name
# Example invocation:
# wait-for-pods-with-label "app=foo" "nginx-0nginx-1"
function wait-for-pods-with-label()
{
local i
for i in $(seq 1 10); do
kubeout=`kubectl get po -l $1 --output=go-template --template='{{range.items}}{{.metadata.name}}{{end}}' --sort-by metadata.name "${kube_flags[@]}"`
if [[ $kubeout = $2 ]]; then
return
fi
echo Waiting for pods: $2, found $kubeout
sleep $i
done
kube::log::error_exit "Timeout waiting for pods with label $1"
}
# Code to be run before running the tests.
setup() {
kube::util::trap_add cleanup EXIT SIGINT
kube::util::ensure-temp-dir
# ensure ~/.kube/config isn't loaded by tests
HOME="${KUBE_TEMP}"
kube::etcd::start
# Find a standard sed instance for use with edit scripts
kube::util::ensure-gnu-sed
kube::log::status "Building kubectl"
make -C "${KUBE_ROOT}" WHAT="cmd/kubectl"
# Check kubectl
kube::log::status "Running kubectl with no options"
"${KUBE_OUTPUT_HOSTBIN}/kubectl"
# TODO: we need to note down the current default namespace and set back to this
# namespace after the tests are done.
kubectl config view
CONTEXT="test"
kubectl config set-context "${CONTEXT}"
kubectl config use-context "${CONTEXT}"
kube::log::status "Setup complete"
}
# Runs all kubectl tests.
# Requires an env var SUPPORTED_RESOURCES which is a comma separated list of
# resources for which tests should be run.
runTests() {
foundError=""
if [ -z "${SUPPORTED_RESOURCES:-}" ]; then
echo "Need to set SUPPORTED_RESOURCES env var. It is a list of resources that are supported and hence should be tested. Set it to (*) to test all resources"
exit 1
fi
kube::log::status "Checking kubectl version"
kubectl version
# Generate a random namespace name, based on the current time (to make
# debugging slightly easier) and a random number. Don't use `date +%N`
# because that doesn't work on OSX.
create_and_use_new_namespace() {
local ns_name
ns_name="namespace-$(date +%s)-${RANDOM}"
kube::log::status "Creating namespace ${ns_name}"
kubectl create namespace "${ns_name}"
kubectl config set-context "${CONTEXT}" --namespace="${ns_name}"
}
kube_flags=(
-s "http://127.0.0.1:${API_PORT}"
)
# token defined in hack/testdata/auth-tokens.csv
kube_flags_with_token=(
-s "https://127.0.0.1:${SECURE_API_PORT}" --token=admin-token --insecure-skip-tls-verify=true
)
if [[ -z "${ALLOW_SKEW:-}" ]]; then
kube_flags+=("--match-server-version")
kube_flags_with_token+=("--match-server-version")
fi
if kube::test::if_supports_resource "${nodes}" ; then
[ "$(kubectl get nodes -o go-template='{{ .apiVersion }}' "${kube_flags[@]}")" == "v1" ]
fi
id_field=".metadata.name"
labels_field=".metadata.labels"
annotations_field=".metadata.annotations"
service_selector_field=".spec.selector"
rc_replicas_field=".spec.replicas"
rc_status_replicas_field=".status.replicas"
rc_container_image_field=".spec.template.spec.containers"
rs_replicas_field=".spec.replicas"
port_field="(index .spec.ports 0).port"
port_name="(index .spec.ports 0).name"
second_port_field="(index .spec.ports 1).port"
second_port_name="(index .spec.ports 1).name"
image_field="(index .spec.containers 0).image"
pod_container_name_field="(index .spec.containers 0).name"
container_name_field="(index .spec.template.spec.containers 0).name"
hpa_min_field=".spec.minReplicas"
hpa_max_field=".spec.maxReplicas"
hpa_cpu_field=".spec.targetCPUUtilizationPercentage"
template_labels=".spec.template.metadata.labels.name"
statefulset_replicas_field=".spec.replicas"
statefulset_observed_generation=".status.observedGeneration"
job_parallelism_field=".spec.parallelism"
deployment_replicas=".spec.replicas"
secret_data=".data"
secret_type=".type"
change_cause_annotation='.*kubernetes.io/change-cause.*'
pdb_min_available=".spec.minAvailable"
pdb_max_unavailable=".spec.maxUnavailable"
generation_field=".metadata.generation"
template_generation_field=".spec.templateGeneration"
container_len="(len .spec.template.spec.containers)"
image_field0="(index .spec.template.spec.containers 0).image"
image_field1="(index .spec.template.spec.containers 1).image"
# Make sure "default" namespace exists.
if kube::test::if_supports_resource "${namespaces}" ; then
output_message=$(kubectl get "${kube_flags[@]}" namespaces)
if [[ ! $(echo "${output_message}" | grep "default") ]]; then
# Create default namespace
kubectl create "${kube_flags[@]}" ns default
fi
fi
# Make sure "kubernetes" service exists.
if kube::test::if_supports_resource "${services}" ; then
# Attempt to create the kubernetes service, tolerating failure (since it might already exist)
kubectl create "${kube_flags[@]}" -f hack/testdata/kubernetes-service.yaml || true
# Require the service to exist (either we created it or the API server did)
kubectl get "${kube_flags[@]}" -f hack/testdata/kubernetes-service.yaml
fi
#########################
# Kubectl version #
#########################
record_command run_kubectl_version_tests
#######################
# kubectl config set #
#######################
record_command run_kubectl_config_set_tests
#######################
# kubectl local proxy #
#######################
record_command run_kubectl_local_proxy_tests
#########################
# RESTMapper evaluation #
#########################
record_command run_RESTMapper_evaluation_tests
# find all resources
kubectl "${kube_flags[@]}" api-resources
# find all namespaced resources that support list by name and get them
kubectl "${kube_flags[@]}" api-resources --verbs=list --namespaced -o name | xargs -n 1 kubectl "${kube_flags[@]}" get -o name
################
# Cluster Role #
################
if kube::test::if_supports_resource "${clusterroles}" ; then
record_command run_clusterroles_tests
fi
########
# Role #
########
if kube::test::if_supports_resource "${roles}" ; then
record_command run_role_tests
fi
#########################
# Assert short name #
#########################
record_command run_assert_short_name_tests
#########################
# Assert categories #
#########################
## test if a category is exported during discovery
if kube::test::if_supports_resource "${pods}" ; then
record_command run_assert_categories_tests
fi
###########################
# POD creation / deletion #
###########################
if kube::test::if_supports_resource "${pods}" ; then
record_command run_pod_tests
fi
if kube::test::if_supports_resource "${pods}" ; then
record_command run_save_config_tests
fi
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_create_error_tests
fi
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_apply_tests
record_command run_kubectl_run_tests
record_command run_kubectl_create_filter_tests
fi
if kube::test::if_supports_resource "${deployments}" ; then
record_command run_kubectl_apply_deployments_tests
fi
################
# Kubectl diff #
################
record_command run_kubectl_diff_tests
###############
# Kubectl get #
###############
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_get_tests
record_command run_kubectl_old_print_tests
fi
######################
# Create #
######################
if kube::test::if_supports_resource "${secrets}" ; then
record_command run_create_secret_tests
fi
##################
# Global timeout #
##################
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_request_timeout_tests
fi
#####################################
# CustomResourceDefinitions #
#####################################
# customresourcedefinitions cleanup after themselves.
if kube::test::if_supports_resource "${customresourcedefinitions}" ; then
record_command run_crd_tests
fi
#################
# Run cmd w img #
#################
if kube::test::if_supports_resource "${deployments}" ; then
record_command run_cmd_with_img_tests
fi
#####################################
# Recursive Resources via directory #
#####################################
if kube::test::if_supports_resource "${pods}" ; then
record_command run_recursive_resources_tests
fi
##############
# Namespaces #
##############
if kube::test::if_supports_resource "${namespaces}" ; then
record_command run_namespace_tests
fi
###########
# Secrets #
###########
if kube::test::if_supports_resource "${namespaces}" ; then
if kube::test::if_supports_resource "${secrets}" ; then
record_command run_secrets_test
fi
fi
######################
# ConfigMap #
######################
if kube::test::if_supports_resource "${namespaces}"; then
if kube::test::if_supports_resource "${configmaps}" ; then
record_command run_configmap_tests
fi
fi
####################
# Client Config #
####################
record_command run_client_config_tests
####################
# Service Accounts #
####################
if kube::test::if_supports_resource "${namespaces}" && kube::test::if_supports_resource "${serviceaccounts}" ; then
record_command run_service_accounts_tests
fi
####################
# Job #
####################
if kube::test::if_supports_resource "${job}" ; then
record_command run_job_tests
record_command run_create_job_tests
fi
#################
# Pod templates #
#################
if kube::test::if_supports_resource "${podtemplates}" ; then
record_command run_pod_templates_tests
fi
############
# Services #
############
if kube::test::if_supports_resource "${services}" ; then
record_command run_service_tests
fi
##################
# DaemonSets #
##################
if kube::test::if_supports_resource "${daemonsets}" ; then
record_command run_daemonset_tests
if kube::test::if_supports_resource "${controllerrevisions}"; then
record_command run_daemonset_history_tests
fi
fi
###########################
# Replication controllers #
###########################
if kube::test::if_supports_resource "${namespaces}" ; then
if kube::test::if_supports_resource "${replicationcontrollers}" ; then
record_command run_rc_tests
fi
fi
######################
# Deployments #
######################
if kube::test::if_supports_resource "${deployments}" ; then
record_command run_deployment_tests
fi
######################
# Replica Sets #
######################
if kube::test::if_supports_resource "${replicasets}" ; then
record_command run_rs_tests
fi
#################
# Stateful Sets #
#################
if kube::test::if_supports_resource "${statefulsets}" ; then
record_command run_stateful_set_tests
if kube::test::if_supports_resource "${controllerrevisions}"; then
record_command run_statefulset_history_tests
fi
fi
######################
# Lists #
######################
if kube::test::if_supports_resource "${services}" ; then
if kube::test::if_supports_resource "${deployments}" ; then
record_command run_lists_tests
fi
fi
######################
# Multiple Resources #
######################
if kube::test::if_supports_resource "${services}" ; then
if kube::test::if_supports_resource "${replicationcontrollers}" ; then
record_command run_multi_resources_tests
fi
fi
######################
# Persistent Volumes #
######################
if kube::test::if_supports_resource "${persistentvolumes}" ; then
record_command run_persistent_volumes_tests
fi
############################
# Persistent Volume Claims #
############################
if kube::test::if_supports_resource "${persistentvolumeclaims}" ; then
record_command run_persistent_volume_claims_tests
fi
############################
# Storage Classes #
############################
if kube::test::if_supports_resource "${storageclass}" ; then
record_command run_storage_class_tests
fi
#########
# Nodes #
#########
if kube::test::if_supports_resource "${nodes}" ; then
record_command run_nodes_tests
fi
########################
# authorization.k8s.io #
########################
if kube::test::if_supports_resource "${subjectaccessreviews}" ; then
record_command run_authorization_tests
fi
# kubectl auth can-i
# kube-apiserver is started with authorization mode AlwaysAllow, so kubectl can-i always returns yes
if kube::test::if_supports_resource "${subjectaccessreviews}" ; then
output_message=$(kubectl auth can-i '*' '*' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "yes"
output_message=$(kubectl auth can-i get pods --subresource=log 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "yes"
output_message=$(kubectl auth can-i get invalid_resource 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "the server doesn't have a resource type"
output_message=$(kubectl auth can-i get /logs/ 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "yes"
output_message=$(! kubectl auth can-i get /logs/ --subresource=log 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "subresource can not be used with NonResourceURL"
output_message=$(kubectl auth can-i list jobs.batch/bar -n foo --quiet 2>&1 "${kube_flags[@]}")
kube::test::if_empty_string "${output_message}"
output_message=$(kubectl auth can-i get pods --subresource=log 2>&1 "${kube_flags[@]}"; echo $?)
kube::test::if_has_string "${output_message}" '0'
output_message=$(kubectl auth can-i get pods --subresource=log --quiet 2>&1 "${kube_flags[@]}"; echo $?)
kube::test::if_has_string "${output_message}" '0'
fi
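# A minimal illustrative sketch (hypothetical helper, never invoked by this suite): with --quiet,
# `kubectl auth can-i` prints nothing and reports the answer only through its exit status, which
# is why the assertions above capture `echo $?` instead of grepping for text.
can_i_quiet_sketch() {
  if kubectl auth can-i list pods --quiet; then
    echo "allowed"
  else
    echo "denied"
  fi
}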
# kubectl auth reconcile
if kube::test::if_supports_resource "${clusterroles}" ; then
kubectl auth reconcile "${kube_flags[@]}" -f test/fixtures/pkg/kubectl/cmd/auth/rbac-resource-plus.yaml
kube::test::get_object_assert 'rolebindings -n some-other-random -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-RB:'
kube::test::get_object_assert 'roles -n some-other-random -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-R:'
kube::test::get_object_assert 'clusterrolebindings -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-CRB:'
kube::test::get_object_assert 'clusterroles -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-CR:'
failure_message=$(! kubectl auth reconcile "${kube_flags[@]}" -f test/fixtures/pkg/kubectl/cmd/auth/rbac-v1beta1.yaml 2>&1 )
kube::test::if_has_string "${failure_message}" 'only rbac.authorization.k8s.io/v1 is supported'
kubectl delete "${kube_flags[@]}" rolebindings,role,clusterroles,clusterrolebindings -n some-other-random -l test-cmd=auth
fi
#####################
# Retrieve multiple #
#####################
if kube::test::if_supports_resource "${nodes}" ; then
if kube::test::if_supports_resource "${services}" ; then
record_command run_retrieve_multiple_tests
fi
fi
#####################
# Resource aliasing #
#####################
if kube::test::if_supports_resource "${services}" ; then
if kube::test::if_supports_resource "${replicationcontrollers}" ; then
record_command run_resource_aliasing_tests
fi
fi
###########
# Explain #
###########
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_explain_tests
fi
###########
# Swagger #
###########
record_command run_swagger_tests
#####################
# Kubectl --sort-by #
#####################
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_sort_by_tests
fi
############################
# Kubectl --all-namespaces #
############################
if kube::test::if_supports_resource "${pods}" ; then
if kube::test::if_supports_resource "${nodes}" ; then
record_command run_kubectl_all_namespace_tests
fi
fi
######################
# kubectl --template #
######################
if kube::test::if_supports_resource "${pods}" ; then
record_command run_template_output_tests
fi
################
# Certificates #
################
if kube::test::if_supports_resource "${csr}" ; then
record_command run_certificates_tests
fi
######################
# Cluster Management #
######################
if kube::test::if_supports_resource "${nodes}" ; then
record_command run_cluster_management_tests
fi
###########
# Plugins #
###########
record_command run_plugins_tests
#################
# Impersonation #
#################
record_command run_impersonation_tests
kube::test::clear_all
if [[ -n "${foundError}" ]]; then
echo "FAILED TESTS: ""${foundError}"
exit 1
fi
}

149
vendor/k8s.io/kubernetes/test/cmd/node-management.sh generated vendored Executable file
View File

@@ -0,0 +1,149 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
run_cluster_management_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing cluster-management commands"
kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
# create test pods we can work with
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Pod",
"apiVersion": "v1",
"metadata": {
"name": "test-pod-1",
"labels": {
"e": "f"
}
},
"spec": {
"containers": [
{
"name": "container-1",
"resources": {},
"image": "test-image"
}
]
}
}
__EOF__
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Pod",
"apiVersion": "v1",
"metadata": {
"name": "test-pod-2",
"labels": {
"c": "d"
}
},
"spec": {
"containers": [
{
"name": "container-1",
"resources": {},
"image": "test-image"
}
]
}
}
__EOF__
# taint/untaint
# Pre-condition: node doesn't have dedicated=foo:PreferNoSchedule taint
kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "" # expect no output
# taint can add a taint
kubectl taint node 127.0.0.1 dedicated=foo:PreferNoSchedule
kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "dedicated=foo:PreferNoSchedule"
# taint can remove a taint
kubectl taint node 127.0.0.1 dedicated-
# Post-condition: node doesn't have dedicated=foo:PreferNoSchedule taint
kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "" # expect no output
### kubectl cordon update with --dry-run does not mark node unschedulable
# Pre-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
kubectl cordon "127.0.0.1" --dry-run
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
### kubectl drain update with --dry-run does not mark node unschedulable
# Pre-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
kubectl drain "127.0.0.1" --dry-run
# Post-condition: node still exists, node is still schedulable
kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
### kubectl drain with --pod-selector only evicts pods that match the given selector
# Pre-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
# Pre-condition: test-pod-1 and test-pod-2 exist
kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,'
kubectl drain "127.0.0.1" --pod-selector 'e in (f)'
# only "test-pod-1" should have been matched and deleted - test-pod-2 should still exist
kube::test::get_object_assert "pods/test-pod-2" "{{.metadata.name}}" 'test-pod-2'
# delete pod no longer in use
kubectl delete pod/test-pod-2
# Post-condition: node is schedulable
kubectl uncordon "127.0.0.1"
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
### kubectl uncordon update with --dry-run is a no-op
# Pre-condition: node is already schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
response=$(kubectl uncordon "127.0.0.1" --dry-run)
kube::test::if_has_string "${response}" 'already uncordoned'
# Post-condition: node is still schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
### kubectl drain command fails when both --selector and a node argument are given
# Pre-condition: node exists and contains label test=label
kubectl label node "127.0.0.1" "test=label"
kube::test::get_object_assert "nodes 127.0.0.1" '{{.metadata.labels.test}}' 'label'
response=$(! kubectl drain "127.0.0.1" --selector test=label 2>&1)
kube::test::if_has_string "${response}" 'cannot specify both a node name'
### kubectl cordon command fails when no arguments are passed
# Pre-condition: node exists
response=$(! kubectl cordon 2>&1)
kube::test::if_has_string "${response}" 'error\: USAGE\: cordon NODE'
### kubectl cordon selects no nodes with an empty --selector=
# Pre-condition: node "127.0.0.1" is uncordoned
kubectl uncordon "127.0.0.1"
response=$(! kubectl cordon --selector= 2>&1)
kube::test::if_has_string "${response}" 'must provide one or more resources'
# test=label matches our node
response=$(kubectl cordon --selector test=label)
kube::test::if_has_string "${response}" 'node/127.0.0.1 cordoned'
# invalid=label does not match any nodes
response=$(kubectl cordon --selector invalid=label)
kube::test::if_has_not_string "${response}" 'cordoned'
# Post-condition: node "127.0.0.1" is cordoned
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'
set +o nounset
set +o errexit
}

140
vendor/k8s.io/kubernetes/test/cmd/old-print.sh generated vendored Executable file
View File

@@ -0,0 +1,140 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
run_kubectl_old_print_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl get --server-print=false"
### Test retrieval of all types in discovery
# Pre-condition: no resources exist
output_message=$(kubectl get pods --server-print=false 2>&1 "${kube_flags[@]}")
# Post-condition: Expect text indicating no resources were found
kube::test::if_has_string "${output_message}" 'No resources found.'
### Test retrieval of pods against server-side printing
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Compare "old" output with experimental output and ensure both are the same
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
expected_output=$(kubectl get pod "${kube_flags[@]}" | awk 'NF{NF--};1')
actual_output=$(kubectl get pod --server-print=false "${kube_flags[@]}" | awk 'NF{NF--};1')
kube::test::if_has_string "${actual_output}" "${expected_output}"
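# Illustrative note on the comparison above (not part of the test flow): the awk program
# 'NF{NF--};1' rebuilds every non-empty line without its last field, which is how the trailing
# AGE column is stripped before the two outputs are compared. For example:
#   echo "NAME READY STATUS RESTARTS AGE" | awk 'NF{NF--};1'
# prints:
#   NAME READY STATUS RESTARTS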
# Test printing objects with --use-openapi-print-columns
actual_output=$(kubectl get namespaces --use-openapi-print-columns --v=7 "${kube_flags[@]}" 2>&1)
# it should request full objects (not server-side printing)
kube::test::if_has_not_string "${actual_output}" 'application/json;as=Table'
kube::test::if_has_string "${actual_output}" 'application/json'
### Test retrieval of daemonsets against server-side printing
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
# Post-condition: daemonset is created
kube::test::get_object_assert ds "{{range.items}}{{$id_field}}:{{end}}" 'bind:'
# Compare "old" output with experimental output and ensure both are the same
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
expected_output=$(kubectl get ds "${kube_flags[@]}" | awk 'NF{NF--};1')
actual_output=$(kubectl get ds --server-print=false "${kube_flags[@]}" | awk 'NF{NF--};1')
kube::test::if_has_string "${actual_output}" "${expected_output}"
### Test retrieval of replicationcontrollers against server-side printing
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
# Post-condition: frontend replication controller is created
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Compare "old" output with experimental output and ensure both are the same
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
expected_output=$(kubectl get rc "${kube_flags[@]}" | awk 'NF{NF--};1')
actual_output=$(kubectl get rc --server-print=false "${kube_flags[@]}" | awk 'NF{NF--};1')
kube::test::if_has_string "${actual_output}" "${expected_output}"
### Test retrieval of replicasets against server-side printing
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
# Post-condition: frontend replica set is created
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Compare "old" output with experimental output and ensure both are the same
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
expected_output=$(kubectl get rs "${kube_flags[@]}" | awk 'NF{NF--};1')
actual_output=$(kubectl get rs --server-print=false "${kube_flags[@]}" | awk 'NF{NF--};1')
kube::test::if_has_string "${actual_output}" "${expected_output}"
### Test retrieval of jobs against server-side printing
kubectl run pi --generator=job/v1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
# Post-Condition: assertion object exists
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
# Compare "old" output with experimental output and ensure both are the same
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
expected_output=$(kubectl get jobs/pi "${kube_flags[@]}" | awk 'NF{NF--};1')
actual_output=$(kubectl get jobs/pi --server-print=false "${kube_flags[@]}" | awk 'NF{NF--};1')
kube::test::if_has_string "${actual_output}" "${expected_output}"
### Test retrieval of clusterroles against server-side printing
kubectl create "${kube_flags[@]}" clusterrole sample-role --verb=* --resource=pods
# Post-Condition: assertion object exists
kube::test::get_object_assert clusterrole/sample-role "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
# Compare "old" output with experimental output and ensure both are the same
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
expected_output=$(kubectl get clusterroles/sample-role "${kube_flags[@]}" | awk 'NF{NF--};1')
actual_output=$(kubectl get clusterroles/sample-role --server-print=false "${kube_flags[@]}" | awk 'NF{NF--};1')
kube::test::if_has_string "${actual_output}" "${expected_output}"
### Test retrieval of crds against server-side printing
kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
{
"kind": "CustomResourceDefinition",
"apiVersion": "apiextensions.k8s.io/v1beta1",
"metadata": {
"name": "foos.company.com"
},
"spec": {
"group": "company.com",
"version": "v1",
"scope": "Namespaced",
"names": {
"plural": "foos",
"kind": "Foo"
}
}
}
__EOF__
# Post-Condition: assertion object exists
kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{if eq $id_field \\\"foos.company.com\\\"}}{{$id_field}}:{{end}}{{end}}" 'foos.company.com:'
# Test that we can list this new CustomResource
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
# Compare "old" output with experimental output and ensure both are the same
expected_output=$(kubectl get foos "${kube_flags[@]}" | awk 'NF{NF--};1')
actual_output=$(kubectl get foos --server-print=false "${kube_flags[@]}" | awk 'NF{NF--};1')
kube::test::if_has_string "${actual_output}" "${expected_output}"
# teardown
kubectl delete customresourcedefinitions/foos.company.com "${kube_flags_with_token[@]}"
kubectl delete clusterroles/sample-role "${kube_flags_with_token[@]}"
kubectl delete jobs pi "${kube_flags[@]}"
kubectl delete rs frontend "${kube_flags[@]}"
kubectl delete rc frontend "${kube_flags[@]}"
kubectl delete ds bind "${kube_flags[@]}"
kubectl delete pod valid-pod "${kube_flags[@]}"
set +o nounset
set +o errexit
}

54
vendor/k8s.io/kubernetes/test/cmd/plugins.sh generated vendored Executable file
View File

@@ -0,0 +1,54 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
run_plugins_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl plugins"
# test plugins that overwrite existing kubectl commands
output_message=$(! PATH=${PATH}:"test/fixtures/pkg/kubectl/plugins/version" kubectl plugin list 2>&1)
kube::test::if_has_string "${output_message}" 'kubectl-version overwrites existing command: "kubectl version"'
# test plugins that overshadow similarly-named plugins
output_message=$(! PATH=${PATH}:"test/fixtures/pkg/kubectl/plugins:test/fixtures/pkg/kubectl/plugins/foo" kubectl plugin list 2>&1)
kube::test::if_has_string "${output_message}" 'test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo is overshadowed by a similarly named plugin'
# test plugins with no warnings
output_message=$(PATH=${PATH}:"test/fixtures/pkg/kubectl/plugins" kubectl plugin list 2>&1)
kube::test::if_has_string "${output_message}" 'plugins are available'
# no plugins
output_message=$(! PATH=${PATH}:"test/fixtures/pkg/kubectl/plugins/empty" kubectl plugin list 2>&1)
kube::test::if_has_string "${output_message}" 'unable to find any kubectl plugins in your PATH'
# attempt to run a plugin in the user's PATH
output_message=$(PATH=${PATH}:"test/fixtures/pkg/kubectl/plugins" kubectl foo)
kube::test::if_has_string "${output_message}" 'plugin foo'
# ensure that a kubectl command supersedes a plugin that overshadows it
output_message=$(PATH=${PATH}:"test/fixtures/pkg/kubectl/plugins/version" kubectl version)
kube::test::if_has_string "${output_message}" 'Client Version'
kube::test::if_has_not_string "${output_message}" 'overshadows an existing plugin'
set +o nounset
set +o errexit
}
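# A minimal illustrative sketch (hypothetical helper and path, never invoked here): the plugin
# mechanism exercised above discovers standalone executables named kubectl-<name> on PATH, so
# `kubectl foo` dispatches to a file called kubectl-foo. Creating a throwaway plugin by hand
# looks roughly like this, assuming a writable scratch directory:
create_demo_plugin_sketch() {
  local dir="${1:-/tmp/kubectl-demo-plugins}"
  mkdir -p "${dir}"
  printf '#!/usr/bin/env bash\necho "demo plugin ran"\n' > "${dir}/kubectl-demo"
  chmod +x "${dir}/kubectl-demo"
  echo "prepend ${dir} to PATH, then run: kubectl demo"
}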

56
vendor/k8s.io/kubernetes/test/cmd/proxy.sh generated vendored Executable file
View File

@@ -0,0 +1,56 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
run_kubectl_local_proxy_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl local proxy"
start-proxy
check-curl-proxy-code /api/kubernetes 404
check-curl-proxy-code /api/v1/namespaces 200
if kube::test::if_supports_resource "${metrics}" ; then
check-curl-proxy-code /metrics 200
fi
if kube::test::if_supports_resource "${static}" ; then
check-curl-proxy-code /static/ 200
fi
stop-proxy
# Make sure the in-development api is accessible by default
start-proxy
check-curl-proxy-code /apis 200
check-curl-proxy-code /apis/extensions/ 200
stop-proxy
# Custom paths let you see everything.
start-proxy /custom
check-curl-proxy-code /custom/api/kubernetes 404
check-curl-proxy-code /custom/api/v1/namespaces 200
if kube::test::if_supports_resource "${metrics}" ; then
check-curl-proxy-code /custom/metrics 200
fi
check-curl-proxy-code /custom/api/v1/namespaces 200
stop-proxy
set +o nounset
set +o errexit
}

159
vendor/k8s.io/kubernetes/test/cmd/rbac.sh generated vendored Executable file
View File

@@ -0,0 +1,159 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
run_clusterroles_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing clusterroles"
# make sure the server was properly bootstrapped with clusterroles and bindings
kube::test::get_object_assert clusterroles/cluster-admin "{{.metadata.name}}" 'cluster-admin'
kube::test::get_object_assert clusterrolebindings/cluster-admin "{{.metadata.name}}" 'cluster-admin'
# test `kubectl create clusterrole`
kubectl create "${kube_flags[@]}" clusterrole pod-admin --verb=* --resource=pods
kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" '\*:'
kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
kubectl create "${kube_flags[@]}" clusterrole resource-reader --verb=get,list --resource=pods,deployments.extensions
kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:get:list:'
kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:deployments:'
kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':extensions:'
kubectl create "${kube_flags[@]}" clusterrole resourcename-reader --verb=get,list --resource=pods --resource-name=foo
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.resourceNames}}{{.}}:{{end}}{{end}}" 'foo:'
kubectl create "${kube_flags[@]}" clusterrole url-reader --verb=get --non-resource-url=/logs/* --non-resource-url=/healthz/*
kube::test::get_object_assert clusterrole/url-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:'
kube::test::get_object_assert clusterrole/url-reader "{{range.rules}}{{range.nonResourceURLs}}{{.}}:{{end}}{{end}}" '/logs/\*:/healthz/\*:'
kubectl create "${kube_flags[@]}" clusterrole aggregation-reader --aggregation-rule="foo1=foo2"
kube::test::get_object_assert clusterrole/aggregation-reader "{{$id_field}}" 'aggregation-reader'
# test `kubectl create clusterrolebinding`
# test `kubectl set subject clusterrolebinding`
kubectl create "${kube_flags[@]}" clusterrolebinding super-admin --clusterrole=admin --user=super-admin
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
kubectl set subject "${kube_flags[@]}" clusterrolebinding super-admin --user=foo
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:foo:'
kubectl create "${kube_flags[@]}" clusterrolebinding multi-users --clusterrole=admin --user=user-1 --user=user-2
kube::test::get_object_assert clusterrolebinding/multi-users "{{range.subjects}}{{.name}}:{{end}}" 'user-1:user-2:'
kubectl create "${kube_flags[@]}" clusterrolebinding super-group --clusterrole=admin --group=the-group
kube::test::get_object_assert clusterrolebinding/super-group "{{range.subjects}}{{.name}}:{{end}}" 'the-group:'
kubectl set subject "${kube_flags[@]}" clusterrolebinding super-group --group=foo
kube::test::get_object_assert clusterrolebinding/super-group "{{range.subjects}}{{.name}}:{{end}}" 'the-group:foo:'
kubectl create "${kube_flags[@]}" clusterrolebinding multi-groups --clusterrole=admin --group=group-1 --group=group-2
kube::test::get_object_assert clusterrolebinding/multi-groups "{{range.subjects}}{{.name}}:{{end}}" 'group-1:group-2:'
kubectl create "${kube_flags[@]}" clusterrolebinding super-sa --clusterrole=admin --serviceaccount=otherns:sa-name
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:'
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:'
kubectl set subject "${kube_flags[@]}" clusterrolebinding super-sa --serviceaccount=otherfoo:foo
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:otherfoo:'
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:foo:'
# test `kubectl set subject clusterrolebinding --all`
kubectl set subject "${kube_flags[@]}" clusterrolebinding --all --user=test-all-user
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:foo:test-all-user:'
kube::test::get_object_assert clusterrolebinding/super-group "{{range.subjects}}{{.name}}:{{end}}" 'the-group:foo:test-all-user:'
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:foo:test-all-user:'
# test `kubectl create rolebinding`
# test `kubectl set subject rolebinding`
kubectl create "${kube_flags[@]}" rolebinding admin --clusterrole=admin --user=default-admin
kube::test::get_object_assert rolebinding/admin "{{.roleRef.kind}}" 'ClusterRole'
kube::test::get_object_assert rolebinding/admin "{{range.subjects}}{{.name}}:{{end}}" 'default-admin:'
kubectl set subject "${kube_flags[@]}" rolebinding admin --user=foo
kube::test::get_object_assert rolebinding/admin "{{range.subjects}}{{.name}}:{{end}}" 'default-admin:foo:'
kubectl create "${kube_flags[@]}" rolebinding localrole --role=localrole --group=the-group
kube::test::get_object_assert rolebinding/localrole "{{.roleRef.kind}}" 'Role'
kube::test::get_object_assert rolebinding/localrole "{{range.subjects}}{{.name}}:{{end}}" 'the-group:'
kubectl set subject "${kube_flags[@]}" rolebinding localrole --group=foo
kube::test::get_object_assert rolebinding/localrole "{{range.subjects}}{{.name}}:{{end}}" 'the-group:foo:'
kubectl create "${kube_flags[@]}" rolebinding sarole --role=localrole --serviceaccount=otherns:sa-name
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:'
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:'
kubectl set subject "${kube_flags[@]}" rolebinding sarole --serviceaccount=otherfoo:foo
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:otherfoo:'
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:foo:'
# test `kubectl set subject rolebinding --all`
kubectl set subject "${kube_flags[@]}" rolebinding --all --user=test-all-user
kube::test::get_object_assert rolebinding/admin "{{range.subjects}}{{.name}}:{{end}}" 'default-admin:foo:test-all-user:'
kube::test::get_object_assert rolebinding/localrole "{{range.subjects}}{{.name}}:{{end}}" 'the-group:foo:test-all-user:'
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:foo:test-all-user:'
set +o nounset
set +o errexit
}
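# A minimal illustrative sketch (hypothetical helper, never invoked here): the assertions above
# pass go-template expressions to kubectl, so each check is roughly equivalent to a direct call
# such as the one below against the pod-admin clusterrole created earlier in this function.
show_pod_admin_verbs_sketch() {
  kubectl get clusterrole pod-admin \
    -o go-template='{{range .rules}}{{range .verbs}}{{.}}:{{end}}{{end}}'
}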
run_role_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing role"
# Create Role from command (only resource)
kubectl create "${kube_flags[@]}" role pod-admin --verb=* --resource=pods
kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" '\*:'
kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
output_message=$(! kubectl create "${kube_flags[@]}" role invalid-pod-admin --verb=* --resource=invalid-resource 2>&1)
kube::test::if_has_string "${output_message}" "the server doesn't have a resource type \"invalid-resource\""
# Create Role from command (resource + group)
kubectl create "${kube_flags[@]}" role group-reader --verb=get,list --resource=deployments.extensions
kube::test::get_object_assert role/group-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert role/group-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'deployments:'
kube::test::get_object_assert role/group-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" 'extensions:'
output_message=$(! kubectl create "${kube_flags[@]}" role invalid-group --verb=get,list --resource=deployments.invalid-group 2>&1)
kube::test::if_has_string "${output_message}" "the server doesn't have a resource type \"deployments\" in group \"invalid-group\""
# Create Role from command (resource / subresource)
kubectl create "${kube_flags[@]}" role subresource-reader --verb=get,list --resource=pods/status
kube::test::get_object_assert role/subresource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert role/subresource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods/status:'
kube::test::get_object_assert role/subresource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
# Create Role from command (resource + group / subresource)
kubectl create "${kube_flags[@]}" role group-subresource-reader --verb=get,list --resource=replicasets.extensions/scale
kube::test::get_object_assert role/group-subresource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert role/group-subresource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'replicasets/scale:'
kube::test::get_object_assert role/group-subresource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" 'extensions:'
output_message=$(! kubectl create "${kube_flags[@]}" role invalid-group --verb=get,list --resource=rs.invalid-group/scale 2>&1)
kube::test::if_has_string "${output_message}" "the server doesn't have a resource type \"rs\" in group \"invalid-group\""
# Create Role from command (resource + resourcename)
kubectl create "${kube_flags[@]}" role resourcename-reader --verb=get,list --resource=pods --resource-name=foo
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.resourceNames}}{{.}}:{{end}}{{end}}" 'foo:'
# Create Role from command (multi-resources)
kubectl create "${kube_flags[@]}" role resource-reader --verb=get,list --resource=pods/status,deployments.extensions
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:get:list:'
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods/status:deployments:'
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':extensions:'
set +o nounset
set +o errexit
}

57
vendor/k8s.io/kubernetes/test/cmd/request-timeout.sh generated vendored Executable file
View File

@@ -0,0 +1,57 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
run_kubectl_request_timeout_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl request timeout"
### Test global request timeout option
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod POD is created
kubectl get "${kube_flags[@]}" pods -o json
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
## check --request-timeout on 'get pod'
output_message=$(kubectl get pod valid-pod --request-timeout=1)
kube::test::if_has_string "${output_message}" 'valid-pod'
## check --request-timeout on 'get pod' with --watch
output_message=$(kubectl get pod valid-pod --request-timeout=1 --watch 2>&1)
kube::test::if_has_string "${output_message}" 'Timeout exceeded while reading body'
## check --request-timeout value with no time unit
output_message=$(kubectl get pod valid-pod --request-timeout=1 2>&1)
kube::test::if_has_string "${output_message}" 'valid-pod'
## check --request-timeout value with invalid time unit
output_message=$(! kubectl get pod valid-pod --request-timeout="1p" 2>&1)
kube::test::if_has_string "${output_message}" 'Invalid timeout value'
# cleanup
kubectl delete pods valid-pod "${kube_flags[@]}"
set +o nounset
set +o errexit
}
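# Illustrative note (not part of the test flow): as the checks above show, --request-timeout
# accepts either a bare number (the test passes "1") or a Go-style duration such as "30s" or
# "1m", while a value with an unrecognized unit suffix like "1p" is rejected. A hypothetical
# standalone invocation, never run by this suite:
request_timeout_sketch() {
  kubectl get pods --request-timeout=30s
}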

104
vendor/k8s.io/kubernetes/test/cmd/run.sh generated vendored Executable file
View File

@@ -0,0 +1,104 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
run_kubectl_run_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl run"
  ## kubectl run should create deployments, jobs or cronjobs
# Pre-Condition: no Job exists
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl run pi --generator=job/v1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
# Post-Condition: Job "pi" is created
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By"
# Clean up
kubectl delete jobs pi "${kube_flags[@]}"
# Post-condition: no pods exist.
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Pre-Condition: no Deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl run nginx-extensions "--image=$IMAGE_NGINX" "${kube_flags[@]}"
# Post-Condition: Deployment "nginx-extensions" is created
kube::test::get_object_assert deployment.extensions "{{range.items}}{{$id_field}}:{{end}}" 'nginx-extensions:'
# new generator was used
output_message=$(kubectl get deployment.extensions/nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_string "${output_message}" '2'
# Clean up
kubectl delete deployment nginx-extensions "${kube_flags[@]}"
# Command
kubectl run nginx-apps "--image=$IMAGE_NGINX" --generator=deployment/apps.v1beta1 "${kube_flags[@]}"
# Post-Condition: Deployment "nginx-apps" is created
kube::test::get_object_assert deployment.apps "{{range.items}}{{$id_field}}:{{end}}" 'nginx-apps:'
# and the new generator was used, i.e. the new defaults are applied
output_message=$(kubectl get deployment/nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_string "${output_message}" '2'
# Clean up
kubectl delete deployment nginx-apps "${kube_flags[@]}"
# Pre-Condition: no CronJob exists
kube::test::get_object_assert cronjobs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl run pi --schedule="*/5 * * * *" --generator=cronjob/v1beta1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
# Post-Condition: CronJob "pi" is created
kube::test::get_object_assert cronjobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
# Pre-condition: cronjob has perl image, not custom image
output_message=$(kubectl get cronjob/pi -o jsonpath='{..image}')
kube::test::if_has_not_string "${output_message}" "custom-image"
kube::test::if_has_string "${output_message}" "${IMAGE_PERL}"
# Set cronjob image
kubectl set image cronjob/pi '*=custom-image'
# Post-condition: cronjob has custom image, not perl image
output_message=$(kubectl get cronjob/pi -o jsonpath='{..image}')
kube::test::if_has_string "${output_message}" "custom-image"
kube::test::if_has_not_string "${output_message}" "${IMAGE_PERL}"
# Clean up
kubectl delete cronjobs pi "${kube_flags[@]}"
set +o nounset
set +o errexit
}
run_cmd_with_img_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing cmd with image"
# Test that a valid image reference is accepted as the value of --image in `kubectl run <name> --image`
output_message=$(kubectl run test1 --image=validname)
kube::test::if_has_string "${output_message}" 'deployment.apps/test1 created'
kubectl delete deployments test1
# test invalid image name
output_message=$(! kubectl run test2 --image=InvalidImageName 2>&1)
kube::test::if_has_string "${output_message}" 'error: Invalid image name "InvalidImageName": invalid reference format'
set +o nounset
set +o errexit
}

105
vendor/k8s.io/kubernetes/test/cmd/save-config.sh generated vendored Executable file
View File

@@ -0,0 +1,105 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# Runs tests for --save-config tests.
run_save_config_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl --save-config"
## Configuration annotations should be set when --save-config is enabled
## 1. kubectl create --save-config should generate configuration annotation
# Pre-Condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create a pod "test-pod"
kubectl create -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
## 2. kubectl edit --save-config should generate configuration annotation
# Pre-Condition: no POD exists, then create pod "test-pod", which shouldn't have configuration annotation
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: edit the pod "test-pod"
temp_editor="${KUBE_TEMP}/tmp-editor.sh"
echo -e "#!/usr/bin/env bash\n${SED} -i \"s/test-pod-label/test-pod-label-edited/g\" \$@" > "${temp_editor}"
chmod +x "${temp_editor}"
EDITOR=${temp_editor} kubectl edit pod test-pod --save-config "${kube_flags[@]}"
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
## 3. kubectl replace --save-config should generate configuration annotation
# Pre-Condition: no POD exists, then create pod "test-pod", which shouldn't have configuration annotation
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: replace the pod "test-pod"
kubectl replace -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
## 4. kubectl run --save-config should generate configuration annotation
# Pre-Condition: no RC exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create the rc "nginx" with image nginx
kubectl run nginx "--image=$IMAGE_NGINX" --save-config --generator=run/v1 "${kube_flags[@]}"
# Post-Condition: rc "nginx" has configuration annotation
[[ "$(kubectl get rc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
## 5. kubectl expose --save-config should generate configuration annotation
# Pre-Condition: no service exists
kube::test::get_object_assert svc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: expose the rc "nginx"
kubectl expose rc nginx --save-config --port=80 --target-port=8000 "${kube_flags[@]}"
# Post-Condition: service "nginx" has configuration annotation
[[ "$(kubectl get svc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete rc,svc nginx
## 6. kubectl autoscale --save-config should generate configuration annotation
# Pre-Condition: no RC exists, then create the rc "frontend", which shouldn't have configuration annotation
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
! [[ "$(kubectl get rc frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: autoscale rc "frontend"
kubectl autoscale -f hack/testdata/frontend-controller.yaml --save-config "${kube_flags[@]}" --max=2
# Post-Condition: hpa "frontend" has configuration annotation
[[ "$(kubectl get hpa frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Ensure we can interact with HPA objects in lists through autoscaling/v1 APIs
output_message=$(kubectl get hpa -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'autoscaling/v1'
output_message=$(kubectl get hpa.autoscaling -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'autoscaling/v1'
# tests kubectl group prefix matching
output_message=$(kubectl get hpa.autoscal -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'autoscaling/v1'
# Clean up
# Note that we should delete hpa first, otherwise it may fight with the rc reaper.
kubectl delete hpa frontend "${kube_flags[@]}"
kubectl delete rc frontend "${kube_flags[@]}"
set +o nounset
set +o errexit
}
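# A minimal illustrative sketch (hypothetical helper, never invoked here): each check above greps
# the raw object for the kubectl.kubernetes.io/last-applied-configuration annotation; the same
# data can also be read back with `kubectl apply view-last-applied`, for example:
show_last_applied_sketch() {
  local kind_name="${1:?usage: show_last_applied_sketch <kind>/<name>}"
  kubectl apply view-last-applied "${kind_name}"
}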

110
vendor/k8s.io/kubernetes/test/cmd/storage.sh generated vendored Executable file
View File

@@ -0,0 +1,110 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
run_persistent_volumes_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing persistent volumes"
### Create and delete persistent volume examples
# Pre-condition: no persistent volumes currently exist
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/local-01.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0001:'
kubectl delete pv pv0001 "${kube_flags[@]}"
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/local-02.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0002:'
kubectl delete pv pv0002 "${kube_flags[@]}"
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/gce.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0003:'
kubectl delete pv pv0003 "${kube_flags[@]}"
# Post-condition: no PVs
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
set +o nounset
set +o errexit
}
run_persistent_volume_claims_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing persistent volumes claims"
### Create and delete persistent volume claim examples
# Pre-condition: no persistent volume claims currently exist
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-01.yaml "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-1:'
kubectl delete pvc myclaim-1 "${kube_flags[@]}"
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-02.yaml "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-2:'
kubectl delete pvc myclaim-2 "${kube_flags[@]}"
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-03.json "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-3:'
kubectl delete pvc myclaim-3 "${kube_flags[@]}"
# Post-condition: no PVCs
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''
set +o nounset
set +o errexit
}
run_storage_class_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing storage class"
### Create and delete storage class
# Pre-condition: no storage classes currently exist
kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "StorageClass",
"apiVersion": "storage.k8s.io/v1",
"metadata": {
"name": "storage-class-name"
},
"provisioner": "kubernetes.io/fake-provisioner-type",
"parameters": {
"zone":"us-east-1b",
"type":"ssd"
}
}
__EOF__
kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" 'storage-class-name:'
kube::test::get_object_assert sc "{{range.items}}{{$id_field}}:{{end}}" 'storage-class-name:'
kubectl delete storageclass storage-class-name "${kube_flags[@]}"
# Post-condition: no storage classes
kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" ''
set +o nounset
set +o errexit
}

250
vendor/k8s.io/kubernetes/test/cmd/template-output.sh generated vendored Executable file
View File

@@ -0,0 +1,250 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
run_template_output_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing --template support on commands"
### Test --template output support
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# check that create supports --template output
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod POD is created
kubectl get "${kube_flags[@]}" pods -o json
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# check that patch command supports --template output
output_message=$(kubectl "${kube_flags[@]}" patch --dry-run pods/valid-pod -p '{"patched":"value3"}' --type=merge --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'valid-pod:'
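# Illustrative note (not part of the test flow): every check in this function hands kubectl a
# go-template via --template, which is rendered against the returned object even for --dry-run
# requests, so "{{ .metadata.name }}:" prints the object's name followed by a colon. An
# equivalent standalone query against the pod created above would be:
#   kubectl get pod valid-pod -o go-template='{{ .metadata.name }}:'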
# check that label command supports --template output
output_message=$(kubectl "${kube_flags[@]}" label --dry-run pods/valid-pod label=value --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'valid-pod:'
# check that annotate command supports --template output
output_message=$(kubectl "${kube_flags[@]}" annotate --dry-run pods/valid-pod annotation=value --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'valid-pod:'
# check that apply command supports --template output
output_message=$(kubectl "${kube_flags[@]}" apply --dry-run -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'valid-pod:'
# check that create command supports --template output
output_message=$(kubectl "${kube_flags[@]}" create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml --dry-run --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'valid-pod:'
# check that autoscale command supports --template output
output_message=$(kubectl "${kube_flags[@]}" autoscale --max=2 -f hack/testdata/scale-deploy-1.yaml --dry-run --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'scale-1:'
# check that expose command supports --template output
output_message=$(kubectl "${kube_flags[@]}" expose -f hack/testdata/redis-slave-replicaset.yaml --save-config --port=80 --target-port=8000 --dry-run --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'redis-slave:'
# check that convert command supports --template output
output_message=$(kubectl "${kube_flags[@]}" convert -f hack/testdata/deployment-revision1.yaml --output-version=apps/v1beta1 --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'nginx:'
# check that run command supports --template output
output_message=$(kubectl "${kube_flags[@]}" run --dry-run --template="{{ .metadata.name }}:" pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)')
kube::test::if_has_string "${output_message}" 'pi:'
# check that taint command supports --template output
output_message=$(kubectl "${kube_flags[@]}" taint node 127.0.0.1 dedicated=foo:PreferNoSchedule --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" '127.0.0.1:'
# untaint node
kubectl taint node 127.0.0.1 dedicated-
# check that "apply set-last-applied" command supports --template output
kubectl "${kube_flags[@]}" create -f test/e2e/testing-manifests/statefulset/cassandra/controller.yaml
output_message=$(kubectl "${kube_flags[@]}" apply set-last-applied -f test/e2e/testing-manifests/statefulset/cassandra/controller.yaml --dry-run --create-annotation --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'cassandra:'
# check that "auth reconcile" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" auth reconcile --dry-run -f test/fixtures/pkg/kubectl/cmd/auth/rbac-resource-plus.yaml --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'testing-CR:testing-CRB:testing-RB:testing-R:'
# check that "create clusterrole" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" create clusterrole --template="{{ .metadata.name }}:" --verb get myclusterrole --non-resource-url /logs/ --resource pods)
kube::test::if_has_string "${output_message}" 'myclusterrole:'
# check that "create clusterrolebinding" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" create clusterrolebinding foo --clusterrole=myclusterrole --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'foo:'
# check that "create configmap" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" create configmap cm --dry-run --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'cm:'
# check that "create deployment" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" create deployment deploy --image=nginx --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'deploy:'
# check that "create job" command supports --template output
kubectl create "${kube_flags[@]}" -f - <<EOF
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: pi
spec:
schedule: "*/10 * * * *"
jobTemplate:
spec:
template:
metadata:
labels:
parent: "pi"
spec:
containers:
- name: pi
image: perl
command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
restartPolicy: OnFailure
EOF
output_message=$(kubectl "${kube_flags[@]}" create job foo --from=cronjob/pi --dry-run --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'foo:'
# check that "create namespace" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" create ns bar --dry-run --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'bar:'
# check that "create namespace" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" create rolebinding foo --clusterrole=myclusterrole --dry-run --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'foo:'
# check that "create role" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" create role --dry-run --template="{{ .metadata.name }}:" --verb get myrole --resource pods)
kube::test::if_has_string "${output_message}" 'myrole:'
# check that "create quota" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" create quota foo --dry-run --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'foo:'
# check that "create priorityclass" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" create priorityclass foo --dry-run --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'foo:'
# check that "create poddisruptionbudget" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" create poddisruptionbudget foo --dry-run --selector=foo --min-available=1 --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'foo:'
# check that "create serviceaccount" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" create serviceaccount foo --dry-run --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'foo:'
# check that "set env" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" set env pod/valid-pod --dry-run A=B --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'valid-pod:'
# check that "set image" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" set image pod/valid-pod --dry-run kubernetes-serve-hostname=nginx --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'valid-pod:'
# check that "set resources" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" set resources pod/valid-pod --limits=memory=256Mi --dry-run --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'valid-pod:'
# check that "set selector" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" set selector -f hack/testdata/kubernetes-service.yaml A=B --local --dry-run --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'kubernetes:'
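# Illustration (assumption, not part of the original checks): --local rewrites
# only the local manifest, so the modified selector shows up in the output
# without contacting the API server.
output_message=$(kubectl "${kube_flags[@]}" set selector -f hack/testdata/kubernetes-service.yaml role=padawan --local --dry-run -o yaml)
kube::test::if_has_string "${output_message}" 'role: padawan'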
# check that "set serviceaccount" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" set serviceaccount pod/valid-pod deployer --dry-run --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'valid-pod:'
# check that "set subject" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" set subject clusterrolebinding/foo --user=foo --dry-run --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'foo:'
# check that "create secret docker-registry" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" create secret docker-registry foo --docker-username user --docker-password pass --docker-email foo@bar.baz --dry-run --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'foo:'
# check that "create secret generic" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" create secret generic foo --from-literal=key1=value1 --dry-run --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'foo:'
# check that "create secret tls" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" create secret tls --dry-run foo --key=hack/testdata/tls.key --cert=hack/testdata/tls.crt --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'foo:'
# check that "create service clusterip" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" create service clusterip foo --dry-run --tcp=8080 --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'foo:'
# check that "create service externalname" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" create service externalname foo --dry-run --external-name=bar --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'foo:'
# check that "create service loadbalancer" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" create service loadbalancer foo --dry-run --tcp=8080 --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'foo:'
# check that "create service nodeport" command supports --template output
output_message=$(kubectl "${kube_flags[@]}" create service nodeport foo --dry-run --tcp=8080 --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'foo:'
# check that "config view" ouputs "yaml" as its default output format
output_message=$(kubectl "${kube_flags[@]}" config view)
kube::test::if_has_string "${output_message}" 'kind: Config'
# check that "rollout pause" supports --template output
output_message=$(kubectl "${kube_flags[@]}" rollout pause deploy/deploy --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'deploy:'
# check that "rollout history" supports --template output
output_message=$(kubectl "${kube_flags[@]}" rollout history deploy/deploy --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'deploy:'
# check that "rollout resume" supports --template output
output_message=$(kubectl "${kube_flags[@]}" rollout resume deploy/deploy --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'deploy:'
# check that "rollout undo" supports --template output
output_message=$(kubectl "${kube_flags[@]}" rollout undo deploy/deploy --template="{{ .metadata.name }}:")
kube::test::if_has_string "${output_message}" 'deploy:'
# check that "config view" command supports --template output
# and that commands that set a default output format (yaml in this case)
# fall back to "go-template" as their output format when a --template value
# is provided but no explicit --output format is given.
output_message=$(kubectl "${kube_flags[@]}" config view --template="{{ .kind }}:")
kube::test::if_has_string "${output_message}" 'Config'
# check that running a command with both a --template flag and a
# non-template --output prefers the non-template output value
output_message=$(kubectl "${kube_flags[@]}" create configmap cm --dry-run --template="{{ .metadata.name }}:" --output yaml)
kube::test::if_has_string "${output_message}" 'kind: ConfigMap'
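# Illustration (assumption, not part of the original checks): any explicit
# non-template output format wins over --template, json as well as yaml.
output_message=$(kubectl "${kube_flags[@]}" create configmap cm --dry-run --template="{{ .metadata.name }}:" --output json)
kube::test::if_has_string "${output_message}" '"kind": "ConfigMap"'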
# cleanup
kubectl delete cronjob pi "${kube_flags[@]}"
kubectl delete pods --all "${kube_flags[@]}"
kubectl delete rc cassandra "${kube_flags[@]}"
kubectl delete clusterrole myclusterrole "${kube_flags[@]}"
kubectl delete clusterrolebinding foo "${kube_flags[@]}"
kubectl delete deployment deploy "${kube_flags[@]}"
set +o nounset
set +o errexit
}

69
vendor/k8s.io/kubernetes/test/cmd/version.sh generated vendored Executable file
View File

@@ -0,0 +1,69 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
########################################################
# Kubectl version (--short, --client, --output) #
########################################################
run_kubectl_version_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl version"
TEMP="${KUBE_TEMP}"
kubectl get "${kube_flags[@]}" --raw /version
# create version files, one for the client, one for the server.
# these are the baseline files used to verify that the remaining output checks are correct
kube::test::version::object_to_file "Client" "" "${TEMP}/client_version_test"
kube::test::version::object_to_file "Server" "" "${TEMP}/server_version_test"
kube::log::status "Testing kubectl version: check client only output matches expected output"
kube::test::version::object_to_file "Client" "--client" "${TEMP}/client_only_version_test"
kube::test::version::object_to_file "Client" "--client" "${TEMP}/server_client_only_version_test"
kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_only_version_test" "the flag '--client' shows correct client info"
kube::test::version::diff_assert "${TEMP}/server_version_test" "ne" "${TEMP}/server_client_only_version_test" "the flag '--client' correctly has no server version info"
kube::log::status "Testing kubectl version: verify json output"
kube::test::version::json_client_server_object_to_file "" "clientVersion" "${TEMP}/client_json_version_test"
kube::test::version::json_client_server_object_to_file "" "serverVersion" "${TEMP}/server_json_version_test"
kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_json_version_test" "--output json has correct client info"
kube::test::version::diff_assert "${TEMP}/server_version_test" "eq" "${TEMP}/server_json_version_test" "--output json has correct server info"
kube::log::status "Testing kubectl version: verify json output using additional --client flag does not contain serverVersion"
kube::test::version::json_client_server_object_to_file "--client" "clientVersion" "${TEMP}/client_only_json_version_test"
kube::test::version::json_client_server_object_to_file "--client" "serverVersion" "${TEMP}/server_client_only_json_version_test"
kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_only_json_version_test" "--client --output json has correct client info"
kube::test::version::diff_assert "${TEMP}/server_version_test" "ne" "${TEMP}/server_client_only_json_version_test" "--client --output json has no server info"
kube::log::status "Testing kubectl version: compare json output using additional --short flag"
kube::test::version::json_client_server_object_to_file "--short" "clientVersion" "${TEMP}/client_short_json_version_test"
kube::test::version::json_client_server_object_to_file "--short" "serverVersion" "${TEMP}/server_short_json_version_test"
kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_short_json_version_test" "--short --output client json info is equal to non short result"
kube::test::version::diff_assert "${TEMP}/server_version_test" "eq" "${TEMP}/server_short_json_version_test" "--short --output server json info is equal to non short result"
kube::log::status "Testing kubectl version: compare json output with yaml output"
kube::test::version::json_object_to_file "" "${TEMP}/client_server_json_version_test"
kube::test::version::yaml_object_to_file "" "${TEMP}/client_server_yaml_version_test"
kube::test::version::diff_assert "${TEMP}/client_server_json_version_test" "eq" "${TEMP}/client_server_yaml_version_test" "--output json/yaml has identical information"
set +o nounset
set +o errexit
}

View File

@@ -34,7 +34,7 @@ genrule(
"//test/e2e_node:all-srcs",
],
outs = ["conformance.txt"],
cmd = "./$(location :conformance) $(locations //test/e2e:all-srcs) $(locations //test/e2e_node:all-srcs) > $@",
cmd = "./$(location :conformance) $(locations //test/e2e:all-srcs) > $@",
message = "Listing all conformance tests.",
tools = [":conformance"],
)

View File

@@ -6,6 +6,8 @@ test/e2e/apimachinery/garbage_collector.go: "should orphan RS created by deploym
test/e2e/apimachinery/garbage_collector.go: "should keep the rc around until all its pods are deleted if the deleteOptions says so"
test/e2e/apimachinery/garbage_collector.go: "should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted"
test/e2e/apimachinery/garbage_collector.go: "should not be blocked by dependency circle"
test/e2e/apimachinery/namespace.go: "should ensure that all pods are removed when a namespace is deleted"
test/e2e/apimachinery/namespace.go: "should ensure that all services are removed when a namespace is deleted"
test/e2e/apimachinery/watch.go: "should observe add, update, and delete watch notifications on configmaps"
test/e2e/apimachinery/watch.go: "should be able to start watching from a specific resource version"
test/e2e/apimachinery/watch.go: "should be able to restart watching from the last resource version observed by the previous watch"
@@ -15,6 +17,11 @@ test/e2e/apps/daemon_set.go: "should run and stop complex daemon"
test/e2e/apps/daemon_set.go: "should retry creating failed daemon pods"
test/e2e/apps/daemon_set.go: "should update pod when spec was updated and update strategy is RollingUpdate"
test/e2e/apps/daemon_set.go: "should rollback without unnecessary restarts"
test/e2e/apps/deployment.go: "RollingUpdateDeployment should delete old pods and create new ones"
test/e2e/apps/deployment.go: "RecreateDeployment should delete old pods and create new ones"
test/e2e/apps/deployment.go: "deployment should delete old replica sets"
test/e2e/apps/deployment.go: "deployment should support rollover"
test/e2e/apps/deployment.go: "deployment should support proportional scaling"
test/e2e/apps/rc.go: "should serve a basic image on each replica with a public image"
test/e2e/apps/replica_set.go: "should serve a basic image on each replica with a public image"
test/e2e/apps/statefulset.go: "should perform rolling updates and roll backs of template modifications"
@@ -33,6 +40,7 @@ test/e2e/common/configmap_volume.go: "should be consumable from pods in volume w
test/e2e/common/configmap_volume.go: "should be consumable from pods in volume with mappings and Item mode set"
test/e2e/common/configmap_volume.go: "should be consumable from pods in volume with mappings as non-root"
test/e2e/common/configmap_volume.go: "updates should be reflected in volume"
test/e2e/common/configmap_volume.go: "binary data should be reflected in volume"
test/e2e/common/configmap_volume.go: "optional updates should be reflected in volume"
test/e2e/common/configmap_volume.go: "should be consumable in multiple volumes in the same pod"
test/e2e/common/container_probe.go: "with readiness probe should not be ready before initial delay and never restart"
@@ -80,7 +88,15 @@ test/e2e/common/expansion.go: "should allow composing env vars into new env vars
test/e2e/common/expansion.go: "should allow substituting values in a container's command"
test/e2e/common/expansion.go: "should allow substituting values in a container's args"
test/e2e/common/host_path.go: "should give a volume the correct mode"
test/e2e/common/init_container.go: "should invoke init containers on a RestartNever pod"
test/e2e/common/init_container.go: "should invoke init containers on a RestartAlways pod"
test/e2e/common/init_container.go: "should not start app containers if init containers fail on a RestartAlways pod"
test/e2e/common/init_container.go: "should not start app containers and fail the pod if init containers fail on a RestartNever pod"
test/e2e/common/kubelet_etc_hosts.go: "should test kubelet managed /etc/hosts file"
test/e2e/common/lifecycle_hook.go: "should execute poststart exec hook properly"
test/e2e/common/lifecycle_hook.go: "should execute prestop exec hook properly"
test/e2e/common/lifecycle_hook.go: "should execute poststart http hook properly"
test/e2e/common/lifecycle_hook.go: "should execute prestop http hook properly"
test/e2e/common/networking.go: "should function for intra-pod communication: http"
test/e2e/common/networking.go: "should function for intra-pod communication: udp"
test/e2e/common/networking.go: "should function for node-pod communication: http"
@@ -125,6 +141,7 @@ test/e2e/common/secrets_volume.go: "should be consumable from pods in volume wit
test/e2e/common/secrets_volume.go: "should be consumable from pods in volume as non-root with defaultMode and fsGroup set"
test/e2e/common/secrets_volume.go: "should be consumable from pods in volume with mappings"
test/e2e/common/secrets_volume.go: "should be consumable from pods in volume with mappings and Item Mode set"
test/e2e/common/secrets_volume.go: "should be able to mount in a volume regardless of a different secret existing with same name in different namespace"
test/e2e/common/secrets_volume.go: "should be consumable in multiple volumes in a pod"
test/e2e/common/secrets_volume.go: "optional updates should be reflected in volume"
test/e2e/kubectl/kubectl.go: "should create and stop a replication controller"
@@ -165,13 +182,8 @@ test/e2e/node/pre_stop.go: "should call prestop when killing a pod"
test/e2e/scheduling/predicates.go: "validates resource limits of pods that are allowed to run"
test/e2e/scheduling/predicates.go: "validates that NodeSelector is respected if not matching"
test/e2e/scheduling/predicates.go: "validates that NodeSelector is respected if matching"
test/e2e_node/kubelet_test.go: "it should print the output to logs"
test/e2e_node/kubelet_test.go: "it should not write to root filesystem"
test/e2e_node/lifecycle_hook_test.go: "should execute poststart exec hook properly"
test/e2e_node/lifecycle_hook_test.go: "should execute prestop exec hook properly"
test/e2e_node/lifecycle_hook_test.go: "should execute poststart http hook properly"
test/e2e_node/lifecycle_hook_test.go: "should execute prestop http hook properly"
test/e2e_node/mirror_pod_test.go: "should be updated when static pod updated"
test/e2e_node/mirror_pod_test.go: "should be recreated when mirror pod gracefully deleted"
test/e2e_node/mirror_pod_test.go: "should be recreated when mirror pod forcibly deleted"
test/e2e_node/runtime_conformance_test.go: "it should run with the expected status"
test/e2e/storage/subpath.go: "should support subpaths with secret pod"
test/e2e/storage/subpath.go: "should support subpaths with configmap pod"
test/e2e/storage/subpath.go: "should support subpaths with configmap pod with mountPath of existing file"
test/e2e/storage/subpath.go: "should support subpaths with downward pod"
test/e2e/storage/subpath.go: "should support subpaths with projected pod"

View File

@@ -46,6 +46,18 @@ go_library(
"//pkg/cloudprovider/providers/azure:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/version:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/logs:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/plugin/pkg/client/auth:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/ginkgowrapper:go_default_library",
@@ -58,18 +70,6 @@ go_library(
"//vendor/github.com/onsi/ginkgo/config:go_default_library",
"//vendor/github.com/onsi/ginkgo/reporters:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/logs:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/plugin/pkg/client/auth:go_default_library",
],
)

View File

@@ -29,6 +29,45 @@ go_library(
"//pkg/apis/rbac:go_default_library",
"//pkg/printers:go_default_library",
"//pkg/util/version:go_default_library",
"//staging/src/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
"//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/authorization/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/util/cert:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
"//staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1:go_default_library",
"//test/e2e/apps:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
@@ -37,44 +76,6 @@ go_library(
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
"//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/authorization/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/util/cert:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
"//vendor/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1:go_default_library",
],
)

View File

@@ -133,7 +133,7 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
// kubectl create -f deploy.yaml
deploymentName := "sample-apiserver-deployment"
etcdImage := "quay.io/coreos/etcd:v3.2.18"
etcdImage := "quay.io/coreos/etcd:v3.2.24"
podLabels := map[string]string{"app": "sample-apiserver", "apiserver": "true"}
replicas := int32(1)
zero := int64(0)
@@ -436,7 +436,7 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
unstruct := &unstructuredv1.Unstructured{}
err = unstruct.UnmarshalJSON(jsonFlunder)
framework.ExpectNoError(err, "unmarshalling test-flunder as unstructured for create using dynamic client")
unstruct, err = dynamicClient.Create(unstruct)
unstruct, err = dynamicClient.Create(unstruct, metav1.CreateOptions{})
framework.ExpectNoError(err, "listing flunders using dynamic client")
// kubectl get flunders

View File

@@ -19,12 +19,17 @@ package apimachinery
import (
"fmt"
"math/rand"
"reflect"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/test/e2e/framework"
)
@@ -34,11 +39,10 @@ const numberOfTotalResources = 400
var _ = SIGDescribe("Servers with support for API chunking", func() {
f := framework.NewDefaultFramework("chunking")
It("should return chunks of results for list calls", func() {
BeforeEach(func() {
ns := f.Namespace.Name
c := f.ClientSet
client := c.CoreV1().PodTemplates(ns)
By("creating a large number of resources")
workqueue.Parallelize(20, numberOfTotalResources, func(i int) {
for tries := 3; tries >= 0; tries-- {
@@ -61,7 +65,12 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
}
Fail("Unable to create template %d, exiting", i)
})
})
It("should return chunks of results for list calls", func() {
ns := f.Namespace.Name
c := f.ClientSet
client := c.CoreV1().PodTemplates(ns)
By("retrieving those results in paged fashion several times")
for i := 0; i < 3; i++ {
opts := metav1.ListOptions{}
@@ -81,9 +90,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
if len(lastRV) == 0 {
lastRV = list.ResourceVersion
}
if lastRV != list.ResourceVersion {
Expect(list.ResourceVersion).To(Equal(lastRV))
}
Expect(list.ResourceVersion).To(Equal(lastRV))
for _, item := range list.Items {
Expect(item.Name).To(Equal(fmt.Sprintf("template-%04d", found)))
found++
@@ -101,4 +108,81 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
Expect(err).ToNot(HaveOccurred())
Expect(list.Items).To(HaveLen(numberOfTotalResources))
})
It("should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent", func() {
ns := f.Namespace.Name
c := f.ClientSet
client := c.CoreV1().PodTemplates(ns)
By("retrieving the first page")
oneTenth := int64(numberOfTotalResources / 10)
opts := metav1.ListOptions{}
opts.Limit = oneTenth
list, err := client.List(opts)
// TODO: kops PR job is still using etcd2, which prevents this feature from working. Remove this check when kops is upgraded to etcd3
if len(list.Items) > int(opts.Limit) {
framework.Skipf("ERROR: This cluster does not support chunking, which means it is running etcd2 and not supported.")
}
Expect(err).ToNot(HaveOccurred())
firstToken := list.Continue
firstRV := list.ResourceVersion
framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, firstToken)
By("retrieving the second page until the token expires")
opts.Continue = firstToken
var inconsistentToken string
wait.Poll(20*time.Second, 2*storagebackend.DefaultCompactInterval, func() (bool, error) {
_, err := client.List(opts)
if err == nil {
framework.Logf("Token %s has not expired yet", firstToken)
return false, nil
}
if err != nil && !errors.IsResourceExpired(err) {
return false, err
}
framework.Logf("got error %s", err)
status, ok := err.(errors.APIStatus)
if !ok {
return false, fmt.Errorf("expect error to implement the APIStatus interface, got %v", reflect.TypeOf(err))
}
inconsistentToken = status.Status().ListMeta.Continue
if len(inconsistentToken) == 0 {
return false, fmt.Errorf("expect non empty continue token")
}
framework.Logf("Retrieved inconsistent continue %s", inconsistentToken)
return true, nil
})
By("retrieving the second page again with the token received with the error message")
opts.Continue = inconsistentToken
list, err = client.List(opts)
Expect(err).ToNot(HaveOccurred())
Expect(list.ResourceVersion).ToNot(Equal(firstRV))
Expect(len(list.Items)).To(BeNumerically("==", opts.Limit))
found := oneTenth
for _, item := range list.Items {
Expect(item.Name).To(Equal(fmt.Sprintf("template-%04d", found)))
found++
}
By("retrieving all remaining pages")
opts.Continue = list.Continue
lastRV := list.ResourceVersion
for {
list, err := client.List(opts)
Expect(err).ToNot(HaveOccurred())
framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
Expect(len(list.Items)).To(BeNumerically("<=", opts.Limit))
Expect(list.ResourceVersion).To(Equal(lastRV))
for _, item := range list.Items {
Expect(item.Name).To(Equal(fmt.Sprintf("template-%04d", found)))
found++
}
if len(list.Continue) == 0 {
break
}
opts.Continue = list.Continue
}
Expect(found).To(BeNumerically("==", numberOfTotalResources))
})
})

View File

@@ -21,7 +21,7 @@ import (
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/testserver"
"k8s.io/apiextensions-apiserver/test/integration/fixtures"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -63,14 +63,14 @@ var _ = SIGDescribe("CustomResourceDefinition Watch", func() {
framework.Failf("failed to initialize apiExtensionClient: %v", err)
}
noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped)
noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, f.DynamicClient)
noxuDefinition := fixtures.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped)
noxuDefinition, err = fixtures.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, f.DynamicClient)
if err != nil {
framework.Failf("failed to create CustomResourceDefinition: %v", err)
}
defer func() {
err = testserver.DeleteCustomResourceDefinition(noxuDefinition, apiExtensionClient)
err = fixtures.DeleteCustomResourceDefinition(noxuDefinition, apiExtensionClient)
if err != nil {
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
}
@@ -85,8 +85,8 @@ var _ = SIGDescribe("CustomResourceDefinition Watch", func() {
watchB, err := watchCRWithName(noxuResourceClient, watchCRNameB)
Expect(err).NotTo(HaveOccurred())
testCrA := testserver.NewNoxuInstance(ns, watchCRNameA)
testCrB := testserver.NewNoxuInstance(ns, watchCRNameB)
testCrA := fixtures.NewNoxuInstance(ns, watchCRNameA)
testCrB := fixtures.NewNoxuInstance(ns, watchCRNameB)
By("Creating first CR ")
testCrA, err = instantiateCustomResource(testCrA, noxuResourceClient, noxuDefinition)
@@ -125,7 +125,7 @@ func watchCRWithName(crdResourceClient dynamic.ResourceInterface, name string) (
}
func instantiateCustomResource(instanceToCreate *unstructured.Unstructured, client dynamic.ResourceInterface, definition *apiextensionsv1beta1.CustomResourceDefinition) (*unstructured.Unstructured, error) {
createdInstance, err := client.Create(instanceToCreate)
createdInstance, err := client.Create(instanceToCreate, metav1.CreateOptions{})
if err != nil {
return nil, err
}

View File

@@ -19,7 +19,7 @@ package apimachinery
import (
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/testserver"
"k8s.io/apiextensions-apiserver/test/integration/fixtures"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
@@ -34,9 +34,9 @@ var _ = SIGDescribe("CustomResourceDefinition resources", func() {
Context("Simple CustomResourceDefinition", func() {
/*
Testname: crd-creation-test
Description: Create a random Custom Resource Definition and make sure
the API returns success.
Release : v1.9
Testname: Custom Resource Definition, create
Description: Create an API extension client, define a random custom resource definition, and create it. The API server MUST be able to create the custom resource definition.
*/
framework.ConformanceIt("creating/deleting custom resource definition objects works ", func() {
@@ -52,16 +52,16 @@ var _ = SIGDescribe("CustomResourceDefinition resources", func() {
framework.Failf("failed to initialize apiExtensionClient: %v", err)
}
randomDefinition := testserver.NewRandomNameCustomResourceDefinition(v1beta1.ClusterScoped)
randomDefinition := fixtures.NewRandomNameCustomResourceDefinition(v1beta1.ClusterScoped)
//create CRD and waits for the resource to be recognized and available.
randomDefinition, err = testserver.CreateNewCustomResourceDefinition(randomDefinition, apiExtensionClient, f.DynamicClient)
randomDefinition, err = fixtures.CreateNewCustomResourceDefinition(randomDefinition, apiExtensionClient, f.DynamicClient)
if err != nil {
framework.Failf("failed to create CustomResourceDefinition: %v", err)
}
defer func() {
err = testserver.DeleteCustomResourceDefinition(randomDefinition, apiExtensionClient)
err = fixtures.DeleteCustomResourceDefinition(randomDefinition, apiExtensionClient)
if err != nil {
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
}

View File

@@ -27,7 +27,7 @@ import (
"k8s.io/api/extensions/v1beta1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apiextensionstestserver "k8s.io/apiextensions-apiserver/test/integration/testserver"
apiextensionstestserver "k8s.io/apiextensions-apiserver/test/integration/fixtures"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -104,7 +104,7 @@ func getPodTemplateSpec(labels map[string]string) v1.PodTemplateSpec {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
},
},
},
@@ -128,10 +128,6 @@ func newOwnerDeployment(f *framework.Framework, deploymentName string, labels ma
}
}
func getSelector() map[string]string {
return map[string]string{"app": "gc-test"}
}
func newOwnerRC(f *framework.Framework, name string, replicas int32, labels map[string]string) *v1.ReplicationController {
template := getPodTemplateSpec(labels)
return &v1.ReplicationController{
@@ -151,45 +147,6 @@ func newOwnerRC(f *framework.Framework, name string, replicas int32, labels map[
}
}
// verifyRemainingDeploymentsReplicaSetsPods verifies if the number
// of the remaining deployments, replica set and pods are deploymentNum,
// rsNum and podNum. It returns error if the communication with the API
// server fails.
func verifyRemainingDeploymentsReplicaSetsPods(
f *framework.Framework,
clientSet clientset.Interface,
deployment *v1beta1.Deployment,
deploymentNum, rsNum, podNum int,
) (bool, error) {
var ret = true
rs, err := clientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list rs: %v", err)
}
if len(rs.Items) != rsNum {
ret = false
By(fmt.Sprintf("expected %d rs, got %d rs", rsNum, len(rs.Items)))
}
deployments, err := clientSet.ExtensionsV1beta1().Deployments(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list deployments: %v", err)
}
if len(deployments.Items) != deploymentNum {
ret = false
By(fmt.Sprintf("expected %d Deployments, got %d Deployments", deploymentNum, len(deployments.Items)))
}
pods, err := clientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
if len(pods.Items) != podNum {
ret = false
By(fmt.Sprintf("expected %v Pods, got %d Pods", podNum, len(pods.Items)))
}
return ret, nil
}
func newGCPod(name string) *v1.Pod {
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
@@ -204,69 +161,77 @@ func newGCPod(name string) *v1.Pod {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
},
},
},
}
}
// verifyRemainingReplicationControllersPods verifies if the number of the remaining replication
// controllers and pods are rcNum and podNum. It returns error if the
// communication with the API server fails.
func verifyRemainingReplicationControllersPods(f *framework.Framework, clientSet clientset.Interface, rcNum, podNum int) (bool, error) {
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
pods, err := clientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
var ret = true
if len(pods.Items) != podNum {
ret = false
By(fmt.Sprintf("expected %d pods, got %d pods", podNum, len(pods.Items)))
}
rcs, err := rcClient.List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list replication controllers: %v", err)
}
if len(rcs.Items) != rcNum {
ret = false
By(fmt.Sprintf("expected %d RCs, got %d RCs", rcNum, len(rcs.Items)))
}
return ret, nil
}
// verifyRemainingCronJobsJobsPods verifies if the number of remaining cronjobs,
// jobs and pods. It returns error if the communication with the API server fails.
func verifyRemainingCronJobsJobsPods(f *framework.Framework, clientSet clientset.Interface,
cjNum, jobNum, podNum int) (bool, error) {
// verifyRemainingObjects verifies that the number of remaining objects of
// each listed kind matches the expected count. It returns an error if the
// communication with the API server fails.
func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (bool, error) {
var ret = true
cronJobs, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list cronjobs: %v", err)
}
if len(cronJobs.Items) != cjNum {
ret = false
By(fmt.Sprintf("expected %d cronjobs, got %d cronjobs", cjNum, len(cronJobs.Items)))
}
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list jobs: %v", err)
}
if len(jobs.Items) != jobNum {
ret = false
By(fmt.Sprintf("expected %d jobs, got %d jobs", jobNum, len(jobs.Items)))
}
pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
if len(pods.Items) != podNum {
ret = false
By(fmt.Sprintf("expected %d pods, got %d pods", podNum, len(pods.Items)))
for object, num := range objects {
switch object {
case "Pods":
pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list pods: %v", err)
}
if len(pods.Items) != num {
ret = false
By(fmt.Sprintf("expected %d pods, got %d pods", num, len(pods.Items)))
}
case "Deployments":
deployments, err := f.ClientSet.ExtensionsV1beta1().Deployments(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list deployments: %v", err)
}
if len(deployments.Items) != num {
ret = false
By(fmt.Sprintf("expected %d Deployments, got %d Deployments", num, len(deployments.Items)))
}
case "ReplicaSets":
rs, err := f.ClientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list rs: %v", err)
}
if len(rs.Items) != num {
ret = false
By(fmt.Sprintf("expected %d rs, got %d rs", num, len(rs.Items)))
}
case "ReplicationControllers":
rcs, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list replication controllers: %v", err)
}
if len(rcs.Items) != num {
ret = false
By(fmt.Sprintf("expected %d RCs, got %d RCs", num, len(rcs.Items)))
}
case "CronJobs":
cronJobs, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list cronjobs: %v", err)
}
if len(cronJobs.Items) != num {
ret = false
By(fmt.Sprintf("expected %d cronjobs, got %d cronjobs", num, len(cronJobs.Items)))
}
case "Jobs":
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list jobs: %v", err)
}
if len(jobs.Items) != num {
ret = false
By(fmt.Sprintf("expected %d jobs, got %d jobs", num, len(jobs.Items)))
}
default:
return false, fmt.Errorf("object %s is not supported", object)
}
}
return ret, nil
@@ -312,7 +277,7 @@ func newCronJob(name, schedule string) *batchv1beta1.CronJob {
Containers: []v1.Container{
{
Name: "c",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sleep", "300"},
},
},
@@ -336,10 +301,9 @@ var _ = SIGDescribe("Garbage collector", func() {
f := framework.NewDefaultFramework("gc")
/*
Testname: garbage-collector-delete-rc--propagation-background
Description: Ensure that if deleteOptions.PropagationPolicy is set to Background,
then deleting a ReplicationController should cause pods created
by that RC to also be deleted.
Release : v1.9
Testname: Garbage Collector, delete replication controller, propagation policy background
Description: Create a replication controller with 2 Pods. Once the RC is created and the first Pod is created, delete the RC with deleteOptions.PropagationPolicy set to Background. Deleting the Replication Controller MUST cause pods created by that RC to be deleted.
*/
framework.ConformanceIt("should delete pods created by rc when not orphaning", func() {
clientSet := f.ClientSet
@@ -380,7 +344,8 @@ var _ = SIGDescribe("Garbage collector", func() {
By("wait for all pods to be garbage collected")
// wait for the RCs and Pods to reach the expected numbers.
if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
return verifyRemainingReplicationControllersPods(f, clientSet, 0, 0)
objects := map[string]int{"ReplicationControllers": 0, "Pods": 0}
return verifyRemainingObjects(f, objects)
}); err != nil {
framework.Failf("failed to wait for all pods to be deleted: %v", err)
remainingPods, err := podClient.List(metav1.ListOptions{})
@@ -394,10 +359,9 @@ var _ = SIGDescribe("Garbage collector", func() {
})
/*
Testname: garbage-collector-delete-rc--propagation-orphan
Description: Ensure that if deleteOptions.PropagationPolicy is set to Orphan,
then deleting a ReplicationController should cause pods created
by that RC to be orphaned.
Release : v1.9
Testname: Garbage Collector, delete replication controller, propagation policy orphan
Description: Create a replication controller with maximum allocatable Pods between 10 and 100 replicas. Once the RC is created and all Pods are created, delete the RC with deleteOptions.PropagationPolicy set to Orphan. Deleting the Replication Controller MUST cause pods created by that RC to be orphaned.
*/
framework.ConformanceIt("should orphan pods created by rc if delete options say so", func() {
clientSet := f.ClientSet
@@ -463,6 +427,8 @@ var _ = SIGDescribe("Garbage collector", func() {
gatherMetrics(f)
})
// deleteOptions.OrphanDependents is deprecated in 1.7; use deleteOptions.PropagationPolicy instead.
// Promotion to conformance is tracked under https://github.com/kubernetes/kubernetes/issues/65427.
It("should orphan pods created by rc if deleteOptions.OrphanDependents is nil", func() {
clientSet := f.ClientSet
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
@@ -508,10 +474,9 @@ var _ = SIGDescribe("Garbage collector", func() {
})
/*
Testname: garbage-collector-delete-deployment-propagation-background
Description: Ensure that if deleteOptions.PropagationPolicy is set to Background,
then deleting a Deployment should cause ReplicaSets created
by that Deployment to also be deleted.
Release : v1.9
Testname: Garbage Collector, delete deployment, propagation policy background
Description: Create a deployment with a replicaset. Once the replicaset is created, delete the deployment with deleteOptions.PropagationPolicy set to Background. Deleting the deployment MUST delete the replicaset created by the deployment, and the Pods that belong to the deployment MUST also be deleted.
*/
framework.ConformanceIt("should delete RS created by deployment when not orphaning", func() {
clientSet := f.ClientSet
@@ -547,7 +512,8 @@ var _ = SIGDescribe("Garbage collector", func() {
}
By("wait for all rs to be garbage collected")
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
return verifyRemainingDeploymentsReplicaSetsPods(f, clientSet, deployment, 0, 0, 0)
objects := map[string]int{"Deployments": 0, "ReplicaSets": 0, "Pods": 0}
return verifyRemainingObjects(f, objects)
})
if err != nil {
errList := make([]error, 0)
@@ -567,10 +533,9 @@ var _ = SIGDescribe("Garbage collector", func() {
})
/*
Testname: garbage-collector-delete-deployment-propagation-true
Description: Ensure that if deleteOptions.PropagationPolicy is set to Orphan,
then deleting a Deployment should cause ReplicaSets created
by that Deployment to be orphaned.
Release : v1.9
Testname: Garbage Collector, delete deployment, propagation policy orphan
Description: Create a deployment with a replicaset. Once the replicaset is created, delete the deployment with deleteOptions.PropagationPolicy set to Orphan. Deleting the deployment MUST cause the replicaset created by the deployment to be orphaned, and the Pods created by the deployment MUST also be orphaned.
*/
framework.ConformanceIt("should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan", func() {
clientSet := f.ClientSet
@@ -606,7 +571,8 @@ var _ = SIGDescribe("Garbage collector", func() {
}
By("wait for 30 seconds to see if the garbage collector mistakenly deletes the rs")
time.Sleep(30 * time.Second)
ok, err := verifyRemainingDeploymentsReplicaSetsPods(f, clientSet, deployment, 0, 1, 2)
objects := map[string]int{"Deployments": 0, "ReplicaSets": 1, "Pods": 2}
ok, err := verifyRemainingObjects(f, objects)
if err != nil {
framework.Failf("Unexpected error while verifying remaining deployments, rs, and pods: %v", err)
}
@@ -641,9 +607,9 @@ var _ = SIGDescribe("Garbage collector", func() {
})
/*
Testname: garbage-collector-delete-rc-after-owned-pods
Description: Ensure that if deleteOptions.PropagationPolicy is set to Foreground,
then a ReplicationController should not be deleted until all its dependent pods are deleted.
Release : v1.9
Testname: Garbage Collector, delete replication controller, after owned pods
Description: Create a replication controller with maximum allocatable Pods between 10 and 100 replicas. Once the RC is created and all Pods are created, delete the RC with deleteOptions.PropagationPolicy set to Foreground. Deleting the Replication Controller MUST cause pods created by that RC to be deleted before the RC is deleted.
*/
framework.ConformanceIt("should keep the rc around until all its pods are deleted if the deleteOptions says so", func() {
clientSet := f.ClientSet
@@ -729,9 +695,9 @@ var _ = SIGDescribe("Garbage collector", func() {
// TODO: this should be an integration test
/*
Testname: garbage-collector-multiple-owners
Description: Ensure that if a Pod has multiple valid owners, it will not be deleted
when one of of those owners gets deleted.
Release : v1.9
Testname: Garbage Collector, multiple owners
Description: Create a replication controller RC1, with maximum allocatable Pods between 10 and 100 replicas. Create a second replication controller RC2 and set RC2 as the owner for half of those replicas. Once RC1 is created and all Pods are created, delete RC1 with deleteOptions.PropagationPolicy set to Foreground. Half of the Pods, those that have RC2 as an owner, MUST not be deleted but MUST have a deletion timestamp. Deleting the Replication Controller MUST not delete Pods that are owned by multiple replication controllers.
*/
framework.ConformanceIt("should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted", func() {
clientSet := f.ClientSet
@@ -843,9 +809,9 @@ var _ = SIGDescribe("Garbage collector", func() {
// TODO: should be an integration test
/*
Testname: garbage-collector-dependency-cycle
Description: Ensure that a dependency cycle will
not block the garbage collector.
Release : v1.9
Testname: Garbage Collector, dependency cycle
Description: Create three pods, patch them with Owner references such that pod1 has pod3, pod2 has pod1 and pod3 has pod2 as owner references respectively. Deleting pod1 MUST delete all pods. The dependency cycle MUST not block garbage collection.
*/
framework.ConformanceIt("should not be blocked by dependency circle", func() {
clientSet := f.ClientSet
@@ -939,7 +905,7 @@ var _ = SIGDescribe("Garbage collector", func() {
},
},
}
persistedOwner, err := resourceClient.Create(owner)
persistedOwner, err := resourceClient.Create(owner, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create owner resource %q: %v", ownerName, err)
}
@@ -964,7 +930,7 @@ var _ = SIGDescribe("Garbage collector", func() {
},
},
}
persistedDependent, err := resourceClient.Create(dependent)
persistedDependent, err := resourceClient.Create(dependent, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
}
@@ -1040,7 +1006,7 @@ var _ = SIGDescribe("Garbage collector", func() {
},
},
}
persistedOwner, err := resourceClient.Create(owner)
persistedOwner, err := resourceClient.Create(owner, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create owner resource %q: %v", ownerName, err)
}
@@ -1065,7 +1031,7 @@ var _ = SIGDescribe("Garbage collector", func() {
},
},
}
_, err = resourceClient.Create(dependent)
_, err = resourceClient.Create(dependent, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
}
@@ -1127,7 +1093,8 @@ var _ = SIGDescribe("Garbage collector", func() {
}
By("Verify if cronjob does not leave jobs nor pods behind")
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
return verifyRemainingCronJobsJobsPods(f, f.ClientSet, 0, 0, 0)
objects := map[string]int{"CronJobs": 0, "Jobs": 0, "Pods": 0}
return verifyRemainingObjects(f, objects)
})
if err != nil {
framework.Failf("Failed to wait for all jobs and pods to be deleted: %v", err)

View File

@@ -49,7 +49,7 @@ func stagingClientPod(name, value string) v1.Pod {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
Ports: []v1.ContainerPort{{ContainerPort: 80}},
},
},
@@ -70,7 +70,7 @@ func testingPod(name, value string) v1.Pod {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
Ports: []v1.ContainerPort{{ContainerPort: 80}},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
@@ -243,7 +243,7 @@ func newTestingCronJob(name string, value string) *batchv1beta1.CronJob {
Containers: []v1.Container{
{
Name: "c",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
VolumeMounts: []v1.VolumeMount{
{
MountPath: "/data",

View File

@@ -262,10 +262,18 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
f := framework.NewDefaultFramework("namespaces")
It("should ensure that all pods are removed when a namespace is deleted.",
/*
Testname: namespace-deletion-removes-pods
Description: Ensure that if a namespace is deleted then all pods are removed from that namespace.
*/
framework.ConformanceIt("should ensure that all pods are removed when a namespace is deleted",
func() { ensurePodsAreRemovedWhenNamespaceIsDeleted(f) })
It("should ensure that all services are removed when a namespace is deleted.",
/*
Testname: namespace-deletion-removes-services
Description: Ensure that if a namespace is deleted then all services are removed from that namespace.
*/
framework.ConformanceIt("should ensure that all services are removed when a namespace is deleted",
func() { ensureServicesAreRemovedWhenNamespaceIsDeleted(f) })
It("should delete fast enough (90 percent of 100 namespaces in 150 seconds)",

View File

@@ -52,21 +52,23 @@ const (
roleBindingName = "webhook-auth-reader"
// The webhook configuration names should not be reused between test instances.
crWebhookConfigName = "e2e-test-webhook-config-cr"
webhookConfigName = "e2e-test-webhook-config"
mutatingWebhookConfigName = "e2e-test-mutating-webhook-config"
podMutatingWebhookConfigName = "e2e-test-mutating-webhook-pod"
crMutatingWebhookConfigName = "e2e-test-mutating-webhook-config-cr"
webhookFailClosedConfigName = "e2e-test-webhook-fail-closed"
webhookForWebhooksConfigName = "e2e-test-webhook-for-webhooks-config"
removableValidatingHookName = "e2e-test-should-be-removable-validating-webhook-config"
removableMutatingHookName = "e2e-test-should-be-removable-mutating-webhook-config"
crdWebhookConfigName = "e2e-test-webhook-config-crd"
crWebhookConfigName = "e2e-test-webhook-config-cr"
webhookConfigName = "e2e-test-webhook-config"
attachingPodWebhookConfigName = "e2e-test-webhook-config-attaching-pod"
mutatingWebhookConfigName = "e2e-test-mutating-webhook-config"
podMutatingWebhookConfigName = "e2e-test-mutating-webhook-pod"
crMutatingWebhookConfigName = "e2e-test-mutating-webhook-config-cr"
webhookFailClosedConfigName = "e2e-test-webhook-fail-closed"
webhookForWebhooksConfigName = "e2e-test-webhook-for-webhooks-config"
removableValidatingHookName = "e2e-test-should-be-removable-validating-webhook-config"
removableMutatingHookName = "e2e-test-should-be-removable-mutating-webhook-config"
crdWebhookConfigName = "e2e-test-webhook-config-crd"
skipNamespaceLabelKey = "skip-webhook-admission"
skipNamespaceLabelValue = "yes"
skippedNamespaceName = "exempted-namesapce"
disallowedPodName = "disallowed-pod"
toBeAttachedPodName = "to-be-attached-pod"
hangingPodName = "hanging-pod"
disallowedConfigMapName = "disallowed-configmap"
allowedConfigMapName = "allowed-configmap"
@@ -117,6 +119,12 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
testWebhook(f)
})
It("Should be able to deny attaching pod", func() {
webhookCleanup := registerWebhookForAttachingPod(f, context)
defer webhookCleanup()
testAttachingPodWebhook(f)
})
It("Should be able to deny custom resource creation", func() {
testcrd, err := framework.CreateTestCRD(f)
if err != nil {
@@ -405,6 +413,53 @@ func registerWebhook(f *framework.Framework, context *certContext) func() {
}
}
func registerWebhookForAttachingPod(f *framework.Framework, context *certContext) func() {
client := f.ClientSet
By("Registering the webhook via the AdmissionRegistration API")
namespace := f.Namespace.Name
configName := attachingPodWebhookConfigName
// A webhook that cannot talk to server, with fail-open policy
failOpenHook := failingWebhook(namespace, "fail-open.k8s.io")
policyIgnore := v1beta1.Ignore
failOpenHook.FailurePolicy = &policyIgnore
_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: configName,
},
Webhooks: []v1beta1.Webhook{
{
Name: "deny-attaching-pod.k8s.io",
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Connect},
Rule: v1beta1.Rule{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"pods/attach"},
},
}},
ClientConfig: v1beta1.WebhookClientConfig{
Service: &v1beta1.ServiceReference{
Namespace: namespace,
Name: serviceName,
Path: strPtr("/pods/attach"),
},
CABundle: context.signingCert,
},
},
},
})
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
// The webhook configuration should be honored within 10s.
time.Sleep(10 * time.Second)
return func() {
client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(configName, nil)
}
}
func registerMutatingWebhookForConfigMap(f *framework.Framework, context *certContext) func() {
client := f.ClientSet
By("Registering the mutating configmap webhook via the AdmissionRegistration API")
@@ -576,7 +631,7 @@ func testWebhook(f *framework.Framework) {
pod = hangingPod(f)
_, err = client.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(BeNil())
expectedTimeoutErr := "request did not complete within allowed duration"
expectedTimeoutErr := "request did not complete within"
if !strings.Contains(err.Error(), expectedTimeoutErr) {
framework.Failf("expect timeout error %q, got %q", expectedTimeoutErr, err.Error())
}
@@ -642,6 +697,21 @@ func testWebhook(f *framework.Framework) {
Expect(err).To(BeNil())
}
func testAttachingPodWebhook(f *framework.Framework) {
By("create a pod")
client := f.ClientSet
pod := toBeAttachedPod(f)
_, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(BeNil())
By("'kubectl attach' the pod, should be denied by the webhook")
_, err = framework.NewKubectlCommand("attach", fmt.Sprintf("--namespace=%v", f.Namespace.Name), pod.Name, "-i", "-c=container1").Exec()
Expect(err).NotTo(BeNil())
if e, a := "attaching to pod 'to-be-attached-pod' is not allowed", err.Error(); !strings.Contains(a, e) {
framework.Failf("unexpected 'kubectl attach' error message. expected to contain %q, got %q", e, a)
}
}
// failingWebhook returns a webhook with a rule matching configmap creation,
// but with an invalid client config so that the API server cannot communicate with it.
func failingWebhook(namespace, name string) v1beta1.Webhook {
@@ -930,6 +1000,22 @@ func hangingPod(f *framework.Framework) *v1.Pod {
}
}
func toBeAttachedPod(f *framework.Framework) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: toBeAttachedPodName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "container1",
Image: imageutils.GetPauseImageName(),
},
},
},
}
}
func nonCompliantConfigMap(f *framework.Framework) *v1.ConfigMap {
return &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -1100,7 +1186,7 @@ func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1beta1
},
},
}
_, err := customResourceClient.Create(crInstance)
_, err := customResourceClient.Create(crInstance, metav1.CreateOptions{})
Expect(err).NotTo(BeNil())
expectedErrMsg := "the custom resource contains unwanted data"
if !strings.Contains(err.Error(), expectedErrMsg) {
@@ -1123,7 +1209,7 @@ func testMutatingCustomResourceWebhook(f *framework.Framework, crd *apiextension
},
},
}
mutatedCR, err := customResourceClient.Create(cr)
mutatedCR, err := customResourceClient.Create(cr, metav1.CreateOptions{})
Expect(err).To(BeNil())
expectedCRData := map[string]interface{}{
"mutation-start": "yes",

View File

@@ -37,7 +37,28 @@ go_library(
"//pkg/controller/replication:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/util/pointer:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
@@ -45,27 +66,7 @@ go_library(
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)

View File

@@ -5,3 +5,5 @@ approvers:
- mfojtik
reviewers:
- sig-apps-reviewers
labels:
- sig/apps

View File

@@ -34,6 +34,7 @@ import (
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/controller/job"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
@@ -298,7 +299,7 @@ func newTestCronJob(name, schedule string, concurrencyPolicy batchv1beta1.Concur
Containers: []v1.Container{
{
Name: "c",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
VolumeMounts: []v1.VolumeMount{
{
MountPath: "/data",

View File

@@ -24,8 +24,7 @@ import (
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
apierrs "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
@@ -54,6 +53,10 @@ const (
daemonsetColorLabel = daemonsetLabelPrefix + "color"
)
// The annotation key scheduler.alpha.kubernetes.io/node-selector is for assigning
// node selector labels to namespaces.
var NamespaceNodeSelectors = []string{"scheduler.alpha.kubernetes.io/node-selector"}
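// Setting these annotations to the empty string (see updateNamespaceAnnotations
// below) clears any namespace-level node selector (consumed by the PodNodeSelector
// admission plugin), so daemon set pods are not filtered out of the test namespace.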
// This test must be run in serial because it assumes the Daemon Set pods will
// always get scheduled. If we run other tests in parallel, this may not
// happen. In the future, running in parallel may work if we have an eviction
@@ -100,7 +103,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ns = f.Namespace.Name
c = f.ClientSet
err := clearDaemonSetNodeLabels(c)
updatedNS, err := updateNamespaceAnnotations(c, ns)
Expect(err).NotTo(HaveOccurred())
ns = updatedNS.Name
err = clearDaemonSetNodeLabels(c)
Expect(err).NotTo(HaveOccurred())
})
@@ -495,6 +504,26 @@ func clearDaemonSetNodeLabels(c clientset.Interface) error {
return nil
}
// updateNamespaceAnnotations sets the node-selector-related annotations on test namespaces to empty values
func updateNamespaceAnnotations(c clientset.Interface, nsName string) (*v1.Namespace, error) {
nsClient := c.CoreV1().Namespaces()
ns, err := nsClient.Get(nsName, metav1.GetOptions{})
if err != nil {
return nil, err
}
if ns.Annotations == nil {
ns.Annotations = make(map[string]string)
}
for _, n := range NamespaceNodeSelectors {
ns.Annotations[n] = ""
}
return nsClient.Update(ns)
}
func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) {
nodeClient := c.CoreV1().Nodes()
var newNode *v1.Node
@@ -520,7 +549,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
newLabels, _ = separateDaemonSetNodeLabels(newNode.Labels)
return true, err
}
if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
if se, ok := err.(*apierrors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
framework.Logf("failed to update node due to resource version conflict")
return false, nil
}
@@ -734,7 +763,7 @@ func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *a
func waitFailedDaemonPodDeleted(c clientset.Interface, pod *v1.Pod) func() (bool, error) {
return func() (bool, error) {
if _, err := c.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err != nil {
if errors.IsNotFound(err) {
if apierrors.IsNotFound(err) {
return true, nil
}
return false, fmt.Errorf("failed to get failed daemon pod %q: %v", pod.Name, err)

View File

@@ -38,9 +38,9 @@ import (
clientset "k8s.io/client-go/kubernetes"
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
"k8s.io/kubernetes/test/e2e/framework"
testutil "k8s.io/kubernetes/test/utils"
utilpointer "k8s.io/utils/pointer"
)
const (
@@ -70,16 +70,35 @@ var _ = SIGDescribe("Deployment", func() {
It("deployment reaping should cascade to its replica sets and pods", func() {
testDeleteDeployment(f)
})
It("RollingUpdateDeployment should delete old pods and create new ones", func() {
/*
Testname: Deployment RollingUpdate
Description: A conformant Kubernetes distribution MUST support the Deployment with RollingUpdate strategy.
*/
framework.ConformanceIt("RollingUpdateDeployment should delete old pods and create new ones", func() {
testRollingUpdateDeployment(f)
})
It("RecreateDeployment should delete old pods and create new ones", func() {
/*
Testname: Deployment Recreate
Description: A conformant Kubernetes distribution MUST support the Deployment with Recreate strategy.
*/
framework.ConformanceIt("RecreateDeployment should delete old pods and create new ones", func() {
testRecreateDeployment(f)
})
It("deployment should delete old replica sets", func() {
/*
Testname: Deployment RevisionHistoryLimit
Description: A conformant Kubernetes distribution MUST clean up Deployment's ReplicaSets based on
the Deployment's `.spec.revisionHistoryLimit`.
*/
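// For example, with `.spec.revisionHistoryLimit: 0` every old ReplicaSet is
// expected to be deleted as soon as it is no longer needed for rollback.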
framework.ConformanceIt("deployment should delete old replica sets", func() {
testDeploymentCleanUpPolicy(f)
})
It("deployment should support rollover", func() {
/*
Testname: Deployment Rollover
Description: A conformant Kubernetes distribution MUST support Deployment rollover,
i.e. allow an arbitrary number of changes to the desired state during a rolling update
before the rollout finishes.
*/
framework.ConformanceIt("deployment should support rollover", func() {
testRolloverDeployment(f)
})
It("deployment should support rollback", func() {
@@ -91,7 +110,13 @@ var _ = SIGDescribe("Deployment", func() {
It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func() {
testDeploymentsControllerRef(f)
})
It("deployment should support proportional scaling", func() {
/*
Testname: Deployment Proportional Scaling
Description: A conformant Kubernetes distribution MUST support Deployment
proportional scaling, i.e. proportionally scale a Deployment's ReplicaSets
when a Deployment is scaled.
*/
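// Illustrative numbers: scaling a 10-replica Deployment that is mid-rollout
// with ReplicaSets of 7 and 3 pods up to 20 replicas should grow them
// proportionally, to roughly 14 and 6.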
framework.ConformanceIt("deployment should support proportional scaling", func() {
testProportionalScalingDeployment(f)
})
// TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues

View File

@@ -44,7 +44,7 @@ var _ = SIGDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring job reaches completions")
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
Expect(err).NotTo(HaveOccurred())
})
@@ -63,7 +63,7 @@ var _ = SIGDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring job reaches completions")
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
Expect(err).NotTo(HaveOccurred())
})
@@ -84,7 +84,7 @@ var _ = SIGDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring job reaches completions")
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
Expect(err).NotTo(HaveOccurred())
})

View File

@@ -106,12 +106,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
f := framework.NewDefaultFramework("network-partition")
var c clientset.Interface
var ns string
ignoreLabels := framework.ImagePullerLabels
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
_, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
_, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
Expect(err).NotTo(HaveOccurred())
// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
@@ -197,7 +196,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
go controller.Run(stopCh)
By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
host := framework.GetNodeExternalIP(&node)
host, err := framework.GetNodeExternalIP(&node)
framework.ExpectNoError(err)
master := framework.GetMasterAddress(c)
defer func() {
By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
@@ -574,7 +574,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
go controller.Run(stopCh)
By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
host := framework.GetNodeExternalIP(&node)
host, err := framework.GetNodeExternalIP(&node)
framework.ExpectNoError(err)
master := framework.GetMasterAddress(c)
defer func() {
By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))

View File

@@ -17,6 +17,7 @@ limitations under the License.
package apps
import (
"context"
"fmt"
"strings"
"time"
@@ -31,6 +32,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -248,6 +250,14 @@ var _ = SIGDescribe("StatefulSet", func() {
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
})
// This can't be Conformance yet because it depends on a default
// StorageClass and a dynamic provisioner.
It("should perform rolling updates and roll backs of template modifications with PVCs", func() {
By("Creating a new StatefulSet with PVCs")
*(ss.Spec.Replicas) = 3
rollbackTest(c, ns, ss)
})
/*
Release : v1.9
Testname: StatefulSet, Rolling Update
@@ -256,116 +266,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ConformanceIt("should perform rolling updates and roll backs of template modifications", func() {
By("Creating a new StatefulSet")
ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
sst := framework.NewStatefulSetTester(c)
sst.SetHttpProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).To(Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
ss.Namespace, ss.Name, updateRevision, currentRevision))
pods := sst.GetPodList(ss)
for i := range pods.Items {
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
currentRevision))
}
sst.SortStatefulPods(pods)
sst.BreakPodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
newImage := NewNginxImage
oldImage := ss.Spec.Template.Spec.Containers[0].Image
By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage))
Expect(oldImage).NotTo(Equal(newImage), "Incorrect test setup: should update to a different image")
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
Expect(err).NotTo(HaveOccurred())
By("Creating a new revision")
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
"Current revision should not equal update revision during rolling update")
By("Updating Pods in reverse ordinal order")
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
sst.RestorePodHttpProbe(ss, &pods.Items[1])
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
updateRevision))
for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(newImage),
fmt.Sprintf(" Pod %s/%s has image %s not have new image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
newImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(updateRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to update revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
updateRevision))
}
By("Rolling back to a previous revision")
sst.BreakPodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
priorRevision := currentRevision
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = oldImage
})
Expect(err).NotTo(HaveOccurred())
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
"Current revision should not equal update revision during roll back")
Expect(priorRevision).To(Equal(updateRevision),
"Prior revision should equal update revision during roll back")
By("Rolling back update in reverse ordinal order")
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
sst.RestorePodHttpProbe(ss, &pods.Items[1])
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(priorRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
updateRevision))
for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(oldImage),
fmt.Sprintf("Pod %s/%s has image %s not equal to previous image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
oldImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(priorRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to prior revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
priorRevision))
}
rollbackTest(c, ns, ss)
})
/*
@@ -700,7 +601,9 @@ var _ = SIGDescribe("StatefulSet", func() {
By("Verifying that stateful set " + ssName + " was scaled up in order")
expectedOrder := []string{ssName + "-0", ssName + "-1", ssName + "-2"}
_, err = watch.Until(framework.StatefulSetTimeout, watcher, func(event watch.Event) (bool, error) {
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulSetTimeout)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) {
if event.Type != watch.Added {
return false, nil
}
@@ -731,7 +634,9 @@ var _ = SIGDescribe("StatefulSet", func() {
By("Verifying that stateful set " + ssName + " was scaled down in reverse order")
expectedOrder = []string{ssName + "-2", ssName + "-1", ssName + "-0"}
_, err = watch.Until(framework.StatefulSetTimeout, watcher, func(event watch.Event) (bool, error) {
ctx, cancel = watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulSetTimeout)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) {
if event.Type != watch.Deleted {
return false, nil
}
@@ -810,7 +715,7 @@ var _ = SIGDescribe("StatefulSet", func() {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
Ports: []v1.ContainerPort{conflictingPort},
},
},
@@ -837,8 +742,10 @@ var _ = SIGDescribe("StatefulSet", func() {
By("Waiting until stateful pod " + statefulPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name)
w, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: statefulPodName}))
framework.ExpectNoError(err)
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulPodTimeout)
defer cancel()
// we need to get the UID from the pod in any state and wait until the stateful set controller removes the pod at least once
_, err = watch.Until(framework.StatefulPodTimeout, w, func(event watch.Event) (bool, error) {
_, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
pod := event.Object.(*v1.Pod)
switch event.Type {
case watch.Deleted:
@@ -862,7 +769,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ExpectNoError(err)
By("Waiting when stateful pod " + statefulPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state")
// we may catch delete event, thats why we are waiting for running phase like this, and not with watch.Until
// we may catch delete event, that's why we are waiting for running phase like this, and not with watchtools.UntilWithoutRetry
Eventually(func() error {
statefulPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(statefulPodName, metav1.GetOptions{})
if err != nil {
@@ -1176,3 +1083,119 @@ func pollReadWithTimeout(statefulPod statefulPodTester, statefulPodNumber int, k
}
return err
}
// This function is used by two tests to test StatefulSet rollbacks: one using
// PVCs and one using no storage.
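// Rough flow: create the set and wait for it to become ready, break one pod's
// HTTP probe to hold the rollout, update the image to cut a new revision, then
// restore the probe and wait for the rolling update; the same steps are then
// repeated with the old image to verify the rollback.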
func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
sst := framework.NewStatefulSetTester(c)
sst.SetHttpProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).To(Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
ss.Namespace, ss.Name, updateRevision, currentRevision))
pods := sst.GetPodList(ss)
for i := range pods.Items {
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
currentRevision))
}
sst.SortStatefulPods(pods)
err = sst.BreakPodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
newImage := NewNginxImage
oldImage := ss.Spec.Template.Spec.Containers[0].Image
By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage))
Expect(oldImage).NotTo(Equal(newImage), "Incorrect test setup: should update to a different image")
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
Expect(err).NotTo(HaveOccurred())
By("Creating a new revision")
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
"Current revision should not equal update revision during rolling update")
By("Updating Pods in reverse ordinal order")
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
err = sst.RestorePodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
updateRevision))
for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(newImage),
fmt.Sprintf(" Pod %s/%s has image %s not have new image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
newImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(updateRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to update revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
updateRevision))
}
By("Rolling back to a previous revision")
err = sst.BreakPodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
priorRevision := currentRevision
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = oldImage
})
Expect(err).NotTo(HaveOccurred())
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
"Current revision should not equal update revision during roll back")
Expect(priorRevision).To(Equal(updateRevision),
"Prior revision should equal update revision during roll back")
By("Rolling back update in reverse ordinal order")
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
sst.RestorePodHttpProbe(ss, &pods.Items[1])
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(priorRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
updateRevision))
for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(oldImage),
fmt.Sprintf("Pod %s/%s has image %s not equal to previous image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
oldImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(priorRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to prior revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
priorRevision))
}
}

View File

@@ -31,7 +31,7 @@ var (
CronJobGroupVersionResourceBeta = schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"}
NautilusImage = imageutils.GetE2EImage(imageutils.Nautilus)
KittenImage = imageutils.GetE2EImage(imageutils.Kitten)
NginxImage = imageutils.GetE2EImage(imageutils.NginxSlim)
NewNginxImage = imageutils.GetE2EImage(imageutils.NginxSlimNew)
NginxImage = imageutils.GetE2EImage(imageutils.Nginx)
NewNginxImage = imageutils.GetE2EImage(imageutils.NginxNew)
RedisImage = imageutils.GetE2EImage(imageutils.Redis)
)

View File

@@ -12,45 +12,49 @@ go_library(
"certificates.go",
"framework.go",
"metadata_concealment.go",
"node_authn.go",
"node_authz.go",
"pod_security_policy.go",
"service_accounts.go",
],
importpath = "k8s.io/kubernetes/test/e2e/auth",
deps = [
"//pkg/master/ports:go_default_library",
"//pkg/security/apparmor:go_default_library",
"//pkg/security/podsecuritypolicy/seccomp:go_default_library",
"//pkg/security/podsecuritypolicy/util:go_default_library",
"//pkg/util/pointer:go_default_library",
"//plugin/pkg/admission/serviceaccount:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/certificates/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/audit:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/util/cert:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/evanphx/json-patch:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/util/cert:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)

File diff suppressed because it is too large.

View File

@@ -91,9 +91,10 @@ var _ = SIGDescribe("Certificates API", func() {
framework.Logf("waiting for CSR to be signed")
framework.ExpectNoError(wait.Poll(5*time.Second, time.Minute, func() (bool, error) {
csr, _ = csrs.Get(csrName, metav1.GetOptions{})
csr, err = csrs.Get(csrName, metav1.GetOptions{})
if err != nil {
return false, err
framework.Logf("error getting csr: %v", err)
return false, nil
}
if len(csr.Status.Certificate) == 0 {
framework.Logf("csr not signed yet")

View File

@@ -58,7 +58,7 @@ var _ = SIGDescribe("Metadata Concealment", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring job reaches completions")
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, int32(1))
err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, int32(1))
Expect(err).NotTo(HaveOccurred())
})
})

107
vendor/k8s.io/kubernetes/test/e2e/auth/node_authn.go generated vendored Normal file
View File

@@ -0,0 +1,107 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package auth
import (
"fmt"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
f := framework.NewDefaultFramework("node-authn")
var ns string
var nodeIPs []string
BeforeEach(func() {
ns = f.Namespace.Name
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(len(nodeList.Items)).NotTo(BeZero())
pickedNode := nodeList.Items[0]
nodeIPs = framework.GetNodeAddresses(&pickedNode, v1.NodeExternalIP)
// The pods running in the cluster can see the internal addresses.
nodeIPs = append(nodeIPs, framework.GetNodeAddresses(&pickedNode, v1.NodeInternalIP)...)
// make sure ServiceAccount admission controller is enabled, so secret generation on SA creation works
sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get("default", metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(len(sa.Secrets)).NotTo(BeZero())
})
It("The kubelet's main port 10250 should reject requests with no credentials", func() {
pod := createNodeAuthTestPod(f)
for _, nodeIP := range nodeIPs {
// Anonymous authentication is disabled by default
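// curl's -sIk issues a HEAD request without TLS verification; -o /dev/null discards
// the body and -w '%{http_code}' prints only the HTTP status code, so the assertion
// below compares bare status strings.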
result := framework.RunHostCmdOrDie(ns, pod.Name, fmt.Sprintf("curl -sIk -o /dev/null -w '%s' https://%s:%v/metrics", "%{http_code}", nodeIP, ports.KubeletPort))
Expect(result).To(Or(Equal("401"), Equal("403")), "the kubelet's main port 10250 should reject requests with no credentials")
}
})
It("The kubelet can delegate ServiceAccount tokens to the API server", func() {
By("create a new ServiceAccount for authentication")
trueValue := true
newSA := &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns,
Name: "node-auth-newSA",
},
AutomountServiceAccountToken: &trueValue,
}
_, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Create(newSA)
Expect(err).NotTo(HaveOccurred())
pod := createNodeAuthTestPod(f)
for _, nodeIP := range nodeIPs {
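// The backquoted cat of /var/run/secrets/kubernetes.io/serviceaccount/token runs in
// the pod's shell, so the request carries the pod's own ServiceAccount token as the
// Bearer credential.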
result := framework.RunHostCmdOrDie(ns,
pod.Name,
fmt.Sprintf("curl -sIk -o /dev/null -w '%s' --header \"Authorization: Bearer `%s`\" https://%s:%v/metrics",
"%{http_code}",
"cat /var/run/secrets/kubernetes.io/serviceaccount/token",
nodeIP, ports.KubeletPort))
Expect(result).To(Or(Equal("401"), Equal("403")), "the kubelet can delegate ServiceAccount tokens to the API server")
}
})
})
func createNodeAuthTestPod(f *framework.Framework) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "test-node-authn-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "test-node-authn",
Image: imageutils.GetE2EImage(imageutils.Hostexec),
Command: []string{"sleep 3600"},
}},
RestartPolicy: v1.RestartPolicyNever,
},
}
return f.PodClient().CreateSync(pod)
}

View File

@@ -32,10 +32,10 @@ import (
"k8s.io/kubernetes/pkg/security/apparmor"
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp"
psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util"
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
utilpointer "k8s.io/utils/pointer"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -75,7 +75,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
It("should forbid pod creation when no PSP is available", func() {
By("Running a restricted pod")
_, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "restricted"))
_, err := c.CoreV1().Pods(ns).Create(restrictedPod("restricted"))
expectForbidden(err)
})
@@ -87,11 +87,11 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
defer cleanup()
By("Running a restricted pod")
pod, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "allowed"))
pod, err := c.CoreV1().Pods(ns).Create(restrictedPod("allowed"))
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace))
testPrivilegedPods(f, func(pod *v1.Pod) {
testPrivilegedPods(func(pod *v1.Pod) {
_, err := c.CoreV1().Pods(ns).Create(pod)
expectForbidden(err)
})
@@ -103,11 +103,11 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
defer cleanup()
By("Running a restricted pod")
pod, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "allowed"))
pod, err := c.CoreV1().Pods(ns).Create(restrictedPod("allowed"))
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace))
testPrivilegedPods(f, func(pod *v1.Pod) {
testPrivilegedPods(func(pod *v1.Pod) {
_, err := c.CoreV1().Pods(ns).Create(pod)
expectForbidden(err)
})
@@ -121,7 +121,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
expectedPSP, cleanup := createAndBindPSP(f, framework.PrivilegedPSP("permissive"))
defer cleanup()
testPrivilegedPods(f, func(pod *v1.Pod) {
testPrivilegedPods(func(pod *v1.Pod) {
p, err := c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace))
@@ -143,7 +143,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
expectedPSP, cleanup := createAndBindPSPInPolicy(f, privilegedPSPInPolicy("permissive"))
defer cleanup()
testPrivilegedPods(f, func(pod *v1.Pod) {
testPrivilegedPods(func(pod *v1.Pod) {
p, err := c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace))
@@ -163,16 +163,16 @@ func expectForbidden(err error) {
Expect(apierrs.IsForbidden(err)).To(BeTrue(), "should be forbidden error")
}
func testPrivilegedPods(f *framework.Framework, tester func(pod *v1.Pod)) {
func testPrivilegedPods(tester func(pod *v1.Pod)) {
By("Running a privileged pod", func() {
privileged := restrictedPod(f, "privileged")
privileged := restrictedPod("privileged")
privileged.Spec.Containers[0].SecurityContext.Privileged = boolPtr(true)
privileged.Spec.Containers[0].SecurityContext.AllowPrivilegeEscalation = nil
tester(privileged)
})
By("Running a HostPath pod", func() {
hostpath := restrictedPod(f, "hostpath")
hostpath := restrictedPod("hostpath")
hostpath.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{{
Name: "hp",
MountPath: "/hp",
@@ -187,26 +187,26 @@ func testPrivilegedPods(f *framework.Framework, tester func(pod *v1.Pod)) {
})
By("Running a HostNetwork pod", func() {
hostnet := restrictedPod(f, "hostnet")
hostnet := restrictedPod("hostnet")
hostnet.Spec.HostNetwork = true
tester(hostnet)
})
By("Running a HostPID pod", func() {
hostpid := restrictedPod(f, "hostpid")
hostpid := restrictedPod("hostpid")
hostpid.Spec.HostPID = true
tester(hostpid)
})
By("Running a HostIPC pod", func() {
hostipc := restrictedPod(f, "hostipc")
hostipc := restrictedPod("hostipc")
hostipc.Spec.HostIPC = true
tester(hostipc)
})
if common.IsAppArmorSupported() {
By("Running a custom AppArmor profile pod", func() {
aa := restrictedPod(f, "apparmor")
aa := restrictedPod("apparmor")
// Every node is expected to have the docker-default profile.
aa.Annotations[apparmor.ContainerAnnotationKeyPrefix+"pause"] = "localhost/docker-default"
tester(aa)
@@ -214,13 +214,13 @@ func testPrivilegedPods(f *framework.Framework, tester func(pod *v1.Pod)) {
}
By("Running an unconfined Seccomp pod", func() {
unconfined := restrictedPod(f, "seccomp")
unconfined := restrictedPod("seccomp")
unconfined.Annotations[v1.SeccompPodAnnotationKey] = "unconfined"
tester(unconfined)
})
By("Running a SYS_ADMIN pod", func() {
sysadmin := restrictedPod(f, "sysadmin")
sysadmin := restrictedPod("sysadmin")
sysadmin.Spec.Containers[0].SecurityContext.Capabilities = &v1.Capabilities{
Add: []v1.Capability{"SYS_ADMIN"},
}
@@ -311,7 +311,7 @@ func createAndBindPSPInPolicy(f *framework.Framework, pspTemplate *policy.PodSec
}
}
func restrictedPod(f *framework.Framework, name string) *v1.Pod {
func restrictedPod(name string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,

View File

@@ -153,6 +153,15 @@ var _ = SIGDescribe("ServiceAccounts", func() {
}
})
/*
Release: v1.9
Testname: Service Account Tokens Must AutoMount
Description: Ensure that Service Account keys are mounted into the Container. The Pod
contains three containers, each of which reads the Service Account token,
the root CA and the default namespace, respectively, from the default API
token mount path. All three files MUST exist and the Service
Account mount path MUST be auto mounted to the Container.
*/
framework.ConformanceIt("should mount an API token into pods ", func() {
var tokenContent string
var rootCAContent string
@@ -235,7 +244,33 @@ var _ = SIGDescribe("ServiceAccounts", func() {
})
})
/*
Release: v1.9
Testname: Service account tokens auto mount optionally
Description: Ensure that Service Account keys are mounted into the Pod only
when AutomountServiceAccountToken is not set to false. We test the
following scenarios here.
1. Create Pod, Pod Spec has AutomountServiceAccountToken set to nil
a) Service Account with default value,
b) Service Account is configured with AutomountServiceAccountToken set to true,
c) Service Account is configured with AutomountServiceAccountToken set to false
2. Create Pod, Pod Spec has AutomountServiceAccountToken set to true
a) Service Account with default value,
b) Service Account is configured with AutomountServiceAccountToken set to true,
c) Service Account is configured with AutomountServiceAccountToken set to false
3. Create Pod, Pod Spec has AutomountServiceAccountToken set to false
a) Service Account with default value,
b) Service Account is configured with AutomountServiceAccountToken set to true,
c) Service Account is configured with AutomountServiceAccountToken set to false
The Containers running in these pods MUST verify that the ServiceTokenVolume path is
auto mounted only when Pod Spec has AutomountServiceAccountToken not set to false
and the ServiceAccount object has AutomountServiceAccountToken not set to false; this
includes test cases 1a, 1b, 2a, 2b and 2c.
In the test cases 1c,3a,3b and 3c the ServiceTokenVolume MUST not be auto mounted.
*/
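// Put differently (per the matrix above): a non-nil pod-level
// AutomountServiceAccountToken takes precedence over the ServiceAccount's
// setting, and when both are nil the token is mounted.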
framework.ConformanceIt("should allow opting out of API token automount ", func() {
var err error
trueValue := true
falseValue := false

View File

@@ -19,6 +19,25 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/autoscaling",
deps = [
"//pkg/apis/core:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v2beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/api/scheduling/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/instrumentation/monitoring:go_default_library",
@@ -30,25 +49,6 @@ go_library(
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/golang.org/x/oauth2/google:go_default_library",
"//vendor/google.golang.org/api/monitoring/v3:go_default_library",
"//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/scheduling/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)

View File

@@ -368,26 +368,6 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
})
func makeUnschedulable(f *framework.Framework, nodes []v1.Node) error {
for _, node := range nodes {
err := makeNodeUnschedulable(f.ClientSet, &node)
if err != nil {
return err
}
}
return nil
}
func makeSchedulable(f *framework.Framework, nodes []v1.Node) error {
for _, node := range nodes {
err := makeNodeSchedulable(f.ClientSet, &node, false)
if err != nil {
return err
}
}
return nil
}
func anyKey(input map[string]int) string {
for k := range input {
return k

View File

@@ -22,6 +22,7 @@ import (
"io/ioutil"
"math"
"net/http"
"os"
"os/exec"
"regexp"
"strconv"
@@ -82,6 +83,8 @@ const (
expendablePriorityClassName = "expendable-priority"
highPriorityClassName = "high-priority"
gpuLabel = "cloud.google.com/gke-accelerator"
)
var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
@@ -112,8 +115,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
nodeCount = len(nodes.Items)
coreCount = 0
for _, node := range nodes.Items {
quentity := node.Status.Capacity[v1.ResourceCPU]
coreCount += quentity.Value()
quantity := node.Status.Allocatable[v1.ResourceCPU]
coreCount += quantity.Value()
}
By(fmt.Sprintf("Initial number of schedulable nodes: %v", nodeCount))
Expect(nodeCount).NotTo(BeZero())
@@ -129,16 +132,11 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
err = enableAutoscaler("default-pool", 3, 5)
framework.ExpectNoError(err)
}
Expect(getNAPNodePoolsNumber()).Should(Equal(0))
}
})
AfterEach(func() {
if framework.ProviderIs("gke") {
By("Remove changes introduced by NAP tests")
removeNAPNodePools()
disableAutoprovisioning()
}
framework.SkipUnlessProviderIs("gce", "gke")
By(fmt.Sprintf("Restoring initial size of the cluster"))
setMigSizes(originalSizes)
expectedNodes := 0
@@ -207,107 +205,123 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]",
func() { simpleScaleUpTest(0) })
supportedGpuTypes := []string{"nvidia-tesla-k80", "nvidia-tesla-v100", "nvidia-tesla-p100"}
for _, gpuType := range supportedGpuTypes {
gpuType := gpuType // create new variable for each iteration step
gpuType := os.Getenv("TESTED_GPU_TYPE")
It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet()
installNvidiaDriversDaemonSet()
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
By("Schedule a pod which requires GPU")
framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc"))
By("Schedule a pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
})
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
})
It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet()
installNvidiaDriversDaemonSet()
By("Schedule a single pod which requires GPU")
framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc"))
By("Schedule a single pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2))
defer disableAutoscaler(gpuPoolName, 0, 2)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2))
defer disableAutoscaler(gpuPoolName, 0, 2)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
By("Scale GPU deployment")
framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, true)
By("Scale GPU deployment")
framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, true)
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+2 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(2))
})
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+2 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(2))
})
It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet()
installNvidiaDriversDaemonSet()
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
// Expect gpu pool to stay intact
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
})
// Expect gpu pool to stay intact
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
})
It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet()
installNvidiaDriversDaemonSet()
By("Schedule a single pod which requires GPU")
framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc"))
By("Schedule a single pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
By("Remove the only POD requiring GPU")
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
By("Remove the only POD requiring GPU")
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
})
}
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
})
It("should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp]",
func() {
@@ -355,6 +369,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
// We wait for nodes to become schedulable to make sure the new nodes
// will be returned by getPoolNodes below.
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, resizeTimeout))
glog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")
By("Getting memory available on new nodes, so we can account for it when creating RC")
@@ -362,7 +379,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
Expect(len(nodes)).Should(Equal(extraNodes))
extraMemMb := 0
for _, node := range nodes {
mem := node.Status.Capacity[v1.ResourceMemory]
mem := node.Status.Allocatable[v1.ResourceMemory]
extraMemMb += int((&mem).Value() / 1024 / 1024)
}
@@ -859,7 +876,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
By("Block network connectivity to some nodes to simulate unhealthy cluster")
nodesToBreakCount := int(math.Floor(math.Max(float64(unhealthyClusterThreshold), 0.5*float64(clusterSize))))
nodesToBreakCount := int(math.Ceil(math.Max(float64(unhealthyClusterThreshold), 0.5*float64(clusterSize))))
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
@@ -894,106 +911,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(framework.WaitForReadyNodes(c, len(nodes.Items), nodesRecoverTimeout))
})
It("should add new node and new node pool on too big pod, scale down to 1 and scale down to 0 [Feature:ClusterSizeAutoscalingScaleWithNAP]", func() {
framework.SkipUnlessProviderIs("gke")
framework.ExpectNoError(enableAutoprovisioning(""))
By("Create first pod")
cleanupFunc1 := ReserveMemory(f, "memory-reservation1", 1, int(1.1*float64(memAllocatableMb)), true, defaultTimeout)
defer func() {
if cleanupFunc1 != nil {
cleanupFunc1()
}
}()
By("Waiting for scale up")
// Verify that cluster size increased.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, defaultTimeout))
By("Check if NAP group was created")
Expect(getNAPNodePoolsNumber()).Should(Equal(1))
By("Create second pod")
cleanupFunc2 := ReserveMemory(f, "memory-reservation2", 1, int(1.1*float64(memAllocatableMb)), true, defaultTimeout)
defer func() {
if cleanupFunc2 != nil {
cleanupFunc2()
}
}()
By("Waiting for scale up")
// Verify that cluster size increased.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+2 }, defaultTimeout))
By("Delete first pod")
cleanupFunc1()
cleanupFunc1 = nil
By("Waiting for scale down to 1")
// Verify that cluster size decreased.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, scaleDownTimeout))
By("Delete second pod")
cleanupFunc2()
cleanupFunc2 = nil
By("Waiting for scale down to 0")
// Verify that cluster size decreased.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
By("Waiting for NAP group remove")
framework.ExpectNoError(waitTillAllNAPNodePoolsAreRemoved())
By("Check if NAP group was removeed")
Expect(getNAPNodePoolsNumber()).Should(Equal(0))
})
It("shouldn't add new node group if not needed [Feature:ClusterSizeAutoscalingScaleWithNAP]", func() {
framework.SkipUnlessProviderIs("gke")
framework.ExpectNoError(enableAutoprovisioning(""))
By("Create pods")
// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
cleanupFunc := ReserveMemory(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout)
defer cleanupFunc()
By("Waiting for scale up")
// Verify that cluster size increased.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
By("Check if NAP group was created hoping id didn't happen")
Expect(getNAPNodePoolsNumber()).Should(Equal(0))
})
It("shouldn't scale up if cores limit too low, should scale up after limit is changed [Feature:ClusterSizeAutoscalingScaleWithNAP]", func() {
framework.SkipUnlessProviderIs("gke")
By(fmt.Sprintf("Set core limit to %d", coreCount))
framework.ExpectNoError(enableAutoprovisioning(fmt.Sprintf(`"resource_limits":{"name":"cpu", "minimum":2, "maximum":%d}, "resource_limits":{"name":"memory", "minimum":0, "maximum":10000000}`, coreCount)))
// Create pod allocating 1.1 allocatable for present nodes. Bigger node will have to be created.
cleanupFunc := ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, time.Second)
defer cleanupFunc()
By(fmt.Sprintf("Waiting for scale up hoping it won't happen, sleep for %s", scaleUpTimeout.String()))
time.Sleep(scaleUpTimeout)
// Verify that cluster size is not changed
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, time.Second))
By("Change resource limits")
framework.ExpectNoError(enableAutoprovisioning(fmt.Sprintf(`"resource_limits":{"name":"cpu", "minimum":2, "maximum":%d}, "resource_limits":{"name":"memory", "minimum":0, "maximum":10000000}`, coreCount+5)))
By("Wait for scale up")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
By("Check if NAP group was created")
Expect(getNAPNodePoolsNumber()).Should(Equal(1))
})
It("should create new node if there is no node for node selector [Feature:ClusterSizeAutoscalingScaleWithNAP]", func() {
framework.SkipUnlessProviderIs("gke")
framework.ExpectNoError(enableAutoprovisioning(""))
// Create pod allocating 0.7 allocatable for present nodes with node selector.
cleanupFunc := ReserveMemoryWithSelector(f, "memory-reservation", 1, int(0.7*float64(memAllocatableMb)), true, scaleUpTimeout, map[string]string{"test": "test"})
defer cleanupFunc()
By("Waiting for scale up")
// Verify that cluster size increased.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, defaultTimeout))
By("Check if NAP group was created")
Expect(getNAPNodePoolsNumber()).Should(Equal(1))
})
It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
framework.SkipUnlessProviderIs("gce")
defer createPriorityClasses(f)()
// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), false, time.Second, expendablePriorityClassName)
@@ -1006,8 +924,6 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
It("should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
framework.SkipUnlessProviderIs("gce")
defer createPriorityClasses(f)()
// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName)
@@ -1018,8 +934,6 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
It("shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]", func() {
// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
framework.SkipUnlessProviderIs("gce")
defer createPriorityClasses(f)()
// Create nodesCountAfterResize pods allocating 0.7 allocatable on present nodes - one pod per node.
cleanupFunc1 := ReserveMemoryWithPriority(f, "memory-reservation1", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, expendablePriorityClassName)
@@ -1032,8 +946,6 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
It("should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
framework.SkipUnlessProviderIs("gce")
defer createPriorityClasses(f)()
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
@@ -1045,8 +957,6 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
It("shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
framework.SkipUnlessProviderIs("gce")
defer createPriorityClasses(f)()
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
@@ -1248,17 +1158,6 @@ func disableAutoscaler(nodePool string, minCount, maxCount int) error {
return fmt.Errorf("autoscaler still enabled, last error: %v", finalErr)
}
func isAutoprovisioningEnabled() (bool, error) {
strBody, err := getCluster("v1alpha1")
if err != nil {
return false, err
}
if strings.Contains(strBody, "\"enableNodeAutoprovisioning\": true") {
return true, nil
}
return false, nil
}
func executeHTTPRequest(method string, url string, body string) (string, error) {
client := &http.Client{}
req, err := http.NewRequest(method, url, strings.NewReader(body))
@@ -1278,126 +1177,6 @@ func executeHTTPRequest(method string, url string, body string) (string, error)
return string(respBody), nil
}
func enableAutoprovisioning(resourceLimits string) error {
By("Using API to enable autoprovisioning.")
var body string
if resourceLimits != "" {
body = fmt.Sprintf(`{"update": {"desired_cluster_autoscaling": {"enable_node_autoprovisioning": true, %s}}}`, resourceLimits)
} else {
body = `{"update": {"desired_cluster_autoscaling": {"enable_node_autoprovisioning": true, "resource_limits":{"name":"cpu", "minimum":0, "maximum":100}, "resource_limits":{"name":"memory", "minimum":0, "maximum":10000000}}}}`
}
_, err := executeHTTPRequest(http.MethodPut, getGKEClusterURL("v1alpha1"), body)
if err != nil {
glog.Errorf("Request error: %s", err.Error())
return err
}
glog.Infof("Wait for enabling autoprovisioning.")
for start := time.Now(); time.Since(start) < gkeUpdateTimeout; time.Sleep(30 * time.Second) {
enabled, err := isAutoprovisioningEnabled()
if err != nil {
glog.Errorf("Error: %s", err.Error())
return err
}
if enabled {
By("Autoprovisioning enabled.")
return nil
}
glog.Infof("Waiting for enabling autoprovisioning")
}
return fmt.Errorf("autoprovisioning wasn't enabled (timeout).")
}
func disableAutoprovisioning() error {
enabled, err := isAutoprovisioningEnabled()
if err != nil {
glog.Errorf("Error: %s", err.Error())
return err
}
if !enabled {
By("Autoprovisioning disabled.")
return nil
}
By("Using API to disable autoprovisioning.")
_, err = executeHTTPRequest(http.MethodPut, getGKEClusterURL("v1alpha1"), "{\"update\": {\"desired_cluster_autoscaling\": {}}}")
if err != nil {
glog.Errorf("Request error: %s", err.Error())
return err
}
By("Wait for disabling autoprovisioning.")
for start := time.Now(); time.Since(start) < gkeUpdateTimeout; time.Sleep(30 * time.Second) {
enabled, err := isAutoprovisioningEnabled()
if err != nil {
glog.Errorf("Error: %s", err.Error())
return err
}
if !enabled {
By("Autoprovisioning disabled.")
return nil
}
By("Waiting for disabling autoprovisioning")
}
return fmt.Errorf("autoprovisioning wasn't disabled (timeout).")
}
func getNAPNodePools() ([]string, error) {
if framework.ProviderIs("gke") {
args := []string{"container", "node-pools", "list", "--cluster=" + framework.TestContext.CloudConfig.Cluster}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
if err != nil {
glog.Errorf("Failed to get instance groups: %v", string(output))
return nil, err
}
re := regexp.MustCompile("nap.* ")
lines := re.FindAllString(string(output), -1)
for i, line := range lines {
lines[i] = line[:strings.Index(line, " ")]
}
return lines, nil
} else {
return nil, fmt.Errorf("provider does not support NAP")
}
}
func removeNAPNodePools() error {
By("Remove NAP node pools")
pools, err := getNAPNodePools()
if err != nil {
return err
}
for _, pool := range pools {
By("Remove node pool: " + pool)
suffix := fmt.Sprintf("projects/%s/zones/%s/clusters/%s/nodePools/%s",
framework.TestContext.CloudConfig.ProjectID,
framework.TestContext.CloudConfig.Zone,
framework.TestContext.CloudConfig.Cluster,
pool)
_, err := executeHTTPRequest(http.MethodDelete, getGKEURL("v1alpha1", suffix), "")
if err != nil {
glog.Errorf("Request error: %s", err.Error())
return err
}
}
err = waitTillAllNAPNodePoolsAreRemoved()
if err != nil {
glog.Errorf(fmt.Sprintf("Couldn't remove NAP groups: %s", err.Error()))
}
return err
}
func getNAPNodePoolsNumber() int {
groups, err := getNAPNodePools()
framework.ExpectNoError(err)
return len(groups)
}
func waitTillAllNAPNodePoolsAreRemoved() error {
By("Wait till all NAP node pools are removed")
err := wait.PollImmediate(5*time.Second, defaultTimeout, func() (bool, error) {
return getNAPNodePoolsNumber() == 0, nil
})
return err
}
func addNodePool(name string, machineType string, numNodes int) {
args := []string{"container", "node-pools", "create", name, "--quiet",
"--machine-type=" + machineType,
@@ -1504,7 +1283,7 @@ func doPut(url, content string) (string, error) {
return strBody, nil
}
func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, priorityClassName string) func() error {
func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration, priorityClassName string) func() error {
By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
request := int64(1024 * 1024 * megabytes / replicas)
config := &testutils.RCConfig{
@@ -1517,6 +1296,7 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
Replicas: replicas,
MemRequest: request,
NodeSelector: selector,
Tolerations: tolerations,
PriorityClassName: priorityClassName,
}
for start := time.Now(); time.Since(start) < rcCreationRetryTimeout; time.Sleep(rcCreationRetryDelay) {
@@ -1539,19 +1319,19 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
// ReserveMemoryWithPriority creates a replication controller with pods with priority that, in summation,
// request the specified amount of memory.
func ReserveMemoryWithPriority(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, priorityClassName string) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, priorityClassName)
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, nil, priorityClassName)
}
// ReserveMemoryWithSelector creates a replication controller with pods with node selector that, in summation,
// request the specified amount of memory.
func ReserveMemoryWithSelector(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, selector, "")
func ReserveMemoryWithSelectorAndTolerations(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, selector, tolerations, "")
}
// ReserveMemory creates a replication controller with pods that, in summation,
// request the specified amount of memory.
func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, "")
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, nil, "")
}
// WaitForClusterSizeFunc waits until the cluster size matches the given function.
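As a usage sketch (not part of the diff), the pattern these helpers assume looks roughly like the following. It presumes it runs inside a test body in this package, where f, nodeCount, memAllocatableMb and scaleUpTimeout are already in scope; the node selector and toleration values are made up for illustration.

// Illustrative only: assumes the surrounding e2e test context of this file.
By("Reserve roughly 70% of allocatable memory on every node, tolerating an example taint")
cleanup := ReserveMemoryWithSelectorAndTolerations(f, "memory-reservation",
    nodeCount, int(0.7*float64(nodeCount)*float64(memAllocatableMb)),
    true, scaleUpTimeout,
    map[string]string{"pool": "default"}, // hypothetical node selector
    []v1.Toleration{{Key: "example.com/dedicated", Operator: v1.TolerationOpExists}}) // hypothetical toleration
defer cleanup() // the returned func() error presumably deletes the reservation RC and waits for GC

By("Expect the autoscaler to add at least one node")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
    func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
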
@@ -1742,7 +1522,13 @@ func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAdd
return fmt.Errorf("Failed to remove taint from node in allowed number of retries")
}
func scheduleGpuPod(f *framework.Framework, id string) error {
// ScheduleAnySingleGpuPod schedules a pod which requires single GPU of any type
func ScheduleAnySingleGpuPod(f *framework.Framework, id string) error {
return ScheduleGpuPod(f, id, "", 1)
}
// ScheduleGpuPod schedules a pod which requires a given number of gpus of given type
func ScheduleGpuPod(f *framework.Framework, id string, gpuType string, gpuLimit int64) error {
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
@@ -1751,10 +1537,14 @@ func scheduleGpuPod(f *framework.Framework, id string) error {
Timeout: 3 * scaleUpTimeout, // spinning up GPU node is slow
Image: imageutils.GetPauseImageName(),
Replicas: 1,
GpuLimit: 1,
GpuLimit: gpuLimit,
Labels: map[string]string{"requires-gpu": "yes"},
}
if gpuType != "" {
config.NodeSelector = map[string]string{gpuLabel: gpuType}
}
err := framework.RunRC(*config)
if err != nil {
return err
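
A hypothetical call-site sketch for the two helpers above; the RC names are illustrative and gpuType is assumed to hold a value such as "nvidia-tesla-k80".

// Illustrative only: assumes f and gpuType are in scope in a test body.
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc")) // one GPU of any type
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
framework.ExpectNoError(ScheduleGpuPod(f, "gpu-pod-rc-2", gpuType, 2)) // two GPUs of a specific type
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc-2")
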
@@ -2152,12 +1942,18 @@ func createPriorityClasses(f *framework.Framework) func() {
}
for className, priority := range priorityClasses {
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: className}, Value: priority})
if err != nil {
glog.Errorf("Error creating priority class: %v", err)
}
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
}
return func() {
for className := range priorityClasses {
f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(className, nil)
err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(className, nil)
if err != nil {
glog.Errorf("Error deleting priority class: %v", err)
}
}
}
}

View File

@@ -46,7 +46,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", fu
})
SIGDescribe("[Serial] [Slow] ReplicaSet", func() {
// CPU tests via deployments
// CPU tests via ReplicaSets
It(titleUp, func() {
scaleUp("rs", common.KindReplicaSet, false, rc, f)
})

View File

@@ -12,6 +12,7 @@ go_library(
"autoscaling_utils.go",
"configmap.go",
"configmap_volume.go",
"container.go",
"container_probe.go",
"docker_containers.go",
"downward_api.go",
@@ -22,13 +23,18 @@ go_library(
"host_path.go",
"init_container.go",
"kubelet_etc_hosts.go",
"lifecycle_hook.go",
"networking.go",
"node_lease.go",
"pods.go",
"privileged.go",
"projected.go",
"runtime.go",
"secrets.go",
"secrets_volume.go",
"security_context.go",
"sysctl.go",
"ttlafterfinished.go",
"util.go",
"volumes.go",
],
@@ -40,32 +46,40 @@ go_library(
"//pkg/client/conditions:go_default_library",
"//pkg/kubelet:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/images:go_default_library",
"//pkg/kubelet/sysctl:go_default_library",
"//pkg/security/apparmor:go_default_library",
"//pkg/util/slice:go_default_library",
"//pkg/util/version:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/coordination/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/onsi/gomega/types:go_default_library",
"//vendor/golang.org/x/net/websocket:go_default_library",
"//vendor/k8s.io/api/autoscaling/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/scale:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
],
)

View File

@@ -117,7 +117,7 @@ done`, testCmd)
Affinity: loaderAffinity,
Containers: []api.Container{{
Name: "test",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", testCmd},
}},
RestartPolicy: api.RestartPolicyNever,

View File

@@ -43,7 +43,7 @@ import (
const (
dynamicConsumptionTimeInSeconds = 30
staticConsumptionTimeInSeconds = 3600
dynamicRequestSizeInMillicores = 20
dynamicRequestSizeInMillicores = 100
dynamicRequestSizeInMegabytes = 100
dynamicRequestSizeCustomMetric = 10
port = 80
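
The resource consumer presumably splits its total CPU target into parallel requests of dynamicRequestSizeInMillicores each, so raising the chunk size from 20 to 100 cuts the request count roughly five-fold. A rough sketch of that chunking, stated as an assumption about the consumer's behaviour rather than code from this file:

// Illustrative only: assumed chunking of a total CPU target into requests.
func numCPURequests(totalMillicores, requestSizeMillicores int) int {
    n := totalMillicores / requestSizeMillicores
    if totalMillicores%requestSizeMillicores != 0 {
        n++ // round up so the full target is always covered
    }
    return n
}

// e.g. a 500m target: 25 requests at the old 20m chunk size, 5 at the new 100m size.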

View File

@@ -24,15 +24,16 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
var _ = Describe("[sig-api-machinery] ConfigMap", func() {
f := framework.NewDefaultFramework("configmap")
/*
Testname: configmap-in-env-field
Description: Make sure config map value can be used as an environment
variable in the container (on container.env field)
Release : v1.9
Testname: ConfigMap, from environment field
Description: Create a Pod with an environment variable value set using a value from ConfigMap. A ConfigMap value MUST be accessible in the container environment.
*/
framework.ConformanceIt("should be consumable via environment variable [NodeConformance]", func() {
name := "configmap-test-" + string(uuid.NewUUID())
@@ -51,7 +52,7 @@ var _ = Describe("[sig-api-machinery] ConfigMap", func() {
Containers: []v1.Container{
{
Name: "env-test",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
Env: []v1.EnvVar{
{
@@ -78,9 +79,9 @@ var _ = Describe("[sig-api-machinery] ConfigMap", func() {
})
/*
Testname: configmap-envfrom-field
Description: Make sure config map value can be used as an source for
environment variables in the container (on container.envFrom field)
Release: v1.9
Testname: ConfigMap, from environment variables
Description: Create a Pod with an environment source from ConfigMap. All ConfigMap values MUST be available as environment variables in the container.
*/
framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func() {
name := "configmap-test-" + string(uuid.NewUUID())
@@ -99,7 +100,7 @@ var _ = Describe("[sig-api-machinery] ConfigMap", func() {
Containers: []v1.Container{
{
Name: "env-test",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
EnvFrom: []v1.EnvFromSource{
{
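
For context on the two consumption modes exercised here (container.env with a ConfigMap key reference versus container.envFrom with a whole ConfigMap), a minimal hypothetical pod builder; the names are illustrative and only the standard k8s.io/api types are assumed.

package demo

import (
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// envConsumerPod shows both ways a ConfigMap can feed the environment:
// a single key via env.valueFrom, and the whole map via envFrom.
func envConsumerPod(configMapName string) *v1.Pod {
    return &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{Name: "configmap-env-demo"},
        Spec: v1.PodSpec{
            RestartPolicy: v1.RestartPolicyNever,
            Containers: []v1.Container{{
                Name:    "env-test",
                Image:   "busybox", // the tests use imageutils.GetE2EImage(imageutils.BusyBox)
                Command: []string{"sh", "-c", "env"},
                Env: []v1.EnvVar{{
                    Name: "CONFIG_DATA_1",
                    ValueFrom: &v1.EnvVarSource{
                        ConfigMapKeyRef: &v1.ConfigMapKeySelector{
                            LocalObjectReference: v1.LocalObjectReference{Name: configMapName},
                            Key:                  "data-1",
                        },
                    },
                }},
                EnvFrom: []v1.EnvFromSource{{
                    ConfigMapRef: &v1.ConfigMapEnvSource{
                        LocalObjectReference: v1.LocalObjectReference{Name: configMapName},
                    },
                }},
            }},
        },
    }
}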

View File

@@ -27,24 +27,25 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
var _ = Describe("[sig-storage] ConfigMap", func() {
f := framework.NewDefaultFramework("configmap")
/*
Testname: configmap-nomap-simple
Description: Make sure config map without mappings works by mounting it
to a volume with a custom path (mapping) on the pod with no other settings.
Release : v1.9
Testname: ConfigMap Volume, without mapping
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0x644.
*/
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
doConfigMapE2EWithoutMappings(f, 0, 0, nil)
})
/*
Testname: configmap-nomap-default-mode
Description: Make sure config map without mappings works by mounting it
to a volume with a custom path (mapping) on the pod with defaultMode set
Release : v1.9
Testname: ConfigMap Volume, without mapping, volume mode set
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. File mode is changed to a custom value of '0x400'. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST be set to the custom value of 0x400
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [NodeConformance]", func() {
defaultMode := int32(0400)
@@ -57,9 +58,9 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})
/*
Testname: configmap-nomap-user
Description: Make sure config map without mappings works by mounting it
to a volume with a custom path (mapping) on the pod as non-root.
Release : v1.9
Testname: ConfigMap Volume, without mapping, non-root user
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to default value of 0x644.
*/
framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func() {
doConfigMapE2EWithoutMappings(f, 1000, 0, nil)
@@ -70,19 +71,18 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})
/*
Testname: configmap-simple-mapped
Description: Make sure config map works by mounting it to a volume with
a custom path (mapping) on the pod with no other settings and make sure
the pod actually consumes it.
Release : v1.9
Testname: ConfigMap Volume, with mapping
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0x644.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
doConfigMapE2EWithMappings(f, 0, 0, nil)
})
/*
Testname: configmap-with-item-mode-mapped
Description: Make sure config map works with an item mode (e.g. 0400)
for the config map item.
Release : v1.9
Testname: ConfigMap Volume, with mapping, volume mode set
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. File mode is changed to a custom value of '0x400'. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST be set to the custom value of 0x400
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [NodeConformance]", func() {
mode := int32(0400)
@@ -90,8 +90,9 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})
/*
Testname: configmap-simple-user-mapped
Description: Make sure config map works when it is mounted as non-root.
Release : v1.9
Testname: ConfigMap Volume, with mapping, non-root user
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to default value of 0x644.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func() {
doConfigMapE2EWithMappings(f, 1000, 0, nil)
@@ -102,9 +103,9 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})
/*
Testname: configmap-update-test
Description: Make sure update operation is working on config map and
the result is observed on volumes mounted in containers.
Release : v1.9
Testname: ConfigMap Volume, update
Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. When the ConfigMap is updated the change to the config map MUST be verified by reading the content from the mounted file in the Pod.
*/
framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
@@ -151,7 +152,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
Containers: []v1.Container{
{
Name: containerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -184,7 +185,12 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2"))
})
It("binary data should be reflected in volume [NodeConformance]", func() {
/*
Release: v1.12
Testname: ConfigMap Volume, text data, binary data
Description: The ConfigMap that is created with text data and binary data MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. ConfigMap's text data and binary data MUST be verified by reading the content from the mounted files in the Pod.
*/
framework.ConformanceIt("binary data should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
@@ -233,7 +239,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
Containers: []v1.Container{
{
Name: containerName1,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -245,7 +251,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
},
{
Name: containerName2,
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"hexdump", "-C", "/etc/configmap-volume/dump.bin"},
VolumeMounts: []v1.VolumeMount{
{
@@ -276,9 +282,9 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})
/*
Testname: configmap-CUD-test
Description: Make sure Create, Update, Delete operations are all working
on config map and the result is observed on volumes mounted in containers.
Release : v1.9
Testname: ConfigMap Volume, create, update and delete
Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. When the config map is updated the change to the config map MUST be verified by reading the content from the mounted file in the Pod. Also when the item(file) is deleted from the map that MUST result in an error reading that item(file).
*/
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
@@ -379,7 +385,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
Containers: []v1.Container{
{
Name: deleteContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/delete/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -391,7 +397,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
},
{
Name: updateContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/update/data-3"},
VolumeMounts: []v1.VolumeMount{
{
@@ -403,7 +409,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
},
{
Name: createContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -459,9 +465,9 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})
/*
Testname: configmap-multiple-volumes
Description: Make sure config map works when it mounted as two different
volumes on the same node.
Release : v1.9
Testname: ConfigMap Volume, multiple volume maps
Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to multiple paths in the Pod. The content MUST be accessible from all the mapped volume mounts.
*/
framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func() {
var (
@@ -509,7 +515,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{"--file_content=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -589,7 +595,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, d
Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/configmap-volume/data-1",
"--file_mode=/etc/configmap-volume/data-1"},
@@ -675,7 +681,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item
Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{"--file_content=/etc/configmap-volume/path/to/data-2",
"--file_mode=/etc/configmap-volume/path/to/data-2"},
VolumeMounts: []v1.VolumeMount{
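
The volume-based variants above boil down to a ConfigMapVolumeSource with optional item mappings and a default file mode. A minimal hypothetical sketch of that shape, with invented names and paths:

package demo

import (
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// configMapVolumePod mounts a ConfigMap as a volume, remapping one key to a
// custom path and setting a restrictive default file mode (0400).
func configMapVolumePod(configMapName string) *v1.Pod {
    defaultMode := int32(0400)
    return &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{Name: "configmap-volume-demo"},
        Spec: v1.PodSpec{
            RestartPolicy: v1.RestartPolicyNever,
            Volumes: []v1.Volume{{
                Name: "configmap-volume",
                VolumeSource: v1.VolumeSource{
                    ConfigMap: &v1.ConfigMapVolumeSource{
                        LocalObjectReference: v1.LocalObjectReference{Name: configMapName},
                        Items:                []v1.KeyToPath{{Key: "data-2", Path: "path/to/data-2"}},
                        DefaultMode:          &defaultMode,
                    },
                },
            }},
            Containers: []v1.Container{{
                Name:    "configmap-volume-test",
                Image:   "busybox", // the tests use the Mounttest image via imageutils
                Command: []string{"sh", "-c", "cat /etc/configmap-volume/path/to/data-2"},
                VolumeMounts: []v1.VolumeMount{{
                    Name:      "configmap-volume",
                    MountPath: "/etc/configmap-volume",
                    ReadOnly:  true,
                }},
            }},
        },
    }
}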

View File

@@ -14,10 +14,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e_node
package common
import (
"fmt"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
@@ -27,6 +28,11 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
)
const (
ContainerStatusRetryTimeout = time.Minute * 5
ContainerStatusPollInterval = time.Second * 1
)
// One pod one container
type ConformanceContainer struct {
Container v1.Container

View File

@@ -50,9 +50,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
/*
Testname: pods-readiness-probe-initial-delay
Description: Make sure that pod with readiness probe should not be
ready before initial delay and never restart.
Release : v1.9
Testname: Pod readiness probe, with initial delay
Description: Create a Pod that is configured with an initial delay set on the readiness probe. Check the Pod Start time to compare to the initial delay. The Pod MUST be ready only after the specified initial delay.
*/
framework.ConformanceIt("with readiness probe should not be ready before initial delay and never restart [NodeConformance]", func() {
p := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil))
@@ -82,9 +82,10 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
/*
Testname: pods-readiness-probe-failure
Description: Make sure that pod with readiness probe that fails should
never be ready and never restart.
Release : v1.9
Testname: Pod readiness probe, failure
Description: Create a Pod with a readiness probe that fails consistently. When this Pod is created,
then the Pod MUST never be ready, never be running and restart count MUST be zero.
*/
framework.ConformanceIt("with readiness probe that fails should never be ready and never restart [NodeConformance]", func() {
p := podClient.Create(makePodSpec(probe.withFailing().build(), nil))
@@ -107,9 +108,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
/*
Testname: pods-cat-liveness-probe-restarted
Description: Make sure the pod is restarted with a cat /tmp/health
liveness probe.
Release : v1.9
Testname: Pod liveness probe, using local file, restart
Description: Create a Pod with liveness probe that uses ExecAction handler to cat /tmp/health file. The Container deletes the file /tmp/health after 10 seconds, triggering liveness probe to fail. The Pod MUST now be killed and restarted incrementing restart count to 1.
*/
framework.ConformanceIt("should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func() {
runLivenessTest(f, &v1.Pod{
@@ -121,7 +122,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
Containers: []v1.Container{
{
Name: "liveness",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
@@ -139,9 +140,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
/*
Testname: pods-cat-liveness-probe-not-restarted
Description: Make sure the pod is not restarted with a cat /tmp/health
liveness probe.
Release : v1.9
Testname: Pod liveness probe, using local file, no restart
Description: Pod is created with liveness probe that uses exec command to cat /tmp/health file. Liveness probe MUST not fail to check health and the restart count should remain 0.
*/
framework.ConformanceIt("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func() {
runLivenessTest(f, &v1.Pod{
@@ -153,7 +154,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
Containers: []v1.Container{
{
Name: "liveness",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 600"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
@@ -171,9 +172,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
/*
Testname: pods-http-liveness-probe-restarted
Description: Make sure when http liveness probe fails, the pod should
be restarted.
Release : v1.9
Testname: Pod liveness probe, using http endpoint, restart
Description: A Pod is created with liveness probe on http endpoint /healthz. The http handler on the /healthz will return a http error after 10 seconds since the Pod is started. This MUST result in liveness check failure. The Pod MUST now be killed and restarted incrementing restart count to 1.
*/
framework.ConformanceIt("should be restarted with a /healthz http liveness probe [NodeConformance]", func() {
runLivenessTest(f, &v1.Pod{
@@ -205,9 +206,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
// Slow by design (5 min)
/*
Testname: pods-restart-count
Description: Make sure when a pod gets restarted, its start count
should increase.
Release : v1.9
Testname: Pod liveness probe, using http endpoint, multiple restarts (slow)
Description: A Pod is created with liveness probe on http endpoint /healthz. The http handler on the /healthz will return a http error after 10 seconds since the Pod is started. This MUST result in liveness check failure. The Pod MUST now be killed and restarted incrementing restart count to 1. The liveness probe must fail again after restart once the http handler for the /healthz endpoint on the Pod returns an http error after 10 seconds from the start. Restart counts MUST increment every time the health check fails, measured up to 5 restarts.
*/
framework.ConformanceIt("should have monotonically increasing restart count [Slow][NodeConformance]", func() {
runLivenessTest(f, &v1.Pod{
@@ -238,9 +239,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
/*
Testname: pods-http-liveness-probe-not-restarted
Description: Make sure when http liveness probe succeeds, the pod
should not be restarted.
Release : v1.9
Testname: Pod liveness probe, using http endpoint, failure
Description: A Pod is created with liveness probe on http endpoint /. Liveness probe on this endpoint will not fail. When liveness probe does not fail then the restart count MUST remain zero.
*/
framework.ConformanceIt("should *not* be restarted with a /healthz http liveness probe [NodeConformance]", func() {
runLivenessTest(f, &v1.Pod{
@@ -252,7 +253,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
Containers: []v1.Container{
{
Name: "liveness",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
Ports: []v1.ContainerPort{{ContainerPort: 80}},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
@@ -272,9 +273,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
/*
Testname: pods-docker-liveness-probe-timeout
Description: Make sure that the pod is restarted with a docker exec
liveness probe with timeout.
Release : v1.9
Testname: Pod liveness probe, docker exec, restart
Description: A Pod is created with liveness probe with an Exec action on the Pod. If the liveness probe call does not return within the timeout specified, the liveness probe MUST restart the Pod.
*/
It("should be restarted with a docker exec liveness probe with timeout ", func() {
// TODO: enable this test once the default exec handler supports timeout.
@@ -288,7 +289,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
Containers: []v1.Container{
{
Name: "liveness",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "sleep 600"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
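
The probe variations exercised above reduce to two handler shapes. A minimal hypothetical pod sketch with an exec liveness probe and an HTTP /healthz liveness probe; delays, thresholds and images are illustrative:

package demo

import (
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/intstr"
)

// livenessProbedPod carries the two probe styles used by the tests: an exec
// probe that cats a file, and an HTTP probe against /healthz.
func livenessProbedPod() *v1.Pod {
    return &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{Name: "liveness-demo"},
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name:    "liveness-exec",
                    Image:   "busybox",
                    Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"},
                    LivenessProbe: &v1.Probe{
                        Handler: v1.Handler{
                            Exec: &v1.ExecAction{Command: []string{"cat", "/tmp/health"}},
                        },
                        InitialDelaySeconds: 15,
                        FailureThreshold:    1,
                    },
                },
                {
                    Name:  "liveness-http",
                    Image: "nginx",
                    Ports: []v1.ContainerPort{{ContainerPort: 80}},
                    LivenessProbe: &v1.Probe{
                        Handler: v1.Handler{
                            HTTPGet: &v1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(80)},
                        },
                        InitialDelaySeconds: 15,
                        TimeoutSeconds:      5,
                    },
                },
            },
        },
    }
}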

View File

@@ -28,10 +28,9 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
f := framework.NewDefaultFramework("containers")
/*
Testname: container-without-command-args
Description: When a Pod is created neither 'command' nor 'args' are
provided for a Container, ensure that the docker image's default
command and args are used.
Release : v1.9
Testname: Docker containers, without command and arguments
Description: Default command and arguments from the docker image entrypoint MUST be used when Pod does not specify the container command
*/
framework.ConformanceIt("should use the image defaults if command and args are blank [NodeConformance]", func() {
f.TestContainerOutput("use defaults", entrypointTestPod(), 0, []string{
@@ -40,10 +39,9 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
})
/*
Testname: container-with-args
Description: When a Pod is created and 'args' are provided for a
Container, ensure that they take precedent to the docker image's
default arguments, but that the default command is used.
Release : v1.9
Testname: Docker containers, with arguments
Description: Default command from the docker image entrypoint MUST be used when Pod does not specify the container command, but the arguments from the Pod spec MUST override when specified.
*/
framework.ConformanceIt("should be able to override the image's default arguments (docker cmd) [NodeConformance]", func() {
pod := entrypointTestPod()
@@ -57,10 +55,9 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
// Note: when you override the entrypoint, the image's arguments (docker cmd)
// are ignored.
/*
Testname: container-with-command
Description: When a Pod is created and 'command' is provided for a
Container, ensure that it takes precedent to the docker image's default
command.
Release : v1.9
Testname: Docker containers, with command
Description: Default command from the docker image entrypoint MUST NOT be used when Pod specifies the container command. Command from Pod spec MUST override the command in the image.
*/
framework.ConformanceIt("should be able to override the image's default command (docker entrypoint) [NodeConformance]", func() {
pod := entrypointTestPod()
@@ -72,10 +69,9 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
})
/*
Testname: container-with-command-args
Description: When a Pod is created and 'command' and 'args' are
provided for a Container, ensure that they take precedent to the docker
image's default command and arguments.
Release : v1.9
Testname: Docker containers, with command and arguments
Description: Default command and arguments from the docker image entrypoint MUST NOT be used when Pod specifies the container command and arguments. Command and arguments from Pod spec MUST override the command and arguments in the image.
*/
framework.ConformanceIt("should be able to override the image's default command and arguments [NodeConformance]", func() {
pod := entrypointTestPod()
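
The four precedence cases asserted above are easier to see side by side. A hypothetical container list (the image name is invented) showing how Command and Args interact with the image's ENTRYPOINT and CMD:

package demo

import v1 "k8s.io/api/core/v1"

// entrypointOverrides illustrates the precedence the tests assert:
//   - neither Command nor Args set: image ENTRYPOINT and CMD are used as-is
//   - only Args set:                image ENTRYPOINT runs with the given Args
//   - only Command set:             Command replaces ENTRYPOINT and image CMD is ignored
//   - Command and Args set:         both image defaults are replaced
func entrypointOverrides() []v1.Container {
    const image = "entrypoint-tester" // hypothetical image name
    return []v1.Container{
        {Name: "defaults", Image: image},
        {Name: "args-only", Image: image, Args: []string{"override", "arguments"}},
        {Name: "command-only", Image: image, Command: []string{"/ep-2"}},
        {Name: "command-and-args", Image: image, Command: []string{"/ep-2"}, Args: []string{"override", "arguments"}},
    }
}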

View File

@@ -25,6 +25,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
)
@@ -38,9 +39,9 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
f := framework.NewDefaultFramework("downward-api")
/*
Testname: downwardapi-env-name-namespace-podip
Description: Ensure that downward API can provide pod's name, namespace
and IP address as environment variables.
Release : v1.9
Testname: DownwardAPI, environment for name, namespace and ip
Description: Downward API MUST expose Pod and Container fields as environment variables. Pod name, namespace and IP specified as environment variables in the Pod Spec MUST be visible at runtime in the container.
*/
framework.ConformanceIt("should provide pod name, namespace and IP address as env vars [NodeConformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID())
@@ -84,9 +85,9 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
})
/*
Testname: downwardapi-env-host-ip
Description: Ensure that downward API can provide an IP address for
host node as an environment variable.
Release : v1.9
Testname: DownwardAPI, environment for host ip
Description: Downward API MUST expose Pod and Container fields as environment variables. The host IP specified as an environment variable in the Pod Spec MUST be visible at runtime in the container.
*/
framework.ConformanceIt("should provide host IP as an env var [NodeConformance]", func() {
framework.SkipUnlessServerVersionGTE(hostIPVersion, f.ClientSet.Discovery())
@@ -111,9 +112,9 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
})
/*
Testname: downwardapi-env-limits-requests
Description: Ensure that downward API can provide CPU/memory limit
and CPU/memory request as environment variables.
Release : v1.9
Testname: DownwardAPI, environment for CPU and memory limits and requests
Description: Downward API MUST expose CPU request and Memory request set through environment variables at runtime in the container.
*/
framework.ConformanceIt("should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID())
@@ -162,10 +163,9 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
})
/*
Testname: downwardapi-env-default-allocatable
Description: Ensure that downward API can provide default node
allocatable values for CPU and memory as environment variables if CPU
and memory limits are not specified for a container.
Release : v1.9
Testname: DownwardAPI, environment for default CPU and memory limits and requests
Description: Downward API MUST expose CPU request and Memory limits set through environment variables at runtime in the container.
*/
framework.ConformanceIt("should provide default limits.cpu/memory from node allocatable [NodeConformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID())
@@ -200,7 +200,7 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
Env: env,
},
@@ -213,9 +213,9 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
})
/*
Testname: downwardapi-env-pod-uid
Description: Ensure that downward API can provide pod UID as an
environment variable.
Release : v1.9
Testname: DownwardAPI, environment for Pod UID
Description: Downward API MUST expose Pod UID set through environment variables at runtime in the container.
*/
framework.ConformanceIt("should provide pod UID as env vars [NodeConformance]", func() {
framework.SkipUnlessServerVersionGTE(podUIDVersion, f.ClientSet.Discovery())
@@ -300,7 +300,7 @@ var _ = framework.KubeDescribe("Downward API [Serial] [Disruptive] [NodeFeature:
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
Env: env,
},
@@ -325,7 +325,7 @@ func testDownwardAPI(f *framework.Framework, podName string, env []v1.EnvVar, ex
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
@@ -357,7 +357,7 @@ func testDownwardAPIForEphemeralStorage(f *framework.Framework, podName string,
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
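
The environment variables these tests assert on come from two downward API sources. A minimal hypothetical sketch using fieldRef for pod metadata and resourceFieldRef for container resources; variable names are illustrative:

package demo

import (
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
)

// downwardAPIEnv builds env vars from pod fields and container resources.
func downwardAPIEnv() []v1.EnvVar {
    return []v1.EnvVar{
        {
            Name: "POD_NAME",
            ValueFrom: &v1.EnvVarSource{
                FieldRef: &v1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.name"},
            },
        },
        {
            Name: "HOST_IP",
            ValueFrom: &v1.EnvVarSource{
                FieldRef: &v1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "status.hostIP"},
            },
        },
        {
            Name: "CPU_LIMIT",
            ValueFrom: &v1.EnvVarSource{
                ResourceFieldRef: &v1.ResourceFieldSelector{
                    Resource: "limits.cpu",
                    Divisor:  resource.MustParse("1m"), // report the limit in millicores
                },
            },
        },
    }
}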

View File

@@ -25,6 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -40,9 +41,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-podname
Description: Ensure that downward API can provide pod's name through
DownwardAPIVolumeFiles.
Release : v1.9
Testname: DownwardAPI volume, pod name
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the Pod name. The container runtime MUST be able to access Pod name from the specified path on the mounted volume.
*/
framework.ConformanceIt("should provide podname only [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -54,9 +55,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-set-default-mode
Description: Ensure that downward API can set default file permission
mode for DownwardAPIVolumeFiles if no mode is specified.
Release : v1.9
Testname: DownwardAPI volume, volume mode 0400
Description: A Pod is configured with DownwardAPIVolumeSource with the volumesource mode set to -r-------- and DownwardAPIVolumeFiles contains an item for the Pod name. The container runtime MUST be able to access Pod name from the specified path on the mounted volume.
*/
framework.ConformanceIt("should set DefaultMode on files [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -69,9 +70,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-set-mode
Description: Ensure that downward API can set file permission mode for
DownwardAPIVolumeFiles.
Release : v1.9
Testname: DownwardAPI volume, file mode 0400
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the Pod name with the file mode set to -r--------. The container runtime MUST be able to access Pod name from the specified path on the mounted volume.
*/
framework.ConformanceIt("should set mode on item file [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -113,9 +114,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-update-label
Description: Ensure that downward API updates labels in
DownwardAPIVolumeFiles when pod's labels get modified.
Release : v1.9
Testname: DownwardAPI volume, update label
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a list of items, one for each of the Pod labels. The container runtime MUST be able to access Pod labels from the specified path on the mounted volume. Update the labels by adding a new label to the running Pod. The new label MUST be available from the mounted volume.
*/
framework.ConformanceIt("should update labels on modification [NodeConformance]", func() {
labels := map[string]string{}
@@ -145,9 +146,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-update-annotation
Description: Ensure that downward API updates annotations in
DownwardAPIVolumeFiles when pod's annotations get modified.
Release : v1.9
Testname: DownwardAPI volume, update annotations
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a list of items, one for each of the Pod annotations. The container runtime MUST be able to access the Pod annotations from the specified path on the mounted volume. Update the annotations by adding a new annotation to the running Pod. The new annotation MUST be available from the mounted volume.
*/
framework.ConformanceIt("should update annotations on modification [NodeConformance]", func() {
annotations := map[string]string{}
@@ -179,9 +180,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-cpu-limit
Description: Ensure that downward API can provide container's CPU limit
through DownwardAPIVolumeFiles.
Release : v1.9
Testname: DownwardAPI volume, CPU limits
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the CPU limits. The container runtime MUST be able to access the CPU limits from the specified path on the mounted volume.
*/
framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -193,9 +194,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-memory-limit
Description: Ensure that downward API can provide container's memory
limit through DownwardAPIVolumeFiles.
Release : v1.9
Testname: DownwardAPI volume, memory limits
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the memory limits. The container runtime MUST be able to access the memory limits from the specified path on the mounted volume.
*/
framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -207,9 +208,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-cpu-request
Description: Ensure that downward API can provide container's CPU
request through DownwardAPIVolumeFiles.
Release : v1.9
Testname: DownwardAPI volume, CPU request
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the CPU request. The container runtime MUST be able to access the CPU request from the specified path on the mounted volume.
*/
framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -221,9 +222,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-memory-request
Description: Ensure that downward API can provide container's memory
request through DownwardAPIVolumeFiles.
Release : v1.9
Testname: DownwardAPI volume, memory request
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the memory request. The container runtime MUST be able to access the memory request from the specified path on the mounted volume.
*/
framework.ConformanceIt("should provide container's memory request [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -235,10 +236,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-default-cpu
Description: Ensure that downward API can provide default node
allocatable value for CPU through DownwardAPIVolumeFiles if CPU
limit is not specified for a container.
Release : v1.9
Testname: DownwardAPI volume, CPU limit, default node allocatable
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the CPU limits. CPU limits are not specified for the container. The container runtime MUST be able to access the CPU limits from the specified path on the mounted volume and the value MUST be the default node allocatable.
*/
framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -248,10 +248,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-default-memory
Description: Ensure that downward API can provide default node
allocatable value for memory through DownwardAPIVolumeFiles if memory
limit is not specified for a container.
Release : v1.9
Testname: DownwardAPI volume, memory limit, default node allocatable
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the memory limits. Memory limits are not specified for the container. The container runtime MUST be able to access the memory limits from the specified path on the mounted volume and the value MUST be the default node allocatable.
*/
framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -268,7 +267,7 @@ func downwardAPIVolumePodForModeTest(name, filePath string, itemMode, defaultMod
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--file_mode=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
@@ -294,7 +293,7 @@ func downwardAPIVolumePodForSimpleTest(name string, filePath string) *v1.Pod {
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--file_content=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
@@ -325,7 +324,7 @@ func downwardAPIVolumeBaseContainers(name, filePath string) []v1.Container {
return []v1.Container{
{
Name: name,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--file_content=" + filePath},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
@@ -353,7 +352,7 @@ func downwardAPIVolumeDefaultBaseContainer(name, filePath string) []v1.Container
return []v1.Container{
{
Name: name,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--file_content=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
@@ -372,7 +371,7 @@ func downwardAPIVolumePodForUpdateTest(name string, labels, annotations map[stri
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
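For reference, a minimal sketch of the Pod shape these downward API volume cases describe: a DownwardAPIVolumeSource whose single item projects metadata.name into a file with an explicit 0400 mode. Field names follow k8s.io/api/core/v1; the image, volume name, paths, and command are illustrative placeholders rather than the values the tests use.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	mode := int32(0400) // file mode -r--------, as in the DefaultMode/Mode cases above

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "downwardapi-volume-example"},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{{
				Name:    "client-container",
				Image:   "busybox", // placeholder; the tests use the mounttest image
				Command: []string{"sh", "-c", "cat /etc/podinfo/podname"},
				VolumeMounts: []v1.VolumeMount{{
					Name:      "podinfo",
					MountPath: "/etc/podinfo",
				}},
			}},
			Volumes: []v1.Volume{{
				Name: "podinfo",
				VolumeSource: v1.VolumeSource{
					DownwardAPI: &v1.DownwardAPIVolumeSource{
						Items: []v1.DownwardAPIVolumeFile{{
							Path: "podname",
							Mode: &mode,
							// Project the pod's own name into the file.
							FieldRef: &v1.ObjectFieldSelector{
								APIVersion: "v1",
								FieldPath:  "metadata.name",
							},
						}},
					},
				},
			}},
		},
	}
	fmt.Println("pod spec built:", pod.Name)
}
```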

View File

@@ -67,139 +67,126 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() {
})
/*
Testname: volume-emptydir-mode-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure the volume has 0777 unix file permissions and tmpfs
mount type.
Release : v1.9
Testname: EmptyDir, medium memory, volume mode default
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs.
*/
framework.ConformanceIt("volume on tmpfs should have the correct mode [NodeConformance]", func() {
doTestVolumeMode(f, testImageRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-root-0644-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a root owned file with 0644 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
Release : v1.9
Testname: EmptyDir, medium memory, volume mode 0644
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0644. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (root,0644,tmpfs) [NodeConformance]", func() {
doTest0644(f, testImageRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-root-0666-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a root owned file with 0666 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
Release : v1.9
Testname: EmptyDir, medium memory, volume mode 0666
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0666. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (root,0666,tmpfs) [NodeConformance]", func() {
doTest0666(f, testImageRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-root-0777-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a root owned file with 0777 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
Release : v1.9
Testname: EmptyDir, medium memory, volume mode 0777
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0777. The volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (root,0777,tmpfs) [NodeConformance]", func() {
doTest0777(f, testImageRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-user-0644-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a user owned file with 0644 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
Release : v1.9
Testname: EmptyDir, medium memory, volume mode 0644, non-root user
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0644. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (non-root,0644,tmpfs) [NodeConformance]", func() {
doTest0644(f, testImageNonRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-user-0666-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a user owned file with 0666 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
Release : v1.9
Testname: EmptyDir, medium memory, volume mode 0666, non-root user
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0666. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (non-root,0666,tmpfs) [NodeConformance]", func() {
doTest0666(f, testImageNonRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-user-0777-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a user owned file with 0777 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
Release : v1.9
Testname: EmptyDir, medium memory, volume mode 0777, non-root user
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0777. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (non-root,0777,tmpfs) [NodeConformance]", func() {
doTest0777(f, testImageNonRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-mode
Description: For a Pod created with an 'emptyDir' Volume, ensure the
volume has 0777 unix file permissions.
Release : v1.9
Testname: EmptyDir, medium default, volume mode default
Description: A Pod created with an 'emptyDir' Volume, the volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs.
*/
framework.ConformanceIt("volume on default medium should have the correct mode [NodeConformance]", func() {
doTestVolumeMode(f, testImageRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-root-0644
Description: For a Pod created with an 'emptyDir' Volume, ensure a
root owned file with 0644 unix file permissions is created and enforced
correctly.
Release : v1.9
Testname: EmptyDir, medium default, volume mode 0644
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0644. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (root,0644,default) [NodeConformance]", func() {
doTest0644(f, testImageRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-root-0666
Description: For a Pod created with an 'emptyDir' Volume, ensure a
root owned file with 0666 unix file permissions is created and enforced
correctly.
Release : v1.9
Testname: EmptyDir, medium default, volume mode 0666
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0666. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (root,0666,default) [NodeConformance]", func() {
doTest0666(f, testImageRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-root-0777
Description: For a Pod created with an 'emptyDir' Volume, ensure a
root owned file with 0777 unix file permissions is created and enforced
correctly.
Release : v1.9
Testname: EmptyDir, medium default, volume mode 0777
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0777. The volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (root,0777,default) [NodeConformance]", func() {
doTest0777(f, testImageRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-user-0644
Description: For a Pod created with an 'emptyDir' Volume, ensure a
user owned file with 0644 unix file permissions is created and enforced
correctly.
Release : v1.9
Testname: EmptyDir, medium default, volume mode 0644, non-root user
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0644. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (non-root,0644,default) [NodeConformance]", func() {
doTest0644(f, testImageNonRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-user-0666
Description: For a Pod created with an 'emptyDir' Volume, ensure a
user owned file with 0666 unix file permissions is created and enforced
correctly.
Release : v1.9
Testname: EmptyDir, medium default, volume mode 0666, non-root user
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0666. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (non-root,0666,default) [NodeConformance]", func() {
doTest0666(f, testImageNonRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-user-0777
Description: For a Pod created with an 'emptyDir' Volume, ensure a
user owned file with 0777 unix file permissions is created and enforced
correctly.
Release : v1.9
Testname: EmptyDir, medium default, volume mode 0777, non-root user
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0777. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (non-root,0777,default) [NodeConformance]", func() {
doTest0777(f, testImageNonRootUid, v1.StorageMediumDefault)
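For orientation, a small sketch of the emptyDir shape these cases exercise: an EmptyDirVolumeSource with medium Memory (tmpfs backed) mounted into one container; leaving Medium empty gives the default medium. Names, image, and the command are illustrative, not the values driven by doTestVolumeMode, doTest0644, and friends.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "emptydir-memory-example"},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{{
				Name:  "test-container",
				Image: "busybox", // placeholder image
				// Print the mount type and the permission bits of the mount point.
				Command:      []string{"sh", "-c", "mount | grep /cache; ls -ld /cache"},
				VolumeMounts: []v1.VolumeMount{{Name: "cache", MountPath: "/cache"}},
			}},
			Volumes: []v1.Volume{{
				Name: "cache",
				VolumeSource: v1.VolumeSource{
					EmptyDir: &v1.EmptyDirVolumeSource{
						// Memory selects a tmpfs-backed volume; omit for the default medium.
						Medium: v1.StorageMediumMemory,
					},
				},
			}},
		},
	}
	fmt.Println("pod spec built:", pod.Name)
}
```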

View File

@@ -21,6 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -33,9 +34,9 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
f := framework.NewDefaultFramework("var-expansion")
/*
Testname: var-expansion-env
Description: Make sure environment variables can be set using an
expansion of previously defined environment variables
Release : v1.9
Testname: Environment variables, expansion
Description: Create a Pod with environment variables. Environment variables defined using previously defined environment variables MUST expand to proper values.
*/
framework.ConformanceIt("should allow composing env vars into new env vars [NodeConformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
@@ -48,7 +49,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
Env: []v1.EnvVar{
{
@@ -78,9 +79,9 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
})
/*
Testname: var-expansion-command
Description: Make sure a container's commands can be set using an
expansion of environment variables.
Release : v1.9
Testname: Environment variables, command expansion
Description: Create a Pod with environment variables and container command using them. Container command using the defined environment variables MUST expand to proper values.
*/
framework.ConformanceIt("should allow substituting values in a container's command [NodeConformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
@@ -93,7 +94,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "TEST_VAR=wrong echo \"$(TEST_VAR)\""},
Env: []v1.EnvVar{
{
@@ -113,9 +114,9 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
})
/*
Testname: var-expansion-arg
Description: Make sure a container's args can be set using an
expansion of environment variables.
Release : v1.9
Testname: Environment variables, command argument expansion
Description: Create a Pod with environment variables and container command arguments using them. Container command arguments using the defined environment variables MUST expand to proper values.
*/
framework.ConformanceIt("should allow substituting values in a container's args [NodeConformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
@@ -128,7 +129,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c"},
Args: []string{"TEST_VAR=wrong echo \"$(TEST_VAR)\""},
Env: []v1.EnvVar{
@@ -164,7 +165,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "test -d /testcontainer/" + podName + ";echo $?"},
Env: []v1.EnvVar{
{
@@ -225,7 +226,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Env: []v1.EnvVar{
{
Name: "POD_NAME",
@@ -274,7 +275,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Env: []v1.EnvVar{
{
Name: "POD_NAME",
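A compact sketch of the $(VAR) expansion these cases rely on: variables referenced in env values, command, and args are expanded by the kubelet from previously defined variables, not by a shell. The variable names and image below are invented for illustration.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "var-expansion-example"},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{{
				Name:  "dapi-container",
				Image: "busybox", // placeholder image
				Env: []v1.EnvVar{
					{Name: "FOO", Value: "foo-value"},
					// BAR is composed from the previously defined FOO.
					{Name: "BAR", Value: "bar-$(FOO)"},
				},
				// $(BAR) in command/args is expanded by the kubelet before the shell runs.
				Command: []string{"sh", "-c"},
				Args:    []string{"echo \"$(BAR)\""},
			}},
		},
	}
	fmt.Println("pod spec built:", pod.Name)
}
```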

View File

@@ -24,6 +24,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
)
@@ -40,10 +41,9 @@ var _ = Describe("[sig-storage] HostPath", func() {
})
/*
Testname: volume-hostpath-mode
Description: For a Pod created with a 'HostPath' Volume, ensure the
volume is a directory with 0777 unix file permissions and that is has
the sticky bit (mode flag t) set.
Release : v1.9
Testname: Host path, volume mode default
Description: Create a Pod with a host volume mounted. The mounted volume MUST be a directory with permission mode -rwxrwxrwx and MUST have the sticky bit (mode flag t) set.
*/
framework.ConformanceIt("should give a volume the correct mode [NodeConformance]", func() {
source := &v1.HostPathVolumeSource{
@@ -136,7 +136,9 @@ var _ = Describe("[sig-storage] HostPath", func() {
// Create the subPath directory on the host
existing := path.Join(source.Path, subPath)
result, err := framework.SSH(fmt.Sprintf("mkdir -p %s", existing), framework.GetNodeExternalIP(&nodeList.Items[0]), framework.TestContext.Provider)
externalIP, err := framework.GetNodeExternalIP(&nodeList.Items[0])
framework.ExpectNoError(err)
result, err := framework.SSH(fmt.Sprintf("mkdir -p %s", existing), externalIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
framework.ExpectNoError(err)
if result.Code != 0 {
@@ -180,7 +182,9 @@ var _ = Describe("[sig-storage] HostPath", func() {
// Create the subPath file on the host
existing := path.Join(source.Path, subPath)
result, err := framework.SSH(fmt.Sprintf("echo \"mount-tester new file\" > %s", existing), framework.GetNodeExternalIP(&nodeList.Items[0]), framework.TestContext.Provider)
externalIP, err := framework.GetNodeExternalIP(&nodeList.Items[0])
framework.ExpectNoError(err)
result, err := framework.SSH(fmt.Sprintf("echo \"mount-tester new file\" > %s", existing), externalIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
framework.ExpectNoError(err)
if result.Code != 0 {
@@ -236,7 +240,7 @@ func testPodWithHostVol(path string, source *v1.HostPathVolumeSource) *v1.Pod {
Containers: []v1.Container{
{
Name: containerName1,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
@@ -249,7 +253,7 @@ func testPodWithHostVol(path string, source *v1.HostPathVolumeSource) *v1.Pod {
},
{
Name: containerName2,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
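As a brief sketch of the HostPath shape used here: a HostPathVolumeSource pointing at a directory on the node, mounted into a container that reports its mode. The path, names, and image are illustrative placeholders; the tests additionally pre-create subPath content over SSH as shown above.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "hostpath-example"},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{{
				Name:  "test-container",
				Image: "busybox", // placeholder; the tests use the mounttest image
				// Report the mode of the mounted directory.
				Command:      []string{"sh", "-c", "ls -ld /test-volume"},
				VolumeMounts: []v1.VolumeMount{{Name: "test-volume", MountPath: "/test-volume"}},
			}},
			Volumes: []v1.Volume{{
				Name: "test-volume",
				VolumeSource: v1.VolumeSource{
					HostPath: &v1.HostPathVolumeSource{
						Path: "/tmp/hostpath-example", // directory on the node; illustrative
					},
				},
			}},
		},
	}
	fmt.Println("pod spec built:", pod.Name)
}
```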

View File

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
"context"
"fmt"
"strconv"
"time"
@@ -26,6 +27,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/watch"
watchtools "k8s.io/client-go/tools/watch"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/test/e2e/framework"
@@ -42,7 +44,15 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
podClient = f.PodClient()
})
It("should invoke init containers on a RestartNever pod", func() {
/*
Release: v1.12
Testname: init-container-starts-app-restartnever-pod
Description: Ensure that all InitContainers are started
and all containers in the pod are voluntarily terminated with exit status 0,
and the system is not going to restart any of these containers
when the Pod has restart policy RestartNever.
*/
framework.ConformanceIt("should invoke init containers on a RestartNever pod", func() {
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
@@ -59,19 +69,19 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
InitContainers: []v1.Container{
{
Name: "init1",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
},
{
Name: "init2",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
},
},
Containers: []v1.Container{
{
Name: "run1",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
},
},
@@ -82,7 +92,9 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodCompleted)
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
defer cancel()
event, err := watchtools.UntilWithoutRetry(ctx, wr, conditions.PodCompleted)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
@@ -99,7 +111,15 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
}
})
It("should invoke init containers on a RestartAlways pod", func() {
/*
Release: v1.12
Testname: init-container-starts-app-restartalways-pod
Description: Ensure that all InitContainers are started,
all containers in the pod are started,
and at least one container is still running or is in the process of being restarted
when the Pod has restart policy RestartAlways.
*/
framework.ConformanceIt("should invoke init containers on a RestartAlways pod", func() {
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
@@ -115,12 +135,12 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
InitContainers: []v1.Container{
{
Name: "init1",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
},
{
Name: "init2",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
},
},
@@ -131,7 +151,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(50*1024*1024, resource.DecimalSI),
},
},
},
@@ -143,7 +163,9 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodRunning)
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
defer cancel()
event, err := watchtools.UntilWithoutRetry(ctx, wr, conditions.PodRunning)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
@@ -160,7 +182,15 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
}
})
It("should not start app containers if init containers fail on a RestartAlways pod", func() {
/*
Release: v1.12
Testname: init-container-fails-stops-app-restartalways-pod
Description: Ensure that the app container is not started
when the InitContainers fail to start,
the Pod has restarted a few times,
and the Pod has restart policy RestartAlways.
*/
framework.ConformanceIt("should not start app containers if init containers fail on a RestartAlways pod", func() {
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
@@ -177,12 +207,12 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
InitContainers: []v1.Container{
{
Name: "init1",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/false"},
},
{
Name: "init2",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
},
},
@@ -193,7 +223,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(50*1024*1024, resource.DecimalSI),
},
},
},
@@ -206,8 +236,10 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(
framework.PodStartTimeout, wr,
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
defer cancel()
event, err := watchtools.UntilWithoutRetry(
ctx, wr,
// check for the first container to fail at least once
func(evt watch.Event) (bool, error) {
switch t := evt.Object.(type) {
@@ -268,7 +300,13 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
})
It("should not start app containers and fail the pod if init containers fail on a RestartNever pod", func() {
/*
Release: v1.12
Testname: init-container-fails-stops-app-restartnever-pod
Description: Ensure that the app container is not started
when at least one InitContainer fails to start and the Pod has restart policy RestartNever.
*/
framework.ConformanceIt("should not start app containers and fail the pod if init containers fail on a RestartNever pod", func() {
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
@@ -285,24 +323,24 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
InitContainers: []v1.Container{
{
Name: "init1",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
},
{
Name: "init2",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/false"},
},
},
Containers: []v1.Container{
{
Name: "run1",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(50*1024*1024, resource.DecimalSI),
},
},
},
@@ -316,8 +354,10 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(
framework.PodStartTimeout, wr,
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
defer cancel()
event, err := watchtools.UntilWithoutRetry(
ctx, wr,
// check for the second container to fail at least once
func(evt watch.Event) (bool, error) {
switch t := evt.Object.(type) {
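To make the scenarios above concrete, here is a stripped-down sketch of a Pod whose first init container fails, so the app container never starts; with RestartPolicyNever the pod fails outright, while RestartPolicyAlways keeps retrying the failing init container. The image and names are placeholders.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "init-container-example"},
		Spec: v1.PodSpec{
			// Switch to v1.RestartPolicyAlways to model the retry scenario above.
			RestartPolicy: v1.RestartPolicyNever,
			InitContainers: []v1.Container{
				{
					Name:    "init1",
					Image:   "busybox",                   // placeholder image
					Command: []string{"/bin/false"},      // fails, so run1 must never start
				},
				{
					Name:    "init2",
					Image:   "busybox",
					Command: []string{"/bin/true"},
				},
			},
			Containers: []v1.Container{{
				Name:    "run1",
				Image:   "busybox",
				Command: []string{"/bin/true"},
			}},
		},
	}
	fmt.Println("pod spec built:", pod.Name)
}
```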

View File

@@ -51,9 +51,12 @@ var _ = framework.KubeDescribe("KubeletManagedEtcHosts", func() {
}
/*
Testname: kubelet-managed-etc-hosts
Description: Make sure Kubelet correctly manages /etc/hosts and mounts
it into the container.
Release : v1.9
Testname: Kubelet, managed etc hosts
Description: Create a Pod with containers with hostNetwork set to false, where one of the containers mounts the /etc/hosts file from the host. Create a second Pod with hostNetwork set to true.
1. The Pod with hostNetwork=false MUST have the /etc/hosts file of its containers managed by the Kubelet.
2. For the Pod with hostNetwork=false whose container mounts /etc/hosts from the host, that /etc/hosts file MUST NOT be managed by the Kubelet.
3. For the Pod with hostNetwork=true, the /etc/hosts file MUST NOT be managed by the Kubelet.
*/
framework.ConformanceIt("should test kubelet managed /etc/hosts file [NodeConformance]", func() {
By("Setting up the test")

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e_node
package common
import (
"time"
@@ -84,6 +84,11 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
}, preStopWaitTimeout, podCheckInterval).Should(BeNil())
}
}
/*
Release : v1.9
Testname: Pod Lifecycle, post start exec hook
Description: When a post start handler is specified in the container lifecycle using an Exec action, then the handler MUST be invoked after the start of the container. A server pod is created that will serve http requests; create a second pod with a container lifecycle specifying a post start that invokes the server pod using ExecAction to validate that the post start is executed.
*/
framework.ConformanceIt("should execute poststart exec hook properly [NodeConformance]", func() {
lifecycle := &v1.Lifecycle{
PostStart: &v1.Handler{
@@ -95,6 +100,11 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
podWithHook := getPodWithHook("pod-with-poststart-exec-hook", imageutils.GetE2EImage(imageutils.Hostexec), lifecycle)
testPodWithHook(podWithHook)
})
/*
Release : v1.9
Testname: Pod Lifecycle, prestop exec hook
Description: When a pre-stop handler is specified in the container lifecycle using an Exec action, then the handler MUST be invoked before the container is terminated. A server pod is created that will serve http requests; create a second pod with a container lifecycle specifying a pre-stop that invokes the server pod using ExecAction to validate that the pre-stop is executed.
*/
framework.ConformanceIt("should execute prestop exec hook properly [NodeConformance]", func() {
lifecycle := &v1.Lifecycle{
PreStop: &v1.Handler{
@@ -106,6 +116,11 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
podWithHook := getPodWithHook("pod-with-prestop-exec-hook", imageutils.GetE2EImage(imageutils.Hostexec), lifecycle)
testPodWithHook(podWithHook)
})
/*
Release : v1.9
Testname: Pod Lifecycle, post start http hook
Description: When a post start handler is specified in the container lifecycle using an HttpGet action, then the handler MUST be invoked after the start of the container. A server pod is created that will serve http requests; create a second pod with a container lifecycle specifying a post start that invokes the server pod to validate that the post start is executed.
*/
framework.ConformanceIt("should execute poststart http hook properly [NodeConformance]", func() {
lifecycle := &v1.Lifecycle{
PostStart: &v1.Handler{
@@ -119,6 +134,11 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
podWithHook := getPodWithHook("pod-with-poststart-http-hook", imageutils.GetPauseImageName(), lifecycle)
testPodWithHook(podWithHook)
})
/*
Release : v1.9
Testname: Pod Lifecycle, prestop http hook
Description: When a pre-stop handler is specified in the container lifecycle using an HttpGet action, then the handler MUST be invoked before the container is terminated. A server pod is created that will serve http requests; create a second pod with a container lifecycle specifying a pre-stop that invokes the server pod to validate that the pre-stop is executed.
*/
framework.ConformanceIt("should execute prestop http hook properly [NodeConformance]", func() {
lifecycle := &v1.Lifecycle{
PreStop: &v1.Handler{
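A small sketch of the lifecycle handlers these cases attach to a container: a PostStart Exec handler and a PreStop HTTPGet handler, using the v1.Handler type this file already works with. The command, host, path, and port are illustrative; the tests point the handlers at a server pod.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	lifecycle := &v1.Lifecycle{
		// Runs inside the container right after it starts.
		PostStart: &v1.Handler{
			Exec: &v1.ExecAction{
				Command: []string{"sh", "-c", "echo post-start > /tmp/hook"},
			},
		},
		// The kubelet issues this HTTP GET before the container is terminated.
		PreStop: &v1.Handler{
			HTTPGet: &v1.HTTPGetAction{
				Host: "10.0.0.1", // illustrative target IP
				Path: "/echo?msg=prestop",
				Port: intstr.FromInt(8080),
			},
		},
	}

	container := v1.Container{
		Name:      "pod-with-hooks",
		Image:     "busybox", // placeholder image
		Lifecycle: lifecycle,
	}
	fmt.Println("container with hooks:", container.Name)
}
```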

View File

@@ -31,9 +31,10 @@ var _ = Describe("[sig-network] Networking", func() {
// expect exactly one unique hostname. Each of these endpoints reports
// its own hostname.
/*
Testname: networking-intra-pod-http
Description: Try to hit test endpoints from a test container and make
sure each of them can report a unique hostname.
Release : v1.9
Testname: Networking, intra pod http
Description: Create a hostexec pod that is capable of running curl and netcat commands. Create a test Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 for udp. The netserver service proxies are created on a specified number of nodes.
The kubectl exec on the webserver container MUST reach a http port on each of the service proxy endpoints in the cluster and the request MUST be successful. The container will execute a curl command to reach the service port within the specified max retry limit and MUST result in reporting unique hostnames.
*/
framework.ConformanceIt("should function for intra-pod communication: http [NodeConformance]", func() {
config := framework.NewCoreNetworkingTestConfig(f)
@@ -43,9 +44,10 @@ var _ = Describe("[sig-network] Networking", func() {
})
/*
Testname: networking-intra-pod-udp
Description: Try to hit test endpoints from a test container using udp
and make sure each of them can report a unique hostname.
Release : v1.9
Testname: Networking, intra pod udp
Description: Create a hostexec pod that is capable of running curl and netcat commands. Create a test Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 for udp. The netserver service proxies are created on a specified number of nodes.
The kubectl exec on the webserver container MUST reach a udp port on each of the service proxy endpoints in the cluster and the request MUST be successful. The container will execute a curl command to reach the service port within the specified max retry limit and MUST result in reporting unique hostnames.
*/
framework.ConformanceIt("should function for intra-pod communication: udp [NodeConformance]", func() {
config := framework.NewCoreNetworkingTestConfig(f)
@@ -55,9 +57,10 @@ var _ = Describe("[sig-network] Networking", func() {
})
/*
Testname: networking-node-pod-http
Description: Try to hit test endpoints from the pod and make sure each
of them can report a unique hostname.
Release : v1.9
Testname: Networking, intra pod http, from node
Description: Create a hostexec pod that is capable of running curl and netcat commands. Create a test Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 for udp. The netserver service proxies are created on a specified number of nodes.
The kubectl exec on the webserver container MUST reach a http port on each of the service proxy endpoints in the cluster using a http post (protocol=tcp) and the request MUST be successful. The container will execute a curl command to reach the service port within the specified max retry limit and MUST result in reporting unique hostnames.
*/
framework.ConformanceIt("should function for node-pod communication: http [NodeConformance]", func() {
config := framework.NewCoreNetworkingTestConfig(f)
@@ -67,9 +70,10 @@ var _ = Describe("[sig-network] Networking", func() {
})
/*
Testname: networking-node-pod-udp
Description: Try to hit test endpoints from the pod using udp and make sure
each of them can report a unique hostname.
Release : v1.9
Testname: Networking, intra pod udp, from node
Description: Create a hostexec pod that is capable of running curl and netcat commands. Create a test Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 for udp. The netserver service proxies are created on a specified number of nodes.
The kubectl exec on the webserver container MUST reach a http port on each of the service proxy endpoints in the cluster using a http post (protocol=udp) and the request MUST be successful. The container will execute a curl command to reach the service port within the specified max retry limit and MUST result in reporting unique hostnames.
*/
framework.ConformanceIt("should function for node-pod communication: udp [NodeConformance]", func() {
config := framework.NewCoreNetworkingTestConfig(f)

92
vendor/k8s.io/kubernetes/test/e2e/common/node_lease.go generated vendored Normal file
View File

@@ -0,0 +1,92 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"time"
coordv1beta1 "k8s.io/api/coordination/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("[Feature:NodeLease][NodeAlphaFeature:NodeLease]", func() {
f := framework.NewDefaultFramework("node-lease-test")
Context("when the NodeLease feature is enabled", func() {
It("the Kubelet should create and update a lease in the kube-node-lease namespace", func() {
leaseClient := f.ClientSet.CoordinationV1beta1().Leases(corev1.NamespaceNodeLease)
var (
err error
lease *coordv1beta1.Lease
)
// check that lease for this Kubelet exists in the kube-node-lease namespace
Eventually(func() error {
lease, err = leaseClient.Get(framework.TestContext.NodeName, metav1.GetOptions{})
if err != nil {
return err
}
return nil
}, 5*time.Minute, 5*time.Second).Should(BeNil())
// check basic expectations for the lease
Expect(expectLease(lease)).To(BeNil())
// ensure that at least one lease renewal happens within the
// lease duration by checking for a change to renew time
Eventually(func() error {
newLease, err := leaseClient.Get(framework.TestContext.NodeName, metav1.GetOptions{})
if err != nil {
return err
}
// check basic expectations for the latest lease
if err := expectLease(newLease); err != nil {
return err
}
// check that RenewTime has been updated on the latest lease
newTime := (*newLease.Spec.RenewTime).Time
oldTime := (*lease.Spec.RenewTime).Time
if !newTime.After(oldTime) {
return fmt.Errorf("new lease has time %v, which is not after old lease time %v", newTime, oldTime)
}
return nil
}, time.Duration(*lease.Spec.LeaseDurationSeconds)*time.Second,
time.Duration(*lease.Spec.LeaseDurationSeconds/3)*time.Second).Should(BeNil())
})
})
})
func expectLease(lease *coordv1beta1.Lease) error {
// expect values for HolderIdentity, LeaseDurationSeconds, and RenewTime
if lease.Spec.HolderIdentity == nil {
return fmt.Errorf("Spec.HolderIdentity should not be nil")
}
if lease.Spec.LeaseDurationSeconds == nil {
return fmt.Errorf("Spec.LeaseDurationSeconds should not be nil")
}
if lease.Spec.RenewTime == nil {
return fmt.Errorf("Spec.RenewTime should not be nil")
}
// ensure that the HolderIdentity matches the node name
if *lease.Spec.HolderIdentity != framework.TestContext.NodeName {
return fmt.Errorf("Spec.HolderIdentity (%v) should match the node name (%v)", *lease.Spec.HolderIdentity, framework.TestContext.NodeName)
}
return nil
}

View File

@@ -39,6 +39,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/types"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -46,6 +47,10 @@ var (
buildBackOffDuration = time.Minute
syncLoopFrequency = 10 * time.Second
maxBackOffTolerance = time.Duration(1.3 * float64(kubelet.MaxContainerBackOff))
// maxReadyStatusUpdateTolerance specifies the latency that allows kubelet to update pod status.
// When kubelet is under heavy load (tests may be parallelized), the delay may be longer, hence
// causing tests to be flaky.
maxReadyStatusUpdateTolerance = 10 * time.Second
)
// testHostIP tests that a pod gets a host IP
@@ -129,9 +134,9 @@ var _ = framework.KubeDescribe("Pods", func() {
})
/*
Testname: pods-created-pod-assigned-hostip
Description: Make sure when a pod is created that it is assigned a host IP
Address.
Release : v1.9
Testname: Pods, assigned hostip
Description: Create a Pod. Pod status MUST return successfully and contain a valid IP address.
*/
framework.ConformanceIt("should get a host IP [NodeConformance]", func() {
name := "pod-hostip-" + string(uuid.NewUUID())
@@ -151,9 +156,9 @@ var _ = framework.KubeDescribe("Pods", func() {
})
/*
Testname: pods-submitted-removed
Description: Makes sure a pod is created, a watch can be setup for the pod,
pod creation was observed, pod is deleted, and pod deletion is observed.
Release : v1.9
Testname: Pods, lifecycle
Description: A Pod is created with a unique label. The Pod MUST be accessible when queried using the label selector upon creation. Add a watch and check that the Pod is running. The Pod is then deleted and the deletion timestamp is observed. The watch MUST return the pod deleted event. A query with the original selector for the Pod MUST return an empty list.
*/
framework.ConformanceIt("should be submitted and removed [NodeConformance]", func() {
By("creating the pod")
@@ -171,7 +176,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
},
},
},
@@ -277,8 +282,9 @@ var _ = framework.KubeDescribe("Pods", func() {
})
/*
Testname: pods-updated-successfully
Description: Make sure it is possible to successfully update a pod's labels.
Release : v1.9
Testname: Pods, update
Description: Create a Pod with a unique label. Query for the Pod with the label as selector MUST be successful. Update the pod to change the value of the Label. Query for the Pod with the new value for the label MUST be successful.
*/
framework.ConformanceIt("should be updated [NodeConformance]", func() {
By("creating the pod")
@@ -296,7 +302,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
},
},
},
@@ -330,10 +336,9 @@ var _ = framework.KubeDescribe("Pods", func() {
})
/*
Testname: pods-update-active-deadline-seconds
Description: Make sure it is possible to create a pod, update its
activeDeadlineSecondsValue, and then waits for the deadline to pass
and verifies the pod is terminated.
Release : v1.9
Testname: Pods, ActiveDeadlineSeconds
Description: Create a Pod with a unique label. Query for the Pod with the label as selector MUST be successful. The Pod is updated with ActiveDeadlineSeconds set on the Pod spec. The Pod MUST terminate once the specified time elapses.
*/
framework.ConformanceIt("should allow activeDeadlineSeconds to be updated [NodeConformance]", func() {
By("creating the pod")
@@ -351,7 +356,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
},
},
},
@@ -377,9 +382,9 @@ var _ = framework.KubeDescribe("Pods", func() {
})
/*
Testname: pods-contain-services-environment-variables
Description: Make sure that when a pod is created it contains environment
variables for each active service.
Release : v1.9
Testname: Pods, service environment variables
Description: Create a server Pod listening on port 9376. A Service called fooservice is created for the server Pod listening on port 8765 targeting port 8080. If a new Pod is created in the cluster then the Pod MUST have the fooservice environment variables available from this new Pod. The newly created Pod MUST have environment variables such as FOOSERVICE_SERVICE_HOST, FOOSERVICE_SERVICE_PORT, FOOSERVICE_PORT, FOOSERVICE_PORT_8765_TCP_PORT, FOOSERVICE_PORT_8765_TCP_PROTO, FOOSERVICE_PORT_8765_TCP and FOOSERVICE_PORT_8765_TCP_ADDR that are populated with proper values.
*/
framework.ConformanceIt("should contain environment variables for services [NodeConformance]", func() {
// Make a pod that will be a service.
@@ -442,7 +447,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: containerName,
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
},
},
@@ -481,7 +486,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: "main",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "echo container is alive; sleep 600"},
},
},
@@ -499,8 +504,8 @@ var _ = framework.KubeDescribe("Pods", func() {
Param("stderr", "1").
Param("stdout", "1").
Param("container", pod.Spec.Containers[0].Name).
Param("command", "cat").
Param("command", "/etc/resolv.conf")
Param("command", "echo").
Param("command", "remote execution test")
url := req.URL()
ws, err := framework.OpenWebSocketForURL(url, config, []string{"channel.k8s.io"})
@@ -536,8 +541,8 @@ var _ = framework.KubeDescribe("Pods", func() {
if buf.Len() == 0 {
return fmt.Errorf("Unexpected output from server")
}
if !strings.Contains(buf.String(), "nameserver") {
return fmt.Errorf("Expected to find 'nameserver' in %q", buf.String())
if !strings.Contains(buf.String(), "remote execution test") {
return fmt.Errorf("Expected to find 'remote execution test' in %q", buf.String())
}
return nil
}, time.Minute, 10*time.Second).Should(BeNil())
@@ -557,7 +562,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: "main",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "echo container is alive; sleep 10000"},
},
},
@@ -612,7 +617,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: containerName,
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "sleep 5", "/crash/missing"},
},
},
@@ -623,7 +628,7 @@ var _ = framework.KubeDescribe("Pods", func() {
By("updating the image")
podClient.Update(podName, func(pod *v1.Pod) {
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.NginxSlim)
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.Nginx)
})
time.Sleep(syncLoopFrequency)
@@ -653,7 +658,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: containerName,
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "sleep 5", "/crash/missing"},
},
},
@@ -694,4 +699,64 @@ var _ = framework.KubeDescribe("Pods", func() {
framework.Failf("expected %s back-off got=%s on delay2", kubelet.MaxContainerBackOff, delay2)
}
})
// TODO(freehan): label the test to be [NodeConformance] after tests are proven to be stable.
It("should support pod readiness gates [NodeFeature:PodReadinessGate]", func() {
podName := "pod-ready"
readinessGate1 := "k8s.io/test-condition1"
readinessGate2 := "k8s.io/test-condition2"
patchStatusFmt := `{"status":{"conditions":[{"type":%q, "status":%q}]}}`
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"test": "pod-readiness-gate"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pod-readiness-gate",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "echo container is alive; sleep 10000"},
},
},
ReadinessGates: []v1.PodReadinessGate{
{ConditionType: v1.PodConditionType(readinessGate1)},
{ConditionType: v1.PodConditionType(readinessGate2)},
},
},
}
validatePodReadiness := func(expectReady bool) {
Expect(wait.Poll(time.Second, maxReadyStatusUpdateTolerance, func() (bool, error) {
podReady := podClient.PodIsReady(podName)
res := expectReady == podReady
if !res {
framework.Logf("Expect the Ready condition of pod %q to be %v, but got %v", podName, expectReady, podReady)
}
return res, nil
})).NotTo(HaveOccurred())
}
By("submitting the pod to kubernetes")
podClient.CreateSync(pod)
Expect(podClient.PodIsReady(podName)).To(BeFalse(), "Expect pod's Ready condition to be false initially.")
By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate1))
_, err := podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), "status")
Expect(err).NotTo(HaveOccurred())
// Sleep for 10 seconds.
time.Sleep(maxReadyStatusUpdateTolerance)
Expect(podClient.PodIsReady(podName)).To(BeFalse(), "Expect pod's Ready condition to be false with only one condition in readinessGates equal to True")
By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate2))
_, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), "status")
Expect(err).NotTo(HaveOccurred())
validatePodReadiness(true)
By(fmt.Sprintf("patching pod status with condition %q to false", readinessGate1))
_, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), "status")
Expect(err).NotTo(HaveOccurred())
validatePodReadiness(false)
})
})
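As a side note, a minimal sketch of the activeDeadlineSeconds update described above: the field is a *int64 on the pod spec, and once it is set the pod is terminated after the deadline passes. The names, image, and value are illustrative; the test applies the change through podClient.Update on a running pod.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	deadline := int64(5) // seconds the pod may stay active once the field is set

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "pod-update-activedeadlineseconds-example",
			Labels: map[string]string{"time": "example"},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:  "nginx",
				Image: "nginx", // placeholder image
			}},
		},
	}

	// In the e2e test this field is set via an update on the running pod;
	// here we only show it being set on the spec.
	pod.Spec.ActiveDeadlineSeconds = &deadline

	fmt.Printf("pod %s is allowed %d seconds after the deadline is set\n",
		pod.Name, *pod.Spec.ActiveDeadlineSeconds)
}
```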

View File

@@ -24,6 +24,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
type PrivilegedPodTestConfig struct {
@@ -90,14 +91,14 @@ func (c *PrivilegedPodTestConfig) createPodsSpec() *v1.Pod {
Containers: []v1.Container{
{
Name: c.privilegedContainer,
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: &v1.SecurityContext{Privileged: &isPrivileged},
Command: []string{"/bin/sleep", "10000"},
},
{
Name: c.notPrivilegedContainer,
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: &v1.SecurityContext{Privileged: &notPrivileged},
Command: []string{"/bin/sleep", "10000"},

View File

@@ -26,6 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -36,16 +37,18 @@ var _ = Describe("[sig-storage] Projected", func() {
f := framework.NewDefaultFramework("projected")
/*
Testname: projected-secret-no-defaultMode
Description: Simple projected Secret test with no defaultMode set.
Release : v1.9
Testname: Projected Volume, Secrets, volume mode default
Description: A Pod is created with a projected volume source secret to store a secret with a specified key with default permission mode. Pod MUST be able to read the content of the key successfully and the mode MUST be -rw-r--r-- by default.
*/
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil)
})
/*
Testname: projected-secret-with-defaultMode
Description: Simple projected Secret test with defaultMode set.
Release : v1.9
Testname: Projected Volume, Secrets, volume mode 0400
Description: A Pod is created with a projected volume source secret to store a secret with a specified key with permission mode set to 0400 on the Pod. Pod MUST be able to read the content of the key successfully and the mode MUST be -r--------.
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [NodeConformance]", func() {
defaultMode := int32(0400)
@@ -53,9 +56,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-secret-with-nonroot-defaultMode-fsGroup
Description: Simple projected Secret test as non-root with
defaultMode and fsGroup set.
Release : v1.9
Testname: Project Volume, Secrets, non-root, custom fsGroup
Description: A Pod is created with a projected volume source secret to store a secret with a specified key. The volume has permission mode set to 0440, fsgroup set to 1001 and user set to non-root uid of 1000. Pod MUST be able to read the content of the key successfully and the mode MUST be -r--r-----.
*/
framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance]", func() {
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
@@ -65,19 +68,18 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-secret-simple-mapped
Description: Simple projected Secret test, by setting a secret and
mounting it to a volume with a custom path (mapping) on the pod with
no other settings and make sure the pod actually consumes it.
Release : v1.9
Testname: Projected Volume, Secrets, mapped
Description: A Pod is created with a projected volume source secret to store a secret with a specified key with default permission mode. The secret is also mapped to a custom path. Pod MUST be able to read the content of the key successfully and the mode MUST be -r-------- on the mapped volume.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
doProjectedSecretE2EWithMapping(f, nil)
})
/*
Testname: projected-secret-with-item-mode-mapped
Description: Repeat the projected-secret-simple-mapped but this time
with an item mode (e.g. 0400) for the secret map item.
Release : v1.9
Testname: Projected Volume, Secrets, mapped, volume mode 0400
Description: A Pod is created with a projected volume source secret to store a secret with a specified key with permission mode set to 0400. The secret is also mapped to a specific name. Pod MUST be able to read the content of the key successfully and the mode MUST be -r-------- on the mapped volume.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [NodeConformance]", func() {
mode := int32(0400)
@@ -106,9 +108,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-secret-multiple-volumes
Description: Make sure secrets works when mounted as two different
volumes on the same node.
Release : v1.9
Testname: Projected Volume, Secrets, mapped, multiple paths
Description: A Pod is created with a projected volume source secret to store a secret with a specified key. The secret is mapped to two different volume mounts. Pod MUST be able to read the content of the key successfully from the two volume mounts and the mode MUST be -r-------- on the mapped volumes.
*/
framework.ConformanceIt("should be consumable in multiple volumes in a pod [NodeConformance]", func() {
// This test ensures that the same secret can be mounted in multiple
@@ -171,7 +173,7 @@ var _ = Describe("[sig-storage] Projected", func() {
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/projected-secret-volume/data-1",
"--file_mode=/etc/projected-secret-volume/data-1"},
@@ -200,8 +202,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-secret-simple-optional
Description: Make sure secrets works when optional updates included.
Release : v1.9
Testname: Projected Volume, Secrets, create, update and delete
Description: Create a Pod with three containers with secrets namely a create, update and delete container. Create Container when started MUST not have a secret, update and delete containers MUST be created with a secret value. Create a secret in the create container, the Pod MUST be able to read the secret from the create container. Update the secret in the update container, Pod MUST be able to read the updated secret value. Delete the secret in the delete container. Pod MUST fail to read the secret from the delete container.
*/
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
@@ -320,7 +323,7 @@ var _ = Describe("[sig-storage] Projected", func() {
Containers: []v1.Container{
{
Name: deleteContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-secret-volumes/delete/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -332,7 +335,7 @@ var _ = Describe("[sig-storage] Projected", func() {
},
{
Name: updateContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-secret-volumes/update/data-3"},
VolumeMounts: []v1.VolumeMount{
{
@@ -344,7 +347,7 @@ var _ = Describe("[sig-storage] Projected", func() {
},
{
Name: createContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-secret-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -401,18 +404,18 @@ var _ = Describe("[sig-storage] Projected", func() {
// Part 2/3 - ConfigMaps
/*
Testname: projected-volume-configMap-nomappings-succeeds
Description: Make sure that a projected volume with a configMap with
no mappings succeeds properly.
Release : v1.9
Testname: Projected Volume, ConfigMap, volume mode default
Description: A Pod is created with projected volume source ConfigMap to store a configMap with default permission mode. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--.
*/
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
doProjectedConfigMapE2EWithoutMappings(f, 0, 0, nil)
})
/*
Testname: projected-volume-configMap-consumable-defaultMode
Description: Make sure that a projected volume configMap is consumable
with defaultMode set.
Release : v1.9
Testname: Projected Volume, ConfigMap, volume mode 0400
Description: A Pod is created with projected volume source ConfigMap to store a configMap with permission mode set to 0400. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -r--------.
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [NodeConformance]", func() {
defaultMode := int32(0400)
@@ -425,9 +428,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-volume-configMap-consumable-nonroot
Description: Make sure that a projected volume configMap is consumable
by a non-root userID.
Release : v1.9
Testname: Projected Volume, ConfigMap, non-root user
Description: A Pod is created with projected volume source ConfigMap to store a configMap as non-root user with uid 1000. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--.
*/
framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func() {
doProjectedConfigMapE2EWithoutMappings(f, 1000, 0, nil)
@@ -438,19 +441,18 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-configmap-simple-mapped
Description: Simplest projected ConfigMap test, by setting a config
map and mounting it to a volume with a custom path (mapping) on the
pod with no other settings and make sure the pod actually consumes it.
Release : v1.9
Testname: Projected Volume, ConfigMap, mapped
Description: A Pod is created with projected volume source ConfigMap to store a configMap with default permission mode. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -rw-r--r--.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
doProjectedConfigMapE2EWithMappings(f, 0, 0, nil)
})
/*
Testname: projected-secret-with-item-mode-mapped
Description: Repeat the projected-secret-simple-mapped but this time
with an item mode (e.g. 0400) for the secret map item
Release : v1.9
Testname: Projected Volume, ConfigMap, mapped, volume mode 0400
Description: A Pod is created with projected volume source ConfigMap to store a configMap with permission mode set to 0400. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -r--r--r--.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [NodeConformance]", func() {
mode := int32(0400)
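For the mapped variants described above, the custom path and the per-item mode live on KeyToPath entries inside the projection. A small, hypothetical sketch of that piece (the configMap name and key are assumptions, not values from the test):

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

func main() {
	// 0400 (octal), applied only to the single mapped item via KeyToPath.Mode.
	itemMode := int32(0400)
	projection := v1.VolumeProjection{
		ConfigMap: &v1.ConfigMapProjection{
			LocalObjectReference: v1.LocalObjectReference{Name: "my-configmap"}, // assumed name
			Items: []v1.KeyToPath{{
				Key:  "data-2",
				Path: "path/to/data-2", // custom location inside the mount, as in the mapped tests
				Mode: &itemMode,
			}},
		},
	}
	fmt.Printf("%+v\n", projection.ConfigMap)
}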
@@ -458,9 +460,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-configmap-simpler-user-mapped
Description: Repeat the projected-config-map-simple-mapped but this
time with a user other than root.
Release : v1.9
Testname: Projected Volume, ConfigMap, mapped, non-root user
Description: A Pod is created with projected volume source ConfigMap to store a configMap as non-root user with uid 1000. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -r--r--r--.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func() {
doProjectedConfigMapE2EWithMappings(f, 1000, 0, nil)
@@ -471,10 +473,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-volume-configMaps-updated-successfully
Description: Make sure that if a projected volume has configMaps,
that the values in these configMaps can be updated, deleted,
and created.
Release : v1.9
Testname: Projected Volume, ConfigMap, update
Description: A Pod is created with projected volume source ConfigMap to store a configMap and performs a create and update to a new value. Pod MUST be able to create the configMap with value-1. Pod MUST be able to update the value in the configMap to value-2.
*/
framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
@@ -526,7 +527,7 @@ var _ = Describe("[sig-storage] Projected", func() {
Containers: []v1.Container{
{
Name: containerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -560,10 +561,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-volume-optional-configMaps-updated-successfully
Description: Make sure that if a projected volume has optional
configMaps, that the values in these configMaps can be updated,
deleted, and created.
Release : v1.9
Testname: Projected Volume, ConfigMap, create, update and delete
Description: Create a Pod with three containers with ConfigMaps namely a create, update and delete container. Create Container when started MUST not have a configMap, update and delete containers MUST be created with a ConfigMap value as value-1. Create a configMap in the create container, the Pod MUST be able to read the configMap from the create container. Update the configMap in the update container, Pod MUST be able to read the updated configMap value. Delete the configMap in the delete container. Pod MUST fail to read the configMap from the delete container.
*/
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
@@ -682,7 +682,7 @@ var _ = Describe("[sig-storage] Projected", func() {
Containers: []v1.Container{
{
Name: deleteContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volumes/delete/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -694,7 +694,7 @@ var _ = Describe("[sig-storage] Projected", func() {
},
{
Name: updateContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volumes/update/data-3"},
VolumeMounts: []v1.VolumeMount{
{
@@ -706,7 +706,7 @@ var _ = Describe("[sig-storage] Projected", func() {
},
{
Name: createContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -762,9 +762,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-configmap-multiple-volumes
Description: Make sure config map works when it mounted as two
different volumes on the same node.
Release : v1.9
Testname: Projected Volume, ConfigMap, multiple volume paths
Description: A Pod is created with a projected volume source ConfigMap to store a configMap. The configMap is mapped to two different volume mounts. Pod MUST be able to read the content of the configMap successfully from the two volume mounts.
*/
framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func() {
var (
@@ -825,7 +825,7 @@ var _ = Describe("[sig-storage] Projected", func() {
Containers: []v1.Container{
{
Name: "projected-configmap-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{"--file_content=/etc/projected-configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -860,9 +860,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-downwardapi-volume-podname
Description: Ensure that downward API can provide pod's name through
DownwardAPIVolumeFiles in a projected volume.
Release : v1.9
Testname: Projected Volume, DownwardAPI, pod name
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the pod name from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide podname only [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -874,10 +874,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-downwardapi-volume-set-default-mode
Description: Ensure that downward API can set default file permission
mode for DownwardAPIVolumeFiles if no mode is specified in a projected
volume.
Release : v1.9
Testname: Projected Volume, DownwardAPI, volume mode 0400
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The default mode for the volume mount is set to 0400. Pod MUST be able to read the pod name from the mounted DownwardAPIVolumeFiles and the volume mode must be -r--------.
*/
framework.ConformanceIt("should set DefaultMode on files [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -890,9 +889,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-downwardapi-volume-set-mode
Description: Ensure that downward API can set file permission mode for
DownwardAPIVolumeFiles in a projected volume.
Release : v1.9
Testname: Projected Volume, DownwardAPI, volume mode 0400
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The default mode for the volume mount is set to 0400. Pod MUST be able to read the pod name from the mounted DownwardAPIVolumeFiles and the volume mode must be -r--------.
*/
framework.ConformanceIt("should set mode on item file [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -934,10 +933,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-downwardapi-volume-update-label
Description: Ensure that downward API updates labels in
DownwardAPIVolumeFiles when pod's labels get modified in a projected
volume.
Release : v1.9
Testname: Projected Volume, DownwardAPI, update labels
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests and label items. Pod MUST be able to read the labels from the mounted DownwardAPIVolumeFiles. Labels are then updated. Pod MUST be able to read the updated values for the Labels.
*/
framework.ConformanceIt("should update labels on modification [NodeConformance]", func() {
labels := map[string]string{}
@@ -967,10 +965,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-downwardapi-volume-update-annotation
Description: Ensure that downward API updates annotations in
DownwardAPIVolumeFiles when pod's annotations get modified in a
projected volume.
Release : v1.9
Testname: Projected Volume, DownwardAPI, update annotation
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests and annotation items. Pod MUST be able to read the annotations from the mounted DownwardAPIVolumeFiles. Annotations are then updated. Pod MUST be able to read the updated values for the Annotations.
*/
framework.ConformanceIt("should update annotations on modification [NodeConformance]", func() {
annotations := map[string]string{}
@@ -1002,9 +999,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-downwardapi-volume-cpu-limit
Description: Ensure that downward API can provide container's CPU
limit through DownwardAPIVolumeFiles in a projected volume.
Release : v1.9
Testname: Projected Volume, DownwardAPI, CPU limits
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the cpu limits from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -1016,9 +1013,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-downwardapi-volume-memory-limit
Description: Ensure that downward API can provide container's memory
limit through DownwardAPIVolumeFiles in a projected volume.
Release : v1.9
Testname: Projected Volume, DownwardAPI, memory limits
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the memory limits from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -1030,9 +1027,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-downwardapi-volume-cpu-request
Description: Ensure that downward API can provide container's CPU
request through DownwardAPIVolumeFiles in a projected volume.
Release : v1.9
Testname: Projected Volume, DownwardAPI, CPU request
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the cpu request from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -1044,9 +1041,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-downwardapi-volume-memory-request
Description: Ensure that downward API can provide container's memory
request through DownwardAPIVolumeFiles in a projected volume.
Release : v1.9
Testname: Projected Volume, DownwardAPI, memory request
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the memory request from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide container's memory request [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -1058,10 +1055,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-downwardapi-volume-default-cpu
Description: Ensure that downward API can provide default node
allocatable value for CPU through DownwardAPIVolumeFiles if CPU limit
is not specified for a container in a projected volume.
Release : v1.9
Testname: Projected Volume, DownwardAPI, CPU limit, node allocatable
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The CPU and memory resources for requests and limits are NOT specified for the container. Pod MUST be able to read the default cpu limits from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -1071,10 +1067,9 @@ var _ = Describe("[sig-storage] Projected", func() {
})
/*
Testname: projected-downwardapi-volume-default-memory
Description: Ensure that downward API can provide default node
allocatable value for memory through DownwardAPIVolumeFiles if memory
limit is not specified for a container in a projected volume.
Release : v1.9
Testname: Projected Volume, DownwardAPI, memory limit, node allocatable
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The CPU and memory resources for requests and limits are NOT specified for the container. Pod MUST be able to read the default memory limits from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -1085,9 +1080,9 @@ var _ = Describe("[sig-storage] Projected", func() {
// Test multiple projections
/*
Testname: projected-configmap-secret-same-dir
Description: This test projects a secret and configmap into the same
directory to ensure projection is working as intended.
Release : v1.9
Testname: Projected Volume, multiple projections
Description: A Pod is created with a projected volume source for secrets, configMap and downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the secrets, configMap values and the cpu and memory limits as well as cpu and memory requests from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should project all components that make up the projection API [Projection][NodeConformance]", func() {
var err error
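The "all components" test described above combines every projection type in a single volume so the files share one mount point. A compact, hypothetical sketch of that shape (secret, configMap and path names are assumptions):

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

func main() {
	allInOne := v1.VolumeSource{
		Projected: &v1.ProjectedVolumeSource{
			Sources: []v1.VolumeProjection{
				{DownwardAPI: &v1.DownwardAPIProjection{Items: []v1.DownwardAPIVolumeFile{{
					Path:     "podname",
					FieldRef: &v1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.name"},
				}}}},
				{Secret: &v1.SecretProjection{
					LocalObjectReference: v1.LocalObjectReference{Name: "projected-secret"}, // assumed name
					Items:                []v1.KeyToPath{{Key: "data-1", Path: "secret-data"}},
				}},
				{ConfigMap: &v1.ConfigMapProjection{
					LocalObjectReference: v1.LocalObjectReference{Name: "projected-configmap"}, // assumed name
					Items:                []v1.KeyToPath{{Key: "data-1", Path: "configmap-data"}},
				}},
			},
		},
	}
	fmt.Println(len(allInOne.Projected.Sources), "projection sources share one mount point")
}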
@@ -1126,7 +1121,7 @@ var _ = Describe("[sig-storage] Projected", func() {
pod.Spec.Containers = []v1.Container{
{
Name: "projected-all-volume-test",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "cat /all/podname && cat /all/secret-data && cat /all/configmap-data"},
VolumeMounts: []v1.VolumeMount{
{
@@ -1186,7 +1181,7 @@ func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int
Containers: []v1.Container{
{
Name: "projected-secret-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/projected-secret-volume/data-1",
"--file_mode=/etc/projected-secret-volume/data-1"},
@@ -1272,7 +1267,7 @@ func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) {
Containers: []v1.Container{
{
Name: "projected-secret-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/projected-secret-volume/new-path-data-1",
"--file_mode=/etc/projected-secret-volume/new-path-data-1"},
@@ -1349,7 +1344,7 @@ func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup
Containers: []v1.Container{
{
Name: "projected-configmap-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/projected-configmap-volume/data-1",
"--file_mode=/etc/projected-configmap-volume/data-1"},
@@ -1440,7 +1435,7 @@ func doProjectedConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup in
Containers: []v1.Container{
{
Name: "projected-configmap-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{"--file_content=/etc/projected-configmap-volume/path/to/data-2",
"--file_mode=/etc/projected-configmap-volume/path/to/data-2"},
VolumeMounts: []v1.VolumeMount{
@@ -1490,7 +1485,7 @@ func projectedDownwardAPIVolumePodForModeTest(name, filePath string, itemMode, d
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--file_mode=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
@@ -1516,7 +1511,7 @@ func projectedDownwardAPIVolumePodForUpdateTest(name string, labels, annotations
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", "--retry_time=1200", "--file_content_in_loop=" + filePath},
VolumeMounts: []v1.VolumeMount{
{

386
vendor/k8s.io/kubernetes/test/e2e/common/runtime.go generated vendored Normal file
View File

@@ -0,0 +1,386 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"path"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
gomegatypes "github.com/onsi/gomega/types"
)
var _ = framework.KubeDescribe("Container Runtime", func() {
f := framework.NewDefaultFramework("container-runtime")
Describe("blackbox test", func() {
Context("when starting a container that exits", func() {
It("should run with the expected status [NodeConformance]", func() {
restartCountVolumeName := "restart-count"
restartCountVolumePath := "/restart-count"
testContainer := v1.Container{
Image: framework.BusyBoxImage,
VolumeMounts: []v1.VolumeMount{
{
MountPath: restartCountVolumePath,
Name: restartCountVolumeName,
},
},
}
testVolumes := []v1.Volume{
{
Name: restartCountVolumeName,
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory},
},
},
}
testCases := []struct {
Name string
RestartPolicy v1.RestartPolicy
Phase v1.PodPhase
State ContainerState
RestartCount int32
Ready bool
}{
{"terminate-cmd-rpa", v1.RestartPolicyAlways, v1.PodRunning, ContainerStateRunning, 2, true},
{"terminate-cmd-rpof", v1.RestartPolicyOnFailure, v1.PodSucceeded, ContainerStateTerminated, 1, false},
{"terminate-cmd-rpn", v1.RestartPolicyNever, v1.PodFailed, ContainerStateTerminated, 0, false},
}
for _, testCase := range testCases {
// It fails on the 1st run, succeeds on the 2nd run, then runs forever
cmdScripts := `
f=%s
count=$(echo 'hello' >> $f ; wc -l $f | awk {'print $1'})
if [ $count -eq 1 ]; then
exit 1
fi
if [ $count -eq 2 ]; then
exit 0
fi
while true; do sleep 1; done
`
tmpCmd := fmt.Sprintf(cmdScripts, path.Join(restartCountVolumePath, "restartCount"))
testContainer.Name = testCase.Name
testContainer.Command = []string{"sh", "-c", tmpCmd}
terminateContainer := ConformanceContainer{
PodClient: f.PodClient(),
Container: testContainer,
RestartPolicy: testCase.RestartPolicy,
Volumes: testVolumes,
PodSecurityContext: &v1.PodSecurityContext{
SELinuxOptions: &v1.SELinuxOptions{
Level: "s0",
},
},
}
terminateContainer.Create()
defer terminateContainer.Delete()
By(fmt.Sprintf("Container '%s': should get the expected 'RestartCount'", testContainer.Name))
Eventually(func() (int32, error) {
status, err := terminateContainer.GetStatus()
return status.RestartCount, err
}, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(Equal(testCase.RestartCount))
By(fmt.Sprintf("Container '%s': should get the expected 'Phase'", testContainer.Name))
Eventually(terminateContainer.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(Equal(testCase.Phase))
By(fmt.Sprintf("Container '%s': should get the expected 'Ready' condition", testContainer.Name))
Expect(terminateContainer.IsReady()).Should(Equal(testCase.Ready))
status, err := terminateContainer.GetStatus()
Expect(err).ShouldNot(HaveOccurred())
By(fmt.Sprintf("Container '%s': should get the expected 'State'", testContainer.Name))
Expect(GetContainerState(status.State)).To(Equal(testCase.State))
By(fmt.Sprintf("Container '%s': should be possible to delete [NodeConformance]", testContainer.Name))
Expect(terminateContainer.Delete()).To(Succeed())
Eventually(terminateContainer.Present, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(BeFalse())
}
})
rootUser := int64(0)
nonRootUser := int64(10000)
for _, testCase := range []struct {
name string
container v1.Container
phase v1.PodPhase
message gomegatypes.GomegaMatcher
}{
{
name: "if TerminationMessagePath is set [NodeConformance]",
container: v1.Container{
Image: framework.BusyBoxImage,
Command: []string{"/bin/sh", "-c"},
Args: []string{"/bin/echo -n DONE > /dev/termination-log"},
TerminationMessagePath: "/dev/termination-log",
SecurityContext: &v1.SecurityContext{
RunAsUser: &rootUser,
},
},
phase: v1.PodSucceeded,
message: Equal("DONE"),
},
{
name: "if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance]",
container: v1.Container{
Image: framework.BusyBoxImage,
Command: []string{"/bin/sh", "-c"},
Args: []string{"/bin/echo -n DONE > /dev/termination-custom-log"},
TerminationMessagePath: "/dev/termination-custom-log",
SecurityContext: &v1.SecurityContext{
RunAsUser: &nonRootUser,
},
},
phase: v1.PodSucceeded,
message: Equal("DONE"),
},
{
name: "from log output if TerminationMessagePolicy FallbackToLogOnError is set [NodeConformance]",
container: v1.Container{
Image: framework.BusyBoxImage,
Command: []string{"/bin/sh", "-c"},
Args: []string{"/bin/echo -n DONE; /bin/false"},
TerminationMessagePath: "/dev/termination-log",
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
},
phase: v1.PodFailed,
message: Equal("DONE\n"),
},
{
name: "as empty when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set [NodeConformance]",
container: v1.Container{
Image: framework.BusyBoxImage,
Command: []string{"/bin/sh", "-c"},
Args: []string{"/bin/echo DONE; /bin/true"},
TerminationMessagePath: "/dev/termination-log",
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
},
phase: v1.PodSucceeded,
message: Equal(""),
},
{
name: "from file when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set [NodeConformance]",
container: v1.Container{
Image: framework.BusyBoxImage,
Command: []string{"/bin/sh", "-c"},
Args: []string{"/bin/echo -n OK > /dev/termination-log; /bin/echo DONE; /bin/true"},
TerminationMessagePath: "/dev/termination-log",
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
},
phase: v1.PodSucceeded,
message: Equal("OK"),
},
} {
It(fmt.Sprintf("should report termination message %s", testCase.name), func() {
testCase.container.Name = "termination-message-container"
c := ConformanceContainer{
PodClient: f.PodClient(),
Container: testCase.container,
RestartPolicy: v1.RestartPolicyNever,
}
By("create the container")
c.Create()
defer c.Delete()
By(fmt.Sprintf("wait for the container to reach %s", testCase.phase))
Eventually(c.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(Equal(testCase.phase))
By("get the container status")
status, err := c.GetStatus()
Expect(err).NotTo(HaveOccurred())
By("the container should be terminated")
Expect(GetContainerState(status.State)).To(Equal(ContainerStateTerminated))
By("the termination message should be set")
Expect(status.State.Terminated.Message).Should(testCase.message)
By("delete the container")
Expect(c.Delete()).To(Succeed())
})
}
})
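The table of cases above exercises how a container's termination message is reported. A minimal, hypothetical sketch of the relevant container fields (the image and script mirror the busybox cases but are assumptions, not copied from the test):

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

func main() {
	c := v1.Container{
		Name:    "termination-message-sketch",
		Image:   "busybox",
		Command: []string{"/bin/sh", "-c"},
		Args:    []string{"/bin/echo -n DONE; /bin/false"},
		// The kubelet reads this file into the container status when the container exits.
		TerminationMessagePath: "/dev/termination-log",
		// If the file is empty and the container failed, fall back to the tail of the log.
		TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
	}
	fmt.Printf("%s uses policy %s\n", c.Name, c.TerminationMessagePolicy)
}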
Context("when running a container with a new image", func() {
// The service account only has pull permission
auth := `
{
"auths": {
"https://gcr.io": {
"auth": "X2pzb25fa2V5OnsKICAidHlwZSI6ICJzZXJ2aWNlX2FjY291bnQiLAogICJwcm9qZWN0X2lkIjogImF1dGhlbnRpY2F0ZWQtaW1hZ2UtcHVsbGluZyIsCiAgInByaXZhdGVfa2V5X2lkIjogImI5ZjJhNjY0YWE5YjIwNDg0Y2MxNTg2MDYzZmVmZGExOTIyNGFjM2IiLAogICJwcml2YXRlX2tleSI6ICItLS0tLUJFR0lOIFBSSVZBVEUgS0VZLS0tLS1cbk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzdTSG5LVEVFaVlMamZcbkpmQVBHbUozd3JCY2VJNTBKS0xxS21GWE5RL3REWGJRK2g5YVl4aldJTDhEeDBKZTc0bVovS01uV2dYRjVLWlNcbm9BNktuSU85Yi9SY1NlV2VpSXRSekkzL1lYVitPNkNjcmpKSXl4anFWam5mVzJpM3NhMzd0OUE5VEZkbGZycm5cbjR6UkpiOWl4eU1YNGJMdHFGR3ZCMDNOSWl0QTNzVlo1ODhrb1FBZmgzSmhhQmVnTWorWjRSYko0aGVpQlFUMDNcbnZVbzViRWFQZVQ5RE16bHdzZWFQV2dydDZOME9VRGNBRTl4bGNJek11MjUzUG4vSzgySFpydEx4akd2UkhNVXhcbng0ZjhwSnhmQ3h4QlN3Z1NORit3OWpkbXR2b0wwRmE3ZGducFJlODZWRDY2ejNZenJqNHlLRXRqc2hLZHl5VWRcbkl5cVhoN1JSQWdNQkFBRUNnZ0VBT3pzZHdaeENVVlFUeEFka2wvSTVTRFVidi9NazRwaWZxYjJEa2FnbmhFcG9cbjFJajJsNGlWMTByOS9uenJnY2p5VlBBd3pZWk1JeDFBZVF0RDdoUzRHWmFweXZKWUc3NkZpWFpQUm9DVlB6b3VcbmZyOGRDaWFwbDV0enJDOWx2QXNHd29DTTdJWVRjZmNWdDdjRTEyRDNRS3NGNlo3QjJ6ZmdLS251WVBmK0NFNlRcbmNNMHkwaCtYRS9kMERvSERoVy96YU1yWEhqOFRvd2V1eXRrYmJzNGYvOUZqOVBuU2dET1lQd2xhbFZUcitGUWFcbkpSd1ZqVmxYcEZBUW14M0Jyd25rWnQzQ2lXV2lGM2QrSGk5RXRVYnRWclcxYjZnK1JRT0licWFtcis4YlJuZFhcbjZWZ3FCQWtKWjhSVnlkeFVQMGQxMUdqdU9QRHhCbkhCbmM0UW9rSXJFUUtCZ1FEMUNlaWN1ZGhXdGc0K2dTeGJcbnplanh0VjFONDFtZHVjQnpvMmp5b1dHbzNQVDh3ckJPL3lRRTM0cU9WSi9pZCs4SThoWjRvSWh1K0pBMDBzNmdcblRuSXErdi9kL1RFalk4MW5rWmlDa21SUFdiWHhhWXR4UjIxS1BYckxOTlFKS2ttOHRkeVh5UHFsOE1veUdmQ1dcbjJ2aVBKS05iNkhabnY5Q3lqZEo5ZzJMRG5RS0JnUUREcVN2eURtaGViOTIzSW96NGxlZ01SK205Z2xYVWdTS2dcbkVzZlllbVJmbU5XQitDN3ZhSXlVUm1ZNU55TXhmQlZXc3dXRldLYXhjK0krYnFzZmx6elZZdFpwMThNR2pzTURcbmZlZWZBWDZCWk1zVXQ3Qmw3WjlWSjg1bnRFZHFBQ0xwWitaLzN0SVJWdWdDV1pRMWhrbmxHa0dUMDI0SkVFKytcbk55SDFnM2QzUlFLQmdRQ1J2MXdKWkkwbVBsRklva0tGTkh1YTBUcDNLb1JTU1hzTURTVk9NK2xIckcxWHJtRjZcbkMwNGNTKzQ0N0dMUkxHOFVUaEpKbTRxckh0Ti9aK2dZOTYvMm1xYjRIakpORDM3TVhKQnZFYTN5ZUxTOHEvK1JcbjJGOU1LamRRaU5LWnhQcG84VzhOSlREWTVOa1BaZGh4a2pzSHdVNGRTNjZwMVRESUU0MGd0TFpaRFFLQmdGaldcbktyblFpTnEzOS9iNm5QOFJNVGJDUUFKbmR3anhTUU5kQTVmcW1rQTlhRk9HbCtqamsxQ1BWa0tNSWxLSmdEYkpcbk9heDl2OUc2Ui9NSTFIR1hmV3QxWU56VnRocjRIdHNyQTB0U3BsbWhwZ05XRTZWejZuQURqdGZQSnMyZUdqdlhcbmpQUnArdjhjY21MK3dTZzhQTGprM3ZsN2VlNXJsWWxNQndNdUdjUHhBb0dBZWRueGJXMVJMbVZubEFpSEx1L0xcbmxtZkF3RFdtRWlJMFVnK1BMbm9Pdk81dFE1ZDRXMS94RU44bFA0cWtzcGtmZk1Rbk5oNFNZR0VlQlQzMlpxQ1RcbkpSZ2YwWGpveXZ2dXA5eFhqTWtYcnBZL3ljMXpmcVRaQzBNTzkvMVVjMWJSR2RaMmR5M2xSNU5XYXA3T1h5Zk9cblBQcE5Gb1BUWGd2M3FDcW5sTEhyR3pNPVxuLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLVxuIiwKICAiY2xpZW50X2VtYWlsIjogImltYWdlLXB1bGxpbmdAYXV0aGVudGljYXRlZC1pbWFnZS1wdWxsaW5nLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwKICAiY2xpZW50X2lkIjogIjExMzc5NzkxNDUzMDA3MzI3ODcxMiIsCiAgImF1dGhfdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi9hdXRoIiwKICAidG9rZW5fdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi90b2tlbiIsCiAgImF1dGhfcHJvdmlkZXJfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9vYXV0aDIvdjEvY2VydHMiLAogICJjbGllbnRfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9yb2JvdC92MS9tZXRhZGF0YS94NTA5L2ltYWdlLXB1bGxpbmclNDBhdXRoZW50aWNhdGVkLWltYWdlLXB1bGxpbmcuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iCn0=",
"email": "image-pulling@authenticated-image-pulling.iam.gserviceaccount.com"
}
}
}`
secret := &v1.Secret{
Data: map[string][]byte{v1.DockerConfigJsonKey: []byte(auth)},
Type: v1.SecretTypeDockerConfigJson,
}
// The following images are not added into NodeImageWhiteList, because this test is
// testing image pulling; these images don't need to be prepulled. The ImagePullPolicy
// is v1.PullAlways, so it won't be blocked by the framework image white list check.
for _, testCase := range []struct {
description string
image string
secret bool
phase v1.PodPhase
waiting bool
}{
{
description: "should not be able to pull image from invalid registry",
image: "invalid.com/invalid/alpine:3.1",
phase: v1.PodPending,
waiting: true,
},
{
description: "should not be able to pull non-existing image from gcr.io",
image: "k8s.gcr.io/invalid-image:invalid-tag",
phase: v1.PodPending,
waiting: true,
},
{
description: "should be able to pull image from gcr.io",
image: "k8s.gcr.io/alpine-with-bash:1.0",
phase: v1.PodRunning,
waiting: false,
},
{
description: "should be able to pull image from docker hub",
image: "alpine:3.1",
phase: v1.PodRunning,
waiting: false,
},
{
description: "should not be able to pull from private registry without secret",
image: "gcr.io/authenticated-image-pulling/alpine:3.1",
phase: v1.PodPending,
waiting: true,
},
{
description: "should be able to pull from private registry with secret",
image: "gcr.io/authenticated-image-pulling/alpine:3.1",
secret: true,
phase: v1.PodRunning,
waiting: false,
},
} {
testCase := testCase
It(testCase.description+" [NodeConformance]", func() {
name := "image-pull-test"
command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"}
container := ConformanceContainer{
PodClient: f.PodClient(),
Container: v1.Container{
Name: name,
Image: testCase.image,
Command: command,
// PullAlways makes sure that the image will always be pulled even if it is present before the test.
ImagePullPolicy: v1.PullAlways,
},
RestartPolicy: v1.RestartPolicyNever,
}
if testCase.secret {
secret.Name = "image-pull-secret-" + string(uuid.NewUUID())
By("create image pull secret")
_, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
Expect(err).NotTo(HaveOccurred())
defer f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil)
container.ImagePullSecrets = []string{secret.Name}
}
// checkContainerStatus checks whether the container status matches expectation.
checkContainerStatus := func() error {
status, err := container.GetStatus()
if err != nil {
return fmt.Errorf("failed to get container status: %v", err)
}
// We need to check container state first. The default pod status is Pending. If we check
// pod phase first, and the expected pod phase is Pending, the container status may not
// even show up when we check it.
// Check container state
if !testCase.waiting {
if status.State.Running == nil {
return fmt.Errorf("expected container state: Running, got: %q",
GetContainerState(status.State))
}
}
if testCase.waiting {
if status.State.Waiting == nil {
return fmt.Errorf("expected container state: Waiting, got: %q",
GetContainerState(status.State))
}
reason := status.State.Waiting.Reason
if reason != images.ErrImagePull.Error() &&
reason != images.ErrImagePullBackOff.Error() {
return fmt.Errorf("unexpected waiting reason: %q", reason)
}
}
// Check pod phase
phase, err := container.GetPhase()
if err != nil {
return fmt.Errorf("failed to get pod phase: %v", err)
}
if phase != testCase.phase {
return fmt.Errorf("expected pod phase: %q, got: %q", testCase.phase, phase)
}
return nil
}
// The image registry is not stable, which sometimes causes the test to fail. Add a retry mechanism to make this
// less flaky.
const flakeRetry = 3
for i := 1; i <= flakeRetry; i++ {
var err error
By("create the container")
container.Create()
By("check the container status")
for start := time.Now(); time.Since(start) < ContainerStatusRetryTimeout; time.Sleep(ContainerStatusPollInterval) {
if err = checkContainerStatus(); err == nil {
break
}
}
By("delete the container")
container.Delete()
if err == nil {
break
}
if i < flakeRetry {
framework.Logf("No.%d attempt failed: %v, retrying...", i, err)
} else {
framework.Failf("All %d attempts failed: %v", flakeRetry, err)
}
}
})
}
})
})
})
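The image-pulling cases above revolve around a dockerconfigjson pull secret referenced from the pod spec. A hedged sketch of that wiring using plain core/v1 types rather than the test's ConformanceContainer helper (the secret name, registry, image path and auth payload are placeholders, not real values or credentials):

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A kubernetes.io/dockerconfigjson secret; the auth payload is a placeholder.
	pullSecret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "image-pull-secret-sketch"},
		Type:       v1.SecretTypeDockerConfigJson,
		Data: map[string][]byte{
			v1.DockerConfigJsonKey: []byte(`{"auths":{"gcr.io":{"auth":"<base64 user:token>"}}}`),
		},
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "private-image-sketch"},
		Spec: v1.PodSpec{
			// The kubelet presents this secret to the registry when pulling the image.
			ImagePullSecrets: []v1.LocalObjectReference{{Name: pullSecret.Name}},
			RestartPolicy:    v1.RestartPolicyNever,
			Containers: []v1.Container{{
				Name:            "private",
				Image:           "gcr.io/some-private-project/alpine:3.1", // assumed image path
				ImagePullPolicy: v1.PullAlways,                            // always hit the registry, as in the tests
			}},
		},
	}
	fmt.Println(pod.Name, "pulls with secret", pod.Spec.ImagePullSecrets[0].Name)
}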

View File

@@ -23,6 +23,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
)
@@ -31,9 +32,9 @@ var _ = Describe("[sig-api-machinery] Secrets", func() {
f := framework.NewDefaultFramework("secrets")
/*
Testname: secret-env-vars
Description: Ensure that secret can be consumed via environment
variables.
Release : v1.9
Testname: Secrets, pod environment field
Description: Create a secret. Create a Pod with a Container that declares an environment variable which references the secret created to extract a key value from the secret. Pod MUST have the environment variable that contains the proper value for the key to the secret.
*/
framework.ConformanceIt("should be consumable from pods in env vars [NodeConformance]", func() {
name := "secret-test-" + string(uuid.NewUUID())
@@ -53,7 +54,7 @@ var _ = Describe("[sig-api-machinery] Secrets", func() {
Containers: []v1.Container{
{
Name: "secret-env-test",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
Env: []v1.EnvVar{
{
@@ -80,9 +81,9 @@ var _ = Describe("[sig-api-machinery] Secrets", func() {
})
/*
Testname: secret-configmaps-source
Description: Ensure that secret can be consumed via source of a set
of ConfigMaps.
Release : v1.9
Testname: Secrets, pod environment from source
Description: Create a secret. Create a Pod with a Container that declares an environment variable using EnvFrom which references the secret created to extract a key value from the secret. Pod MUST have the environment variable that contains the proper value for the key to the secret.
*/
framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func() {
name := "secret-test-" + string(uuid.NewUUID())
@@ -101,7 +102,7 @@ var _ = Describe("[sig-api-machinery] Secrets", func() {
Containers: []v1.Container{
{
Name: "env-test",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
EnvFrom: []v1.EnvFromSource{
{

View File

@@ -25,6 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -34,18 +35,18 @@ var _ = Describe("[sig-storage] Secrets", func() {
f := framework.NewDefaultFramework("secrets")
/*
Testname: secret-volume-mount-without-mapping
Description: Ensure that secret can be mounted without mapping to a
pod volume.
Release : v1.9
Testname: Secrets Volume, default
Description: Create a secret. Create a Pod with secret volume source configured into the container. Pod MUST be able to read the secret from the mounted volume from the container runtime and the file mode of the secret MUST be -rw-r--r-- by default.
*/
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
doSecretE2EWithoutMapping(f, nil /* default mode */, "secret-test-"+string(uuid.NewUUID()), nil, nil)
})
/*
Testname: secret-volume-mount-without-mapping-default-mode
Description: Ensure that secret can be mounted without mapping to a
pod volume in default mode.
Release : v1.9
Testname: Secrets Volume, volume mode 0400
Description: Create a secret. Create a Pod with secret volume source configured into the container with file mode set to 0400. Pod MUST be able to read the secret from the mounted volume from the container runtime and the file mode of the secret MUST be -r--------.
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [NodeConformance]", func() {
defaultMode := int32(0400)
@@ -53,9 +54,9 @@ var _ = Describe("[sig-storage] Secrets", func() {
})
/*
Testname: secret-volume-mount-without-mapping-non-root-default-mode-fsgroup
Description: Ensure that secret can be mounted without mapping to a pod
volume as non-root in default mode with fsGroup set.
Release : v1.9
Testname: Secrets Volume, volume mode 0440, fsGroup 1001 and uid 1000
Description: Create a secret. Create a Pod with secret volume source configured into the container with file mode set to 0440 as a non-root user with uid 1000 and fsGroup id 1001. Pod MUST be able to read the secret from the mounted volume from the container runtime and the file mode of the secret MUST be -r--r-----.
*/
framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance]", func() {
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
@@ -65,25 +66,30 @@ var _ = Describe("[sig-storage] Secrets", func() {
})
/*
Testname: secret-volume-mount-with-mapping
Description: Ensure that secret can be mounted with mapping to a pod
volume.
Release : v1.9
Testname: Secrets Volume, mapping
Description: Create a secret. Create a Pod with secret volume source configured into the container with a custom path. Pod MUST be able to read the secret from the mounted volume from the specified custom path. The file mode of the secret MUST be -rw-r--r-- by default.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
doSecretE2EWithMapping(f, nil)
})
/*
Testname: secret-volume-mount-with-mapping-item-mode
Description: Ensure that secret can be mounted with mapping to a pod
volume in item mode.
Release : v1.9
Testname: Secrets Volume, mapping, volume mode 0400
Description: Create a secret. Create a Pod with secret volume source configured into the container with a custom path and file mode set to 0400. Pod MUST be able to read the secret from the mounted volume from the specified custom path. The file mode of the secret MUST be -r--r--r--.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [NodeConformance]", func() {
mode := int32(0400)
doSecretE2EWithMapping(f, &mode)
})
It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func() {
/*
Release : v1.12
Testname: Secrets Volume, volume mode default, secret with same name in different namespace
Description: Create a secret with the same name in two namespaces. Create a Pod with secret volume source configured into the container. Pod MUST be able to read the secrets from the mounted volume from the container runtime, and only the secrets which are associated with the namespace where the pod is created. The file mode of the secret MUST be -rw-r--r-- by default.
*/
framework.ConformanceIt("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func() {
var (
namespace2 *v1.Namespace
err error
@@ -105,8 +111,9 @@ var _ = Describe("[sig-storage] Secrets", func() {
})
/*
Testname: secret-multiple-volume-mounts
Description: Ensure that secret can be mounted to multiple pod volumes.
Release : v1.9
Testname: Secrets Volume, mapping multiple volume paths
Description: Create a secret. Create a Pod with two secret volume sources configured into the container in to two different custom paths. Pod MUST be able to read the secret from the both the mounted volumes from the two specified custom paths.
*/
framework.ConformanceIt("should be consumable in multiple volumes in a pod [NodeConformance]", func() {
// This test ensures that the same secret can be mounted in multiple
@@ -153,7 +160,7 @@ var _ = Describe("[sig-storage] Secrets", func() {
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/secret-volume/data-1",
"--file_mode=/etc/secret-volume/data-1"},
@@ -182,9 +189,9 @@ var _ = Describe("[sig-storage] Secrets", func() {
})
/*
Testname: secret-mounted-volume-optional-update-change
Description: Ensure that optional update change to secret can be
reflected on a mounted volume.
Release : v1.9
Testname: Secrets Volume, create, update and delete
Description: Create a Pod with three containers with secrets volume sources namely a create, update and delete container. Create Container when started MUST not have a secret, update and delete containers MUST be created with a secret value. Create a secret in the create container, the Pod MUST be able to read the secret from the create container. Update the secret in the update container, Pod MUST be able to read the updated secret value. Delete the secret in the delete container. Pod MUST fail to read the secret from the delete container.
*/
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
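The create/update/delete description above depends on secret volume sources being marked optional, so the pod can start before the secret exists. A minimal, hypothetical sketch of that flag (the volume and secret names are assumptions):

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

func main() {
	optional := true
	vol := v1.Volume{
		Name: "create-volume", // assumed volume name
		VolumeSource: v1.VolumeSource{
			Secret: &v1.SecretVolumeSource{
				SecretName: "creates-volume-test", // assumed secret name
				// Optional lets the pod start with an empty volume while the secret
				// does not exist yet; the kubelet populates the volume once the
				// secret is created, which is what the create container watches for.
				Optional: &optional,
			},
		},
	}
	fmt.Println(vol.Name, "optional:", *vol.VolumeSource.Secret.Optional)
}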
@@ -279,7 +286,7 @@ var _ = Describe("[sig-storage] Secrets", func() {
Containers: []v1.Container{
{
Name: deleteContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/delete/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -291,7 +298,7 @@ var _ = Describe("[sig-storage] Secrets", func() {
},
{
Name: updateContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/update/data-3"},
VolumeMounts: []v1.VolumeMount{
{
@@ -303,7 +310,7 @@ var _ = Describe("[sig-storage] Secrets", func() {
},
{
Name: createContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@@ -406,7 +413,7 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/secret-volume/data-1",
"--file_mode=/etc/secret-volume/data-1"},
@@ -483,7 +490,7 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/secret-volume/new-path-data-1",
"--file_mode=/etc/secret-volume/new-path-data-1"},

View File

@@ -0,0 +1,266 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"strings"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
)
var _ = framework.KubeDescribe("Security Context", func() {
f := framework.NewDefaultFramework("security-context-test")
var podClient *framework.PodClient
BeforeEach(func() {
podClient = f.PodClient()
})
Context("When creating a container with runAsUser", func() {
makeUserPod := func(podName, image string, command []string, userid int64) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: image,
Name: podName,
Command: command,
SecurityContext: &v1.SecurityContext{
RunAsUser: &userid,
},
},
},
},
}
}
createAndWaitUserPod := func(userid int64) {
podName := fmt.Sprintf("busybox-user-%d-%s", userid, uuid.NewUUID())
podClient.Create(makeUserPod(podName,
framework.BusyBoxImage,
[]string{"sh", "-c", fmt.Sprintf("test $(id -u) -eq %d", userid)},
userid,
))
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
}
/*
Release : v1.12
Testname: Security Context: runAsUser (id:65534)
Description: A container created with the runAsUser option, passing an id (65534), uses that
given id when running the container.
*/
It("should run the container with uid 65534 [NodeConformance]", func() {
createAndWaitUserPod(65534)
})
/*
Release : v1.12
Testname: Security Context: runAsUser (id:0)
Description: A container created with the runAsUser option, passing an id (0), uses that
given id when running the container.
*/
It("should run the container with uid 0 [NodeConformance]", func() {
createAndWaitUserPod(0)
})
})
Context("When creating a pod with readOnlyRootFilesystem", func() {
makeUserPod := func(podName, image string, command []string, readOnlyRootFilesystem bool) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: image,
Name: podName,
Command: command,
SecurityContext: &v1.SecurityContext{
ReadOnlyRootFilesystem: &readOnlyRootFilesystem,
},
},
},
},
}
}
createAndWaitUserPod := func(readOnlyRootFilesystem bool) string {
podName := fmt.Sprintf("busybox-readonly-%v-%s", readOnlyRootFilesystem, uuid.NewUUID())
podClient.Create(makeUserPod(podName,
framework.BusyBoxImage,
[]string{"sh", "-c", "touch checkfile"},
readOnlyRootFilesystem,
))
if readOnlyRootFilesystem {
podClient.WaitForFailure(podName, framework.PodStartTimeout)
} else {
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
}
return podName
}
/*
Release : v1.12
Testname: Security Context: readOnlyRootFilesystem=true.
Description: When a container is configured with readOnlyRootFilesystem set to true, write operations are not allowed.
*/
It("should run the container with readonly rootfs when readOnlyRootFilesystem=true [NodeConformance]", func() {
createAndWaitUserPod(true)
})
/*
Release : v1.12
Testname: Security Context: readOnlyRootFilesystem=false.
Description: When a container is configured with readOnlyRootFilesystem set to false, write operations are allowed.
*/
It("should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance]", func() {
createAndWaitUserPod(false)
})
})
Context("When creating a pod with privileged", func() {
makeUserPod := func(podName, image string, command []string, privileged bool) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: image,
Name: podName,
Command: command,
SecurityContext: &v1.SecurityContext{
Privileged: &privileged,
},
},
},
},
}
}
createAndWaitUserPod := func(privileged bool) string {
podName := fmt.Sprintf("busybox-privileged-%v-%s", privileged, uuid.NewUUID())
podClient.Create(makeUserPod(podName,
framework.BusyBoxImage,
[]string{"sh", "-c", "ip link add dummy0 type dummy || true"},
privileged,
))
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
return podName
}
It("should run the container as unprivileged when false [NodeConformance]", func() {
podName := createAndWaitUserPod(false)
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
if err != nil {
framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
}
framework.Logf("Got logs for pod %q: %q", podName, logs)
if !strings.Contains(logs, "Operation not permitted") {
framework.Failf("unprivileged container shouldn't be able to create dummy device")
}
})
})
Context("when creating containers with AllowPrivilegeEscalation", func() {
makeAllowPrivilegeEscalationPod := func(podName string, allowPrivilegeEscalation *bool, uid int64) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: imageutils.GetE2EImage(imageutils.Nonewprivs),
Name: podName,
SecurityContext: &v1.SecurityContext{
AllowPrivilegeEscalation: allowPrivilegeEscalation,
RunAsUser: &uid,
},
},
},
},
}
}
createAndMatchOutput := func(podName, output string, allowPrivilegeEscalation *bool, uid int64) error {
podClient.Create(makeAllowPrivilegeEscalationPod(podName,
allowPrivilegeEscalation,
uid,
))
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
return podClient.MatchContainerOutput(podName, podName, output)
}
/*
Testname: allowPrivilegeEscalation unset and uid != 0.
Description: Leaving allowPrivilegeEscalation unset allows the privilege escalation operation.
A container is configured with allowPrivilegeEscalation not specified (nil) and a given uid which is not 0.
When the container is run, it runs using uid=0.
*/
It("should allow privilege escalation when not explicitly set and uid != 0 [NodeConformance]", func() {
podName := "alpine-nnp-nil-" + string(uuid.NewUUID())
if err := createAndMatchOutput(podName, "Effective uid: 0", nil, 1000); err != nil {
framework.Failf("Match output for pod %q failed: %v", podName, err)
}
})
/*
Testname: allowPrivilegeEscalation=false.
Description: Setting allowPrivilegeEscalation to false does not allow the privilege escalation operation.
A container is configured with allowPrivilegeEscalation=false and a uid (1000) which is not 0.
When the container is run, it runs with uid=1000.
*/
It("should not allow privilege escalation when false [NodeConformance]", func() {
podName := "alpine-nnp-false-" + string(uuid.NewUUID())
apeFalse := false
if err := createAndMatchOutput(podName, "Effective uid: 1000", &apeFalse, 1000); err != nil {
framework.Failf("Match output for pod %q failed: %v", podName, err)
}
})
/*
Testname: allowPrivilegeEscalation=true.
Description: Setting allowPrivilegeEscalation to true allows the privilege escalation operation.
A container is configured with allowPrivilegeEscalation=true and a uid (1000) which is not 0.
When the container is run, it runs with uid=0 (making use of privilege escalation).
*/
It("should allow privilege escalation when true [NodeConformance]", func() {
podName := "alpine-nnp-true-" + string(uuid.NewUUID())
apeTrue := true
if err := createAndMatchOutput(podName, "Effective uid: 0", &apeTrue, 1000); err != nil {
framework.Failf("Match output for pod %q failed: %v", podName, err)
}
})
})
})
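The three AllowPrivilegeEscalation cases above rely on the Nonewprivs test image running a setuid-root binary that prints its effective uid; allowPrivilegeEscalation=false sets the no_new_privs flag on the container process, so the setuid bit is ignored. Summarizing the expected outcomes (derived from the assertions above, illustrative only):

    // allowPrivilegeEscalation | runAsUser | observed effective uid
    // unset (nil)              | 1000      | 0    (setuid honored)
    // false                    | 1000      | 1000 (no_new_privs set, setuid ignored)
    // true                     | 1000      | 0    (setuid honored)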

View File

@@ -22,6 +22,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/kubelet/sysctl"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -42,7 +43,7 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
Containers: []v1.Container{
{
Name: "test-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
},
},
RestartPolicy: v1.RestartPolicyNever,

View File

@@ -0,0 +1,98 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"time"
batch "k8s.io/api/batch/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/util/slice"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const dummyFinalizer = "k8s.io/dummy-finalizer"
var _ = framework.KubeDescribe("TTLAfterFinished", func() {
f := framework.NewDefaultFramework("ttlafterfinished")
alphaFeatureStr := "[Feature:TTLAfterFinished]"
It(fmt.Sprintf("Job should be deleted once it finishes after TTL seconds %s", alphaFeatureStr), func() {
testFinishedJob(f)
})
})
func cleanupJob(f *framework.Framework, job *batch.Job) {
ns := f.Namespace.Name
c := f.ClientSet
framework.Logf("Remove the Job's dummy finalizer; the Job and its Pods should then be deleted in a cascading fashion")
removeFinalizerFunc := func(j *batch.Job) {
j.ObjectMeta.Finalizers = slice.RemoveString(j.ObjectMeta.Finalizers, dummyFinalizer, nil)
}
_, err := framework.UpdateJobWithRetries(c, ns, job.Name, removeFinalizerFunc)
Expect(err).NotTo(HaveOccurred())
framework.WaitForJobGone(c, ns, job.Name, wait.ForeverTestTimeout)
err = framework.WaitForAllJobPodsGone(c, ns, job.Name)
Expect(err).NotTo(HaveOccurred())
}
func testFinishedJob(f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet
parallelism := int32(1)
completions := int32(1)
backoffLimit := int32(2)
ttl := int32(10)
job := framework.NewTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job.Spec.TTLSecondsAfterFinished = &ttl
job.ObjectMeta.Finalizers = []string{dummyFinalizer}
defer cleanupJob(f, job)
framework.Logf("Create a Job %s/%s with TTL", job.Namespace, job.Name)
job, err := framework.CreateJob(c, ns, job)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Wait for the Job to finish")
err = framework.WaitForJobFinish(c, ns, job.Name)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Wait for TTL after finished controller to delete the Job")
err = framework.WaitForJobDeleting(c, ns, job.Name)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Check Job's deletionTimestamp and compare with the time when the Job finished")
job, err = framework.GetJob(c, ns, job.Name)
Expect(err).NotTo(HaveOccurred())
finishTime := framework.JobFinishTime(job)
finishTimeUTC := finishTime.UTC()
Expect(finishTime.IsZero()).NotTo(BeTrue())
deleteAtUTC := job.ObjectMeta.DeletionTimestamp.UTC()
Expect(deleteAtUTC).NotTo(BeNil())
expireAtUTC := finishTimeUTC.Add(time.Duration(ttl) * time.Second)
Expect(deleteAtUTC.Before(expireAtUTC)).To(BeFalse())
}
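The framework.NewTestJob helper hides the Job object itself; stripped down, the feature under test is a single optional field on the Job spec. A minimal sketch of setting it directly (assuming the usual batch/v1, core v1, and metav1 imports plus a clientset c and namespace ns as above; not the exact object built by the helper):

    ttl := int32(10)
    job := &batch.Job{
        ObjectMeta: metav1.ObjectMeta{Name: "ttl-demo"},
        Spec: batch.JobSpec{
            // Ask the TTL-after-finished controller to delete the Job ~10s after
            // it reaches the Complete or Failed condition.
            TTLSecondsAfterFinished: &ttl,
            Template: v1.PodTemplateSpec{
                Spec: v1.PodSpec{
                    RestartPolicy: v1.RestartPolicyNever,
                    Containers: []v1.Container{{
                        Name:    "main",
                        Image:   framework.BusyBoxImage,
                        Command: []string{"sh", "-c", "exit 0"},
                    }},
                },
            },
        },
    }
    job, err := c.BatchV1().Jobs(ns).Create(job)
    framework.ExpectNoError(err)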

View File

@@ -40,11 +40,6 @@ const (
NodeE2E Suite = "node e2e"
)
var (
mountImage = imageutils.GetE2EImage(imageutils.Mounttest)
busyboxImage = "busybox"
)
var CurrentSuite Suite
// CommonImageWhiteList is the list of images used in common test. These images should be prepulled
@@ -52,20 +47,20 @@ var CurrentSuite Suite
// only used by node e2e test.
// TODO(random-liu): Change the image puller pod to use similar mechanism.
var CommonImageWhiteList = sets.NewString(
"busybox",
imageutils.GetE2EImage(imageutils.BusyBox),
imageutils.GetE2EImage(imageutils.EntrypointTester),
imageutils.GetE2EImage(imageutils.IpcUtils),
imageutils.GetE2EImage(imageutils.Liveness),
imageutils.GetE2EImage(imageutils.Mounttest),
imageutils.GetE2EImage(imageutils.MounttestUser),
imageutils.GetE2EImage(imageutils.Netexec),
imageutils.GetE2EImage(imageutils.NginxSlim),
imageutils.GetE2EImage(imageutils.Nginx),
imageutils.GetE2EImage(imageutils.ServeHostname),
imageutils.GetE2EImage(imageutils.TestWebserver),
imageutils.GetE2EImage(imageutils.Hostexec),
imageutils.GetE2EImage(imageutils.VolumeNFSServer),
imageutils.GetE2EImage(imageutils.VolumeGlusterServer),
imageutils.GetE2EImage(imageutils.E2ENet),
imageutils.GetE2EImage(imageutils.Net),
)
func svcByName(name string, port int) *v1.Service {

View File

@@ -62,7 +62,7 @@ var _ = Describe("[sig-storage] GCP Volumes", func() {
var c clientset.Interface
BeforeEach(func() {
framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu")
framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu", "custom")
namespace = f.Namespace
c = f.ClientSet

View File

@@ -75,10 +75,6 @@ func setupProviderConfig() error {
managedZones = []string{zone}
}
gceAlphaFeatureGate := gcecloud.NewAlphaFeatureGate([]string{
gcecloud.AlphaFeatureNetworkEndpointGroup,
})
gceCloud, err := gcecloud.CreateGCECloud(&gcecloud.CloudConfig{
ApiEndpoint: framework.TestContext.CloudConfig.ApiEndpoint,
ProjectID: framework.TestContext.CloudConfig.ProjectID,
@@ -91,7 +87,8 @@ func setupProviderConfig() error {
NodeInstancePrefix: "",
TokenSource: nil,
UseMetadataServer: false,
AlphaFeatureGate: gceAlphaFeatureGate})
AlphaFeatureGate: gcecloud.NewAlphaFeatureGate([]string{}),
})
if err != nil {
return fmt.Errorf("Error building GCE/GKE provider: %v", err)
@@ -186,41 +183,15 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
// #41007. To avoid those pods preventing the whole test runs (and just
// wasting the whole run), we allow for some not-ready pods (with the
// number equal to the number of allowed not-ready nodes).
if err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, framework.ImagePullerLabels); err != nil {
if err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
framework.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf)
runKubernetesServiceTestContainer(c, metav1.NamespaceDefault)
framework.Failf("Error waiting for all pods to be running and ready: %v", err)
}
if err := framework.WaitForPodsSuccess(c, metav1.NamespaceSystem, framework.ImagePullerLabels, framework.ImagePrePullingTimeout); err != nil {
// There is no guarantee that the image pulling will succeed in 3 minutes
// and we don't even run the image puller on all platforms (including GKE).
// We wait for it so we get an indication of failures in the logs, and to
// maximize benefit of image pre-pulling.
framework.Logf("WARNING: Image pulling pods failed to enter success in %v: %v", framework.ImagePrePullingTimeout, err)
}
// Dump the output of the nethealth containers only once per run
if framework.TestContext.DumpLogsOnFailure {
logFunc := framework.Logf
if framework.TestContext.ReportDir != "" {
filePath := path.Join(framework.TestContext.ReportDir, "nethealth.txt")
file, err := os.Create(filePath)
if err != nil {
framework.Logf("Failed to create a file with network health data %v: %v\nPrinting to stdout", filePath, err)
} else {
defer file.Close()
if err = file.Chmod(0644); err != nil {
framework.Logf("Failed to chmod to 644 of %v: %v", filePath, err)
}
logFunc = framework.GetLogToFileFunc(file)
framework.Logf("Dumping network health container logs from all nodes to file %v", filePath)
}
} else {
framework.Logf("Dumping network health container logs from all nodes...")
}
framework.LogContainersInPodsWithLabels(c, metav1.NamespaceSystem, framework.ImagePullerLabels, "nethealth", logFunc)
if err := framework.WaitForDaemonSets(c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil {
framework.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
}
// Log the version of the server and this client.

View File

@@ -15,6 +15,7 @@ go_library(
"deployment_util.go",
"exec_util.go",
"firewall_util.go",
"flake_reporting_util.go",
"framework.go",
"get-kubemark-resource-usage.go",
"google_compute.go",
@@ -58,10 +59,12 @@ go_library(
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/job:go_default_library",
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/controller/service:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/apis/config:go_default_library",
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
"//pkg/kubelet/dockershim/metrics:go_default_library",
"//pkg/kubelet/events:go_default_library",
@@ -80,6 +83,58 @@ go_library(
"//pkg/util/taints:go_default_library",
"//pkg/util/version:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/apps/v1beta2:go_default_library",
"//staging/src/k8s.io/api/authorization/v1beta1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/version:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/discovery/cached:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/restmapper:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
"//test/e2e/framework/ginkgowrapper:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/manifest:go_default_library",
@@ -103,56 +158,6 @@ go_library(
"//vendor/golang.org/x/net/websocket:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",
"//vendor/google.golang.org/api/googleapi:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
"//vendor/k8s.io/api/authorization/v1beta1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/version:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/discovery/cached:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/restmapper:go_default_library",
"//vendor/k8s.io/client-go/scale:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//vendor/k8s.io/client-go/tools/remotecommand:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)

View File

@@ -17,7 +17,7 @@ limitations under the License.
package framework
import (
"fmt"
"github.com/golang/glog"
"sync"
"time"
@@ -62,7 +62,7 @@ func WaitForNamedAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviews
// GKE doesn't enable the SAR endpoint. Without this endpoint, we cannot determine if the policy engine
// has adjusted as expected. In this case, simply wait one second and hope it's up to date
if apierrors.IsNotFound(err) {
fmt.Printf("SubjectAccessReview endpoint is missing\n")
glog.Info("SubjectAccessReview endpoint is missing")
time.Sleep(1 * time.Second)
return true, nil
}
@@ -94,7 +94,7 @@ func BindClusterRole(c v1beta1rbac.ClusterRoleBindingsGetter, clusterRole, ns st
// if we failed, don't fail the entire test because it may still work. RBAC may simply be disabled.
if err != nil {
fmt.Printf("Error binding clusterrole/%s for %q for %v\n", clusterRole, ns, subjects)
glog.Errorf("Error binding clusterrole/%s for %q for %v\n", clusterRole, ns, subjects)
}
}
@@ -124,7 +124,7 @@ func bindInNamespace(c v1beta1rbac.RoleBindingsGetter, roleType, role, ns string
// if we failed, don't fail the entire test because it may still work. RBAC may simply be disabled.
if err != nil {
fmt.Printf("Error binding %s/%s into %q for %v\n", roleType, role, ns, subjects)
glog.Errorf("Error binding %s/%s into %q for %v\n", roleType, role, ns, subjects)
}
}
@@ -140,7 +140,7 @@ func IsRBACEnabled(f *Framework) bool {
Logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err)
isRBACEnabled = false
} else if crs == nil || len(crs.Items) == 0 {
Logf("No ClusteRoles found; assuming RBAC is disabled.")
Logf("No ClusterRoles found; assuming RBAC is disabled.")
isRBACEnabled = false
} else {
Logf("Found ClusterRoles; assuming RBAC is enabled.")
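These helpers are typically used together from an e2e test: check whether RBAC is enabled at all, and only then create the binding, tolerating failure. A short usage sketch (assuming rbacv1beta1 aliases k8s.io/api/rbac/v1beta1; the cluster-admin role and default service account are illustrative choices, not taken from this commit):

    if framework.IsRBACEnabled(f) {
        framework.BindClusterRole(f.ClientSet.RbacV1beta1(), "cluster-admin", f.Namespace.Name,
            rbacv1beta1.Subject{
                Kind:      rbacv1beta1.ServiceAccountKind,
                Namespace: f.Namespace.Name,
                Name:      "default",
            })
    }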

View File

@@ -21,7 +21,7 @@ import (
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
crdclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/testserver"
"k8s.io/apiextensions-apiserver/test/integration/fixtures"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
@@ -76,7 +76,7 @@ func CreateTestCRD(f *Framework) (*TestCrd, error) {
crd := newCRDForTest(testcrd)
//create CRD and waits for the resource to be recognized and available.
crd, err = testserver.CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient)
crd, err = fixtures.CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient)
if err != nil {
Failf("failed to create CustomResourceDefinition: %v", err)
return nil, err
@@ -89,7 +89,7 @@ func CreateTestCRD(f *Framework) (*TestCrd, error) {
testcrd.Crd = crd
testcrd.DynamicClient = resourceClient
testcrd.CleanUp = func() error {
err := testserver.DeleteCustomResourceDefinition(crd, apiExtensionClient)
err := fixtures.DeleteCustomResourceDefinition(crd, apiExtensionClient)
if err != nil {
Failf("failed to delete CustomResourceDefinition(%s): %v", name, err)
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package framework
import (
"context"
"fmt"
"time"
@@ -30,9 +31,11 @@ import (
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
watchtools "k8s.io/client-go/tools/watch"
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*apps.Deployment, error) {
@@ -172,7 +175,9 @@ func WatchRecreateDeployment(c clientset.Interface, d *apps.Deployment) error {
d.Generation <= d.Status.ObservedGeneration, nil
}
_, err = watch.Until(2*time.Minute, w, condition)
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, w, condition)
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
}
@@ -255,7 +260,7 @@ func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector ma
Containers: []v1.Container{
{
Name: "write-pod",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
SecurityContext: &v1.SecurityContext{

View File

@@ -54,7 +54,7 @@ func ConstructFirewallForLBService(svc *v1.Service, nodeTag string) *compute.Fir
Failf("can not construct firewall rule for non-loadbalancer type service")
}
fw := compute.Firewall{}
fw.Name = MakeFirewallNameForLBService(cloudprovider.GetLoadBalancerName(svc))
fw.Name = MakeFirewallNameForLBService(cloudprovider.DefaultLoadBalancerName(svc))
fw.TargetTags = []string{nodeTag}
if svc.Spec.LoadBalancerSourceRanges == nil {
fw.SourceRanges = []string{"0.0.0.0/0"}
@@ -80,7 +80,7 @@ func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service,
Failf("can not construct firewall rule for non-loadbalancer type service")
}
fw := compute.Firewall{}
fw.Name = MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.GetLoadBalancerName(svc), isNodesHealthCheck)
fw.Name = MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), isNodesHealthCheck)
fw.TargetTags = []string{nodeTag}
fw.SourceRanges = gcecloud.LoadBalancerSrcRanges()
healthCheckPort := gcecloud.GetNodesHealthCheckPort()

View File

@@ -0,0 +1,91 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bytes"
"fmt"
"sync"
)
type FlakeReport struct {
lock sync.RWMutex
Flakes []string `json:"flakes"`
FlakeCount int `json:"flakeCount"`
}
func NewFlakeReport() *FlakeReport {
return &FlakeReport{
Flakes: []string{},
}
}
func buildDescription(optionalDescription ...interface{}) string {
switch len(optionalDescription) {
case 0:
return ""
default:
return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...)
}
}
// RecordFlakeIfError records the error (if non-nil) as a flake along with an optional description.
// It can be used as a replacement for framework.ExpectNoError() for non-critical errors that can
// be treated as 'flakes', so that they do not fail the test.
func (f *FlakeReport) RecordFlakeIfError(err error, optionalDescription ...interface{}) {
if err == nil {
return
}
msg := fmt.Sprintf("Unexpected error occurred: %v", err)
desc := buildDescription(optionalDescription...)
if desc != "" {
msg = fmt.Sprintf("%v (Description: %v)", msg, desc)
}
Logf(msg)
f.lock.Lock()
defer f.lock.Unlock()
f.Flakes = append(f.Flakes, msg)
f.FlakeCount++
}
func (f *FlakeReport) GetFlakeCount() int {
f.lock.RLock()
defer f.lock.RUnlock()
return f.FlakeCount
}
func (f *FlakeReport) PrintHumanReadable() string {
f.lock.RLock()
defer f.lock.RUnlock()
buf := bytes.Buffer{}
buf.WriteString(fmt.Sprintf("FlakeCount: %v\n", f.FlakeCount))
buf.WriteString("Flakes:\n")
for _, flake := range f.Flakes {
buf.WriteString(fmt.Sprintf("%v\n", flake))
}
return buf.String()
}
func (f *FlakeReport) PrintJSON() string {
f.lock.RLock()
defer f.lock.RUnlock()
return PrettyPrintJSON(f)
}
func (f *FlakeReport) SummaryKind() string {
return "FlakeReport"
}
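A short usage sketch of the reporter above, as a test would reach it through the Framework.RecordFlakeIfError wrapper added later in this diff (the pod deletion and the names f and podName are assumed for illustration):

    // Record a non-critical cleanup error as a flake instead of failing the test.
    err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, nil)
    f.RecordFlakeIfError(err, "failed to clean up pod %s", podName)
    // AfterEach appends the FlakeReport to f.TestSummaries whenever
    // GetFlakeCount() > 0, so recorded flakes show up in the printed summaries.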

View File

@@ -21,12 +21,12 @@ import (
"bytes"
"fmt"
"os"
"path"
"strings"
"sync"
"time"
"k8s.io/api/core/v1"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -42,6 +42,7 @@ import (
"k8s.io/client-go/restmapper"
scaleclient "k8s.io/client-go/scale"
"k8s.io/client-go/tools/clientcmd"
csi "k8s.io/csi-api/pkg/client/clientset/versioned"
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
@@ -67,6 +68,8 @@ type Framework struct {
ClientSet clientset.Interface
KubemarkExternalClusterClientSet clientset.Interface
APIExtensionsClientSet apiextensionsclient.Interface
CSIClientSet csi.Interface
InternalClientset *internalclientset.Clientset
AggregatorClient *aggregatorclient.Clientset
@@ -90,6 +93,9 @@ type Framework struct {
logsSizeCloseChannel chan bool
logsSizeVerifier *LogsSizeVerifier
// Flaky operation failures in an e2e test can be captured through this.
flakeReport *FlakeReport
// To make sure that this framework cleans up after itself, no matter what,
// we install a Cleanup action before each test and clear it after. If we
// should abort, the AfterSuite hook should run all Cleanup actions.
@@ -152,6 +158,15 @@ func (f *Framework) BeforeEach() {
if f.ClientSet == nil {
By("Creating a kubernetes client")
config, err := LoadConfig()
testDesc := CurrentGinkgoTestDescription()
if len(testDesc.ComponentTexts) > 0 {
componentTexts := strings.Join(testDesc.ComponentTexts, " ")
config.UserAgent = fmt.Sprintf(
"%v -- %v",
rest.DefaultKubernetesUserAgent(),
componentTexts)
}
Expect(err).NotTo(HaveOccurred())
config.QPS = f.Options.ClientQPS
config.Burst = f.Options.ClientBurst
@@ -163,12 +178,19 @@ func (f *Framework) BeforeEach() {
}
f.ClientSet, err = clientset.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
f.APIExtensionsClientSet, err = apiextensionsclient.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
f.InternalClientset, err = internalclientset.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
f.AggregatorClient, err = aggregatorclient.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
f.DynamicClient, err = dynamic.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
// csi.storage.k8s.io is based on CRD, which is served only as JSON
jsonConfig := config
jsonConfig.ContentType = "application/json"
f.CSIClientSet, err = csi.NewForConfig(jsonConfig)
Expect(err).NotTo(HaveOccurred())
// create scales getter, set GroupVersion and NegotiatedSerializer to default values
// as they are required when creating a REST client.
@@ -210,7 +232,7 @@ func (f *Framework) BeforeEach() {
}
if !f.SkipNamespaceCreation {
By("Building a namespace api object")
By(fmt.Sprintf("Building a namespace api object, basename %s", f.BaseName))
namespace, err := f.CreateNamespace(f.BaseName, map[string]string{
"e2e-framework": f.BaseName,
})
@@ -269,6 +291,8 @@ func (f *Framework) BeforeEach() {
}
}
f.flakeReport = NewFlakeReport()
}
// AfterEach deletes the namespace, after reading its events.
@@ -326,25 +350,6 @@ func (f *Framework) AfterEach() {
if !f.SkipNamespaceCreation {
DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name)
}
logFunc := Logf
if TestContext.ReportDir != "" {
filePath := path.Join(TestContext.ReportDir, "image-puller.txt")
file, err := os.Create(filePath)
if err != nil {
By(fmt.Sprintf("Failed to create a file with image-puller data %v: %v\nPrinting to stdout", filePath, err))
} else {
By(fmt.Sprintf("Dumping a list of prepulled images on each node to file %v", filePath))
defer file.Close()
if err = file.Chmod(0644); err != nil {
Logf("Failed to chmod to 644 of %v: %v", filePath, err)
}
logFunc = GetLogToFileFunc(file)
}
} else {
By("Dumping a list of prepulled images on each node...")
}
LogContainersInPodsWithLabels(f.ClientSet, metav1.NamespaceSystem, ImagePullerLabels, "image-puller", logFunc)
}
if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" && f.gatherer != nil {
@@ -382,6 +387,12 @@ func (f *Framework) AfterEach() {
close(f.kubemarkControllerCloseChannel)
}
// Report any flakes that were observed in the e2e test and reset.
if f.flakeReport != nil && f.flakeReport.GetFlakeCount() > 0 {
f.TestSummaries = append(f.TestSummaries, f.flakeReport)
f.flakeReport = nil
}
PrintSummaries(f.TestSummaries, f.BaseName)
// Check whether all nodes are ready after the test.
@@ -409,6 +420,10 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (
return ns, err
}
func (f *Framework) RecordFlakeIfError(err error, optionalDescription ...interface{}) {
f.flakeReport.RecordFlakeIfError(err, optionalDescription...)
}
// AddNamespacesToDelete adds one or more namespaces to be deleted when the test
// completes.
func (f *Framework) AddNamespacesToDelete(namespaces ...*v1.Namespace) {
@@ -533,7 +548,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
return nil
} else {
return []v1.ServicePort{{
Protocol: "TCP",
Protocol: v1.ProtocolTCP,
Port: int32(svcPort),
TargetPort: intstr.FromInt(contPort),
}}

View File

@@ -21,6 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"github.com/golang/glog"
. "github.com/onsi/gomega"
)
@@ -50,13 +51,13 @@ func NumberOfNVIDIAGPUs(node *v1.Node) int64 {
}
// NVIDIADevicePlugin returns the official Google Device Plugin pod for NVIDIA GPU in GKE
func NVIDIADevicePlugin(ns string) *v1.Pod {
func NVIDIADevicePlugin() *v1.Pod {
ds, err := DsFromManifest(GPUDevicePluginDSYAML)
Expect(err).NotTo(HaveOccurred())
p := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "device-plugin-nvidia-gpu-" + string(uuid.NewUUID()),
Namespace: ns,
Namespace: metav1.NamespaceSystem,
},
Spec: ds.Spec.Template.Spec,
@@ -69,7 +70,16 @@ func NVIDIADevicePlugin(ns string) *v1.Pod {
func GetGPUDevicePluginImage() string {
ds, err := DsFromManifest(GPUDevicePluginDSYAML)
if err != nil || ds == nil || len(ds.Spec.Template.Spec.Containers) < 1 {
if err != nil {
glog.Errorf("Failed to parse the device plugin image: %v", err)
return ""
}
if ds == nil {
glog.Errorf("Failed to parse the device plugin image: the extracted DaemonSet is nil")
return ""
}
if len(ds.Spec.Template.Spec.Containers) < 1 {
glog.Errorf("Failed to parse the device plugin image: cannot extract the container from YAML")
return ""
}
return ds.Spec.Template.Spec.Containers[0].Image

Some files were not shown because too many files have changed in this diff.