Bumping k8s dependencies to 1.13
10 changes: vendor/k8s.io/kubernetes/test/kubemark/common/util.sh (generated, vendored)
@@ -26,19 +26,19 @@ function run-cmd-with-retries {
   if [[ "${ret_val:-0}" -ne "0" ]]; then
     if [[ $(echo "${result}" | grep -c "already exists") -gt 0 ]]; then
       if [[ "${attempt}" == 1 ]]; then
-        echo -e "${color_red}Failed to $1 $2 $3 as the resource hasn't been deleted from a previous run.${color_norm}" >& 2
+        echo -e "${color_red}Failed to $1 $2 ${3:-} as the resource hasn't been deleted from a previous run.${color_norm}" >& 2
         exit 1
       fi
-      echo -e "${color_yellow}Succeeded to $1 $2 $3 in the previous attempt, but status response wasn't received.${color_norm}"
+      echo -e "${color_yellow}Succeeded to $1 $2 ${3:-} in the previous attempt, but status response wasn't received.${color_norm}"
       return 0
     fi
-    echo -e "${color_yellow}Attempt $attempt failed to $1 $2 $3. Retrying.${color_norm}" >& 2
+    echo -e "${color_yellow}Attempt $attempt failed to $1 $2 ${3:-}. Retrying.${color_norm}" >& 2
     sleep $(($attempt * 5))
   else
-    echo -e "${color_green}Succeeded to $1 $2 $3.${color_norm}"
+    echo -e "${color_green}Succeeded to $1 $2 ${3:-}.${color_norm}"
     return 0
   fi
 done
-echo -e "${color_red}Failed to $1 $2 $3.${color_norm}" >& 2
+echo -e "${color_red}Failed to $1 $2 ${3:-}.${color_norm}" >& 2
 exit 1
}
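Note: the `$3` to `${3:-}` change is an unset-parameter safety fix rather than a behavior change. `run-cmd-with-retries` is now also invoked with two-word commands (the refactored image build below calls it as `run-cmd-with-retries "${build_cmd[@]}"`, where the command may be just `make <target>`), and under bash's `set -o nounset` a bare `$3` aborts the whole script when only two arguments were passed. A minimal sketch of the failure mode; the function names here are illustrative, not from the patch:

    #!/usr/bin/env bash
    set -u                                     # same effect as set -o nounset
    safe()   { echo "doing: $1 $2 ${3:-}"; }   # empty default when $3 is missing
    unsafe() { echo "doing: $1 $2 $3"; }
    safe make kubemark-image                   # prints "doing: make kubemark-image "
    unsafe make kubemark-image                 # dies with "$3: unbound variable"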
61 changes: vendor/k8s.io/kubernetes/test/kubemark/gce/util.sh (generated, vendored)
@@ -25,28 +25,29 @@ function run-gcloud-compute-with-retries {
   run-cmd-with-retries gcloud compute "$@"
 }
 
-function create-master-instance-with-resources {
-  GCLOUD_COMMON_ARGS="--project ${PROJECT} --zone ${ZONE}"
+function authenticate-docker {
+  echo "Configuring registry authentication"
+  mkdir -p "${HOME}/.docker"
+  gcloud beta auth configure-docker -q
+}
 
-  run-gcloud-compute-with-retries disks create "${MASTER_NAME}-pd" \
-    ${GCLOUD_COMMON_ARGS} \
-    --type "${MASTER_DISK_TYPE}" \
-    --size "${MASTER_DISK_SIZE}"
-
-  if [ "${EVENT_PD:-}" == "true" ]; then
-    run-gcloud-compute-with-retries disks create "${MASTER_NAME}-event-pd" \
-      ${GCLOUD_COMMON_ARGS} \
-      --type "${MASTER_DISK_TYPE}" \
-      --size "${MASTER_DISK_SIZE}"
-  fi
-
-  run-gcloud-compute-with-retries addresses create "${MASTER_NAME}-ip" \
-    --project "${PROJECT}" \
-    --region "${REGION}" -q
-
-  MASTER_IP=$(gcloud compute addresses describe "${MASTER_NAME}-ip" \
-    --project "${PROJECT}" --region "${REGION}" -q --format='value(address)')
+# This function isn't too robust to race, but that should be ok given its one-off usage during setup.
+function get-or-create-master-ip {
+  MASTER_IP=$(gcloud compute addresses describe "${MASTER_NAME}-ip" \
+    --project "${PROJECT}" --region "${REGION}" -q --format='value(address)') 2>/dev/null || true
+
+  if [[ -z "${MASTER_IP:-}" ]]; then
+    run-gcloud-compute-with-retries addresses create "${MASTER_NAME}-ip" \
+      --project "${PROJECT}" \
+      --region "${REGION}" -q
+
+    MASTER_IP=$(gcloud compute addresses describe "${MASTER_NAME}-ip" \
+      --project "${PROJECT}" --region "${REGION}" -q --format='value(address)')
+  fi
+}
 
+function create-master-instance-with-resources {
+  GCLOUD_COMMON_ARGS="--project ${PROJECT} --zone ${ZONE}"
   # Override the master image project to cos-cloud for COS images starting with `cos` string prefix.
   DEFAULT_GCI_PROJECT=google-containers
   if [[ "${GCI_VERSION}" == "cos"* ]]; then
@@ -54,6 +55,22 @@ function create-master-instance-with-resources {
   fi
   MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-${DEFAULT_GCI_PROJECT}}
 
+  run-gcloud-compute-with-retries disks create "${MASTER_NAME}-pd" \
+    ${GCLOUD_COMMON_ARGS} \
+    --type "${MASTER_DISK_TYPE}" \
+    --size "${MASTER_DISK_SIZE}" &
+
+  if [ "${EVENT_PD:-}" == "true" ]; then
+    run-gcloud-compute-with-retries disks create "${MASTER_NAME}-event-pd" \
+      ${GCLOUD_COMMON_ARGS} \
+      --type "${MASTER_DISK_TYPE}" \
+      --size "${MASTER_DISK_SIZE}" &
+  fi
+
+  get-or-create-master-ip &
+
+  wait
+
   run-gcloud-compute-with-retries instances create "${MASTER_NAME}" \
     ${GCLOUD_COMMON_ARGS} \
     --address "${MASTER_IP}" \
@@ -68,14 +85,14 @@ function create-master-instance-with-resources {
 
   run-gcloud-compute-with-retries instances add-metadata "${MASTER_NAME}" \
     ${GCLOUD_COMMON_ARGS} \
-    --metadata-from-file startup-script="${KUBE_ROOT}/test/kubemark/resources/start-kubemark-master.sh"
+    --metadata-from-file startup-script="${KUBE_ROOT}/test/kubemark/resources/start-kubemark-master.sh" &
 
   if [ "${EVENT_PD:-}" == "true" ]; then
     echo "Attaching ${MASTER_NAME}-event-pd to ${MASTER_NAME}"
     run-gcloud-compute-with-retries instances attach-disk "${MASTER_NAME}" \
       ${GCLOUD_COMMON_ARGS} \
       --disk "${MASTER_NAME}-event-pd" \
-      --device-name="master-event-pd"
+      --device-name="master-event-pd" &
   fi
 
   run-gcloud-compute-with-retries firewall-rules create "${MASTER_NAME}-https" \
@@ -83,7 +100,9 @@ function create-master-instance-with-resources {
     --network "${NETWORK}" \
     --source-ranges "0.0.0.0/0" \
     --target-tags "${MASTER_TAG}" \
-    --allow "tcp:443"
+    --allow "tcp:443" &
+
+  wait
 }
 
 # Command to be executed is '$1'.
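Note: the trailing `&` added throughout this file, plus the single `wait`, are plain shell job control. The disk creations, IP reservation, metadata update, disk attach, and firewall rule are independent of one another, so they now run concurrently, and the function only blocks where ordering matters. A minimal sketch of the pattern with placeholder commands:

    create_disk() { sleep 2; echo "disk ready"; }   # stand-ins for the gcloud calls
    create_ip()   { sleep 1; echo "ip ready"; }
    create_disk &
    create_ip &
    wait                        # returns once all background jobs have exited
    echo "continue with instance creation"

One caveat: a bare `wait` reports success even when a background job failed, which is why each backgrounded helper keeps its own retry-and-exit logic.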
42 additions: vendor/k8s.io/kubernetes/test/kubemark/iks/shutdown.sh (generated, vendored, new file)
@@ -0,0 +1,42 @@
#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Script that destroys the clusters used, namespace, and deployment.

KUBECTL=kubectl
KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark"
RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources"

# Login to cloud services
complete-login

# Remove resources created for kubemark
echo -e "${color_yellow}REMOVING RESOURCES${color_norm}"
spawn-config
"${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/addons" &> /dev/null || true
"${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/hollow-node.yaml" &> /dev/null || true
"${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/kubemark-ns.json" &> /dev/null || true
rm -rf "${RESOURCE_DIRECTORY}/addons" "${RESOURCE_DIRECTORY}/hollow-node.yaml" &> /dev/null || true

# Remove clusters, namespaces, and deployments
delete-clusters
if [[ -f "${RESOURCE_DIRECTORY}/iks-namespacelist.sh" ]] ; then
  bash ${RESOURCE_DIRECTORY}/iks-namespacelist.sh
  rm -f ${RESOURCE_DIRECTORY}/iks-namespacelist.sh
fi
echo -e "${color_blue}EXECUTION COMPLETE${color_norm}"
exit 0
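Note: each delete above is suffixed with `&> /dev/null || true`, which makes the teardown idempotent. Rerunning the script against an already-clean environment silences kubectl's "not found" errors and keeps a non-zero exit status from stopping the remaining cleanup. The same pattern in isolation:

    # delete something that may or may not exist, without aborting the script
    kubectl delete -f some-manifest.yaml &> /dev/null || true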
294 additions: vendor/k8s.io/kubernetes/test/kubemark/iks/startup.sh (generated, vendored, new file)
@@ -0,0 +1,294 @@
#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Script that creates a Kubemark cluster for IBM cloud.

KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh"
KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark"
RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources"

# Generate secret and configMap for the hollow-node pods to work, prepare
# manifests of the hollow-node and heapster replication controllers from
# templates, and finally create these resources through kubectl.
function create-kube-hollow-node-resources {
  # Create kubeconfig for Kubelet.
  KUBELET_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: kubelet
  user:
    client-certificate-data: "${KUBELET_CERT_BASE64}"
    client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
  cluster:
    certificate-authority-data: "${CA_CERT_BASE64}"
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: kubelet
  name: kubemark-context
current-context: kubemark-context")

  # Create kubeconfig for Kubeproxy.
  KUBEPROXY_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: kube-proxy
  user:
    client-certificate-data: "${KUBELET_CERT_BASE64}"
    client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
  cluster:
    insecure-skip-tls-verify: true
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: kube-proxy
  name: kubemark-context
current-context: kubemark-context")

  # Create kubeconfig for Heapster.
  HEAPSTER_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: heapster
  user:
    client-certificate-data: "${KUBELET_CERT_BASE64}"
    client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
  cluster:
    insecure-skip-tls-verify: true
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: heapster
  name: kubemark-context
current-context: kubemark-context")

  # Create kubeconfig for Cluster Autoscaler.
  CLUSTER_AUTOSCALER_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: cluster-autoscaler
  user:
    client-certificate-data: "${KUBELET_CERT_BASE64}"
    client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
  cluster:
    insecure-skip-tls-verify: true
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: cluster-autoscaler
  name: kubemark-context
current-context: kubemark-context")

  # Create kubeconfig for NodeProblemDetector.
  NPD_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: node-problem-detector
  user:
    client-certificate-data: "${KUBELET_CERT_BASE64}"
    client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
  cluster:
    insecure-skip-tls-verify: true
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: node-problem-detector
  name: kubemark-context
current-context: kubemark-context")

  # Create kubeconfig for Kube DNS.
  KUBE_DNS_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: kube-dns
  user:
    client-certificate-data: "${KUBELET_CERT_BASE64}"
    client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
  cluster:
    insecure-skip-tls-verify: true
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: kube-dns
  name: kubemark-context
current-context: kubemark-context")

  # Create kubemark namespace.
  spawn-config
  if kubectl get ns | grep -Fq "kubemark"; then
    kubectl delete ns kubemark
    while kubectl get ns | grep -Fq "kubemark"
    do
      sleep 10
    done
  fi
  "${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/kubemark-ns.json"

  # Create configmap for configuring hollow- kubelet, proxy and npd.
  "${KUBECTL}" create configmap "node-configmap" --namespace="kubemark" \
    --from-literal=content.type="${TEST_CLUSTER_API_CONTENT_TYPE}" \
    --from-file=kernel.monitor="${RESOURCE_DIRECTORY}/kernel-monitor.json"

  # Create secret for passing kubeconfigs to kubelet, kubeproxy and npd.
  "${KUBECTL}" create secret generic "kubeconfig" --type=Opaque --namespace="kubemark" \
    --from-literal=kubelet.kubeconfig="${KUBELET_KUBECONFIG_CONTENTS}" \
    --from-literal=kubeproxy.kubeconfig="${KUBEPROXY_KUBECONFIG_CONTENTS}" \
    --from-literal=heapster.kubeconfig="${HEAPSTER_KUBECONFIG_CONTENTS}" \
    --from-literal=cluster_autoscaler.kubeconfig="${CLUSTER_AUTOSCALER_KUBECONFIG_CONTENTS}" \
    --from-literal=npd.kubeconfig="${NPD_KUBECONFIG_CONTENTS}" \
    --from-literal=dns.kubeconfig="${KUBE_DNS_KUBECONFIG_CONTENTS}"

  # Create addon pods.
  # Heapster.
  mkdir -p "${RESOURCE_DIRECTORY}/addons"
  sed "s/{{MASTER_IP}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/heapster_template.json" > "${RESOURCE_DIRECTORY}/addons/heapster.json"
  metrics_mem_per_node=4
  metrics_mem=$((200 + ${metrics_mem_per_node}*${NUM_NODES}))
  sed -i'' -e "s/{{METRICS_MEM}}/${metrics_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
  metrics_cpu_per_node_numerator=${NUM_NODES}
  metrics_cpu_per_node_denominator=2
  metrics_cpu=$((80 + metrics_cpu_per_node_numerator / metrics_cpu_per_node_denominator))
  sed -i'' -e "s/{{METRICS_CPU}}/${metrics_cpu}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
  eventer_mem_per_node=500
  eventer_mem=$((200 * 1024 + ${eventer_mem_per_node}*${NUM_NODES}))
  sed -i'' -e "s/{{EVENTER_MEM}}/${eventer_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"

  # Cluster Autoscaler.
  if [[ "${ENABLE_KUBEMARK_CLUSTER_AUTOSCALER:-}" == "true" ]]; then
    echo "Setting up Cluster Autoscaler"
    KUBEMARK_AUTOSCALER_MIG_NAME="${KUBEMARK_AUTOSCALER_MIG_NAME:-${NODE_INSTANCE_PREFIX}-group}"
    KUBEMARK_AUTOSCALER_MIN_NODES="${KUBEMARK_AUTOSCALER_MIN_NODES:-0}"
    KUBEMARK_AUTOSCALER_MAX_NODES="${KUBEMARK_AUTOSCALER_MAX_NODES:-${DESIRED_NODES}}"
    NUM_NODES=${KUBEMARK_AUTOSCALER_MAX_NODES}
    echo "Setting maximum cluster size to ${NUM_NODES}."
    KUBEMARK_MIG_CONFIG="autoscaling.k8s.io/nodegroup: ${KUBEMARK_AUTOSCALER_MIG_NAME}"
    sed "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/cluster-autoscaler_template.json" > "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
    sed -i'' -e "s/{{kubemark_autoscaler_mig_name}}/${KUBEMARK_AUTOSCALER_MIG_NAME}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
    sed -i'' -e "s/{{kubemark_autoscaler_min_nodes}}/${KUBEMARK_AUTOSCALER_MIN_NODES}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
    sed -i'' -e "s/{{kubemark_autoscaler_max_nodes}}/${KUBEMARK_AUTOSCALER_MAX_NODES}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
  fi

  # Kube DNS.
  if [[ "${ENABLE_KUBEMARK_KUBE_DNS:-}" == "true" ]]; then
    echo "Setting up kube-dns"
    sed "s/{{dns_domain}}/${KUBE_DNS_DOMAIN}/g" "${RESOURCE_DIRECTORY}/kube_dns_template.yaml" > "${RESOURCE_DIRECTORY}/addons/kube_dns.yaml"
  fi

  "${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/addons" --namespace="kubemark"
  set-registry-secrets

  # Create the replication controller for hollow-nodes.
  # We allow to override the NUM_REPLICAS when running Cluster Autoscaler.
  NUM_REPLICAS=${NUM_REPLICAS:-${NUM_NODES}}
  sed "s/{{numreplicas}}/${NUM_REPLICAS}/g" "${RESOURCE_DIRECTORY}/hollow-node_template.yaml" > "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  proxy_cpu=20
  if [ "${NUM_NODES}" -gt 1000 ]; then
    proxy_cpu=50
  fi
  proxy_mem_per_node=50
  proxy_mem=$((100 * 1024 + ${proxy_mem_per_node}*${NUM_NODES}))
  sed -i'' -e "s/{{HOLLOW_PROXY_CPU}}/${proxy_cpu}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  sed -i'' -e "s/{{HOLLOW_PROXY_MEM}}/${proxy_mem}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  sed -i'' -e "s'{{kubemark_image_registry}}'${KUBEMARK_IMAGE_REGISTRY}${KUBE_NAMESPACE}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  sed -i'' -e "s/{{kubemark_image_tag}}/${KUBEMARK_IMAGE_TAG}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  sed -i'' -e "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  sed -i'' -e "s/{{kubelet_verbosity_level}}/${KUBELET_TEST_LOG_LEVEL}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  sed -i'' -e "s/{{kubeproxy_verbosity_level}}/${KUBEPROXY_TEST_LOG_LEVEL}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  sed -i'' -e "s/{{use_real_proxier}}/${USE_REAL_PROXIER}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  sed -i'' -e "s'{{kubemark_mig_config}}'${KUBEMARK_MIG_CONFIG:-}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  "${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/hollow-node.yaml" --namespace="kubemark"

  echo "Created secrets, configMaps, replication-controllers required for hollow-nodes."
}

# Wait until all hollow-nodes are running or there is a timeout.
function wait-for-hollow-nodes-to-run-or-timeout {
  echo -n "Waiting for all hollow-nodes to become Running"
  start=$(date +%s)
  nodes=$("${KUBECTL}" --kubeconfig="${KUBECONFIG}" get node 2> /dev/null) || true
  ready=$(($(echo "${nodes}" | grep -v "NotReady" | wc -l) - 1))

  until [[ "${ready}" -ge "${NUM_REPLICAS}" ]]; do
    echo -n "."
    sleep 1
    now=$(date +%s)
    # Fail it if it already took more than 30 minutes.
    if [ $((now - start)) -gt 1800 ]; then
      echo ""
      echo -e "${color_red} Timeout waiting for all hollow-nodes to become Running. ${color_norm}"
      # Try listing nodes again - if it fails it means that API server is not responding
      if "${KUBECTL}" --kubeconfig="${KUBECONFIG}" get node &> /dev/null; then
        echo "Found only ${ready} ready hollow-nodes while waiting for ${NUM_NODES}."
      else
        echo "Got error while trying to list hollow-nodes. Probably API server is down."
      fi
      spawn-config
      pods=$("${KUBECTL}" get pods -l name=hollow-node --namespace=kubemark) || true
      running=$(($(echo "${pods}" | grep "Running" | wc -l)))
      echo "${running} hollow-nodes are reported as 'Running'"
      not_running=$(($(echo "${pods}" | grep -v "Running" | wc -l) - 1))
      echo "${not_running} hollow-nodes are reported as NOT 'Running'"
      echo $(echo "${pods}" | grep -v "Running")
      exit 1
    fi
    nodes=$("${KUBECTL}" --kubeconfig="${KUBECONFIG}" get node 2> /dev/null) || true
    ready=$(($(echo "${nodes}" | grep -v "NotReady" | wc -l) - 1))
  done
  echo -e "${color_green} Done!${color_norm}"
}

############################### Main Function ########################################
# In order for the cluster autoscaler to function, the template file must be changed so that the ":443"
# is removed. This is because the port is already given with the MASTER_IP.

# Create clusters and populate with hollow nodes
complete-login
build-kubemark-image
choose-clusters
generate-values
set-hollow-master
echo "Creating kube hollow node resources"
create-kube-hollow-node-resources
master-config
echo -e "${color_blue}EXECUTION COMPLETE${color_norm}"

# Check status of Kubemark
echo -e "${color_yellow}CHECKING STATUS${color_norm}"
wait-for-hollow-nodes-to-run-or-timeout

# Celebrate
echo ""
echo -e "${color_blue}SUCCESS${color_norm}"
clean-repo
exit 0
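Note: a small sed detail in the templating above. Substitutions whose replacement text can itself contain `/` (the image registry, the MIG config) switch the `s` command delimiter to a single quote, as in `s'{{kubemark_image_registry}}'...'g`; sed accepts any character placed directly after `s` as the delimiter. A standalone illustration, with a made-up registry value:

    REGISTRY="registry.ng.bluemix.net/kubemark"      # contains slashes
    echo "image: {{kubemark_image_registry}}/node:latest" \
      | sed -e "s'{{kubemark_image_registry}}'${REGISTRY}'g"
    # -> image: registry.ng.bluemix.net/kubemark/node:latest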
206 additions: vendor/k8s.io/kubernetes/test/kubemark/iks/util.sh (generated, vendored, new file)
@@ -0,0 +1,206 @@
#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../../..

# Creates a new kube-spawn cluster
function create-clusters {
  echo -e "${color_yellow}CHECKING CLUSTERS${color_norm}"
  if bx cs clusters | grep -Fq 'deleting'; then
    echo -n "Deleting old clusters"
  fi
  while bx cs clusters | grep -Fq 'deleting'
  do
    echo -n "."
    sleep 10
  done
  echo ""
  bx cs region-set us-east >/dev/null
  bx cs vlans wdc06 >/dev/null
  PRIVLAN=$(bx cs vlans wdc06 --json | jq '. | .[] | select(.type == "private") | .id' | sed -e "s/\"//g")
  PUBVLAN=$(bx cs vlans wdc06 --json | jq '. | .[] | select(.type == "public") | .id' | sed -e "s/\"//g")
  if ! bx cs clusters | grep -Fq 'kubeSpawnTester'; then
    echo "Creating spawning cluster"
    bx cs cluster-create --location ${CLUSTER_LOCATION} --public-vlan ${PUBVLAN} --private-vlan ${PRIVLAN} --workers 2 --machine-type u2c.2x4 --name kubeSpawnTester
  fi
  if ! bx cs clusters | grep -Fq 'kubeMasterTester'; then
    echo "Creating master cluster"
    bx cs cluster-create --location ${CLUSTER_LOCATION} --public-vlan ${PUBVLAN} --private-vlan ${PRIVLAN} --workers 2 --machine-type u2c.2x4 --name kubeMasterTester
  fi
  push-image
  if ! bx cs clusters | grep 'kubeSpawnTester' | grep -Fq 'normal'; then
    echo -e "${color_cyan}Warning: new clusters may take up to 60 minutes to be ready${color_norm}"
    echo -n "Clusters loading"
  fi
  while ! bx cs clusters | grep 'kubeSpawnTester' | grep -Fq 'normal'
  do
    echo -n "."
    sleep 5
  done
  while ! bx cs clusters | grep 'kubeMasterTester' | grep -Fq 'normal'
  do
    echo -n "."
    sleep 5
  done
  echo -e "${color_yellow}CLUSTER CREATION COMPLETE${color_norm}"
}

# Builds and pushes image to registry
function push-image {
  if [[ "${ISBUILD}" = "y" ]]; then
    if ! bx cr namespaces | grep -Fq ${KUBE_NAMESPACE}; then
      echo "Creating registry namespace"
      bx cr namespace-add ${KUBE_NAMESPACE}
      echo "bx cr namespace-rm ${KUBE_NAMESPACE}" >> ${RESOURCE_DIRECTORY}/iks-namespacelist.sh
    fi
    docker build -t ${KUBEMARK_INIT_TAG} ${KUBEMARK_IMAGE_LOCATION}
    docker tag ${KUBEMARK_INIT_TAG} ${KUBEMARK_IMAGE_REGISTRY}${KUBE_NAMESPACE}/${PROJECT}:${KUBEMARK_IMAGE_TAG}
    docker push ${KUBEMARK_IMAGE_REGISTRY}${KUBE_NAMESPACE}/${PROJECT}:${KUBEMARK_IMAGE_TAG}
    echo "Image pushed"
  else
    KUBEMARK_IMAGE_REGISTRY=$(echo "brandondr96")
    KUBE_NAMESPACE=""
  fi
}

# Allow user to use existing clusters if desired
function choose-clusters {
  echo -n -e "Do you want to use custom clusters? [y/N]${color_cyan}>${color_norm} "
  read USE_EXISTING
  if [[ "${USE_EXISTING}" = "y" ]]; then
    echo -e "${color_yellow}Enter path for desired hollow-node spawning cluster kubeconfig file:${color_norm}"
    read CUSTOM_SPAWN_CONFIG
    echo -e "${color_yellow}Enter path for desired hollow-node hosting cluster kubeconfig file:${color_norm}"
    read CUSTOM_MASTER_CONFIG
    push-image
  elif [[ "${USE_EXISTING}" = "N" ]]; then
    create-clusters
  else
    echo -e "${color_red}Invalid response, please try again:${color_norm}"
    choose-clusters
  fi
}

# Ensure secrets are correctly set
function set-registry-secrets {
  spawn-config
  kubectl get secret bluemix-default-secret-regional -o yaml | sed 's/default/kubemark/g' | kubectl -n kubemark create -f -
  kubectl patch serviceaccount -n kubemark default -p '{"imagePullSecrets": [{"name": "bluemix-kubemark-secret"}]}'
  kubectl -n kubemark get serviceaccounts default -o json | jq 'del(.metadata.resourceVersion)' | jq 'setpath(["imagePullSecrets"];[{"name":"bluemix-kubemark-secret-regional"}])' | kubectl -n kubemark replace serviceaccount default -f -
}

# Sets hollow nodes spawned under master
function set-hollow-master {
  echo -e "${color_yellow}CONFIGURING MASTER${color_norm}"
  master-config
  MASTER_IP=$(cat $KUBECONFIG | grep server | awk -F "/" '{print $3}')
}

# Set up master cluster environment
function master-config {
  if [[ "${USE_EXISTING}" = "y" ]]; then
    export KUBECONFIG=${CUSTOM_MASTER_CONFIG}
  else
    $(bx cs cluster-config kubeMasterTester --admin | grep export)
  fi
}

# Set up spawn cluster environment
function spawn-config {
  if [[ "${USE_EXISTING}" = "y" ]]; then
    export KUBECONFIG=${CUSTOM_SPAWN_CONFIG}
  else
    $(bx cs cluster-config kubeSpawnTester --admin | grep export)
  fi
}

# Deletes existing clusters
function delete-clusters {
  echo "DELETING CLUSTERS"
  bx cs cluster-rm kubeSpawnTester
  bx cs cluster-rm kubeMasterTester
  while ! bx cs clusters | grep 'kubeSpawnTester' | grep -Fq 'deleting'
  do
    sleep 5
  done
  while ! bx cs clusters | grep 'kubeMasterTester' | grep -Fq 'deleting'
  do
    sleep 5
  done
  kubectl delete ns kubemark
}

# Login to cloud services
function complete-login {
  echo -e "${color_yellow}LOGGING INTO CLOUD SERVICES${color_norm}"
  echo -n -e "Do you have a federated IBM cloud login? [y/N]${color_cyan}>${color_norm} "
  read ISFED
  if [[ "${ISFED}" = "y" ]]; then
    bx login --sso -a ${REGISTRY_LOGIN_URL}
  elif [[ "${ISFED}" = "N" ]]; then
    bx login -a ${REGISTRY_LOGIN_URL}
  else
    echo -e "${color_red}Invalid response, please try again:${color_norm}"
    complete-login
  fi
  bx cr login
}

# Generate values to fill the hollow-node configuration
function generate-values {
  echo "Generating values"
  master-config
  KUBECTL=kubectl
  KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark"
  RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources"
  TEST_CLUSTER_API_CONTENT_TYPE="bluemix" #Determine correct usage of this
  CONFIGPATH=${KUBECONFIG%/*}
  KUBELET_CERT_BASE64="${KUBELET_CERT_BASE64:-$(cat ${CONFIGPATH}/admin.pem | base64 | tr -d '\r\n')}"
  KUBELET_KEY_BASE64="${KUBELET_KEY_BASE64:-$(cat ${CONFIGPATH}/admin-key.pem | base64 | tr -d '\r\n')}"
  CA_CERT_BASE64="${CA_CERT_BASE64:-$(cat `find ${CONFIGPATH} -name *ca*` | base64 | tr -d '\r\n')}"
}

# Build image for kubemark
function build-kubemark-image {
  echo -n -e "Do you want to build the kubemark image? [y/N]${color_cyan}>${color_norm} "
  read ISBUILD
  if [[ "${ISBUILD}" = "y" ]]; then
    echo -e "${color_yellow}BUILDING IMAGE${color_norm}"
    ${KUBE_ROOT}/build/run.sh make kubemark
    cp ${KUBE_ROOT}/_output/dockerized/bin/linux/amd64/kubemark ${KUBEMARK_IMAGE_LOCATION}
  elif [[ "${ISBUILD}" = "N" ]]; then
    echo -n ""
  else
    echo -e "${color_red}Invalid response, please try again:${color_norm}"
    build-kubemark-image
  fi
}

# Clean up repository
function clean-repo {
  echo -n -e "Do you want to remove build output and binary? [y/N]${color_cyan}>${color_norm} "
  read ISCLEAN
  if [[ "${ISCLEAN}" = "y" ]]; then
    echo -e "${color_yellow}CLEANING REPO${color_norm}"
    rm -rf ${KUBE_ROOT}/_output
    rm -f ${KUBEMARK_IMAGE_LOCATION}/kubemark
  elif [[ "${ISCLEAN}" = "N" ]]; then
    echo -n ""
  else
    echo -e "${color_red}Invalid response, please try again:${color_norm}"
    clean-repo
  fi
}
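Note: the `$(bx cs cluster-config ... | grep export)` lines in `master-config` and `spawn-config` rely on the IBM Cloud CLI printing an `export KUBECONFIG=<path>` line; command substitution in command position makes the shell execute that captured line, so `KUBECONFIG` ends up exported in the current shell. The same mechanism with a stand-in command and an invented path:

    emit-config() { echo "export KUBECONFIG=/tmp/kubeSpawnTester.yml"; }
    $(emit-config | grep export)    # executes: export KUBECONFIG=/tmp/kubeSpawnTester.yml
    echo "${KUBECONFIG}"            # -> /tmp/kubeSpawnTester.yml

This only works while the emitted path contains no whitespace, since the substituted text is re-split into words before execution.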
15 changes: vendor/k8s.io/kubernetes/test/kubemark/resources/hollow-node_template.yaml (generated, vendored)
@@ -18,7 +18,7 @@ spec:
       initContainers:
       - name: init-inotify-limit
         image: busybox
-        command: ['sysctl', '-w', 'fs.inotify.max_user_instances=200']
+        command: ['sysctl', '-w', 'fs.inotify.max_user_instances=1000']
         securityContext:
           privileged: true
       volumes:
@@ -53,7 +53,7 @@ spec:
         command:
         - /bin/sh
         - -c
-        - /kubemark --morph=kubelet --name=$(NODE_NAME) --kubeconfig=/kubeconfig/kubelet.kubeconfig $(CONTENT_TYPE) --alsologtostderr {{kubelet_verbosity_level}} 1>>/var/log/kubelet-$(NODE_NAME).log 2>&1
+        - /kubemark --morph=kubelet --name=$(NODE_NAME) {{hollow_kubelet_params}} --kubeconfig=/kubeconfig/kubelet.kubeconfig $(CONTENT_TYPE) --alsologtostderr 1>>/var/log/kubelet-$(NODE_NAME).log 2>&1
         volumeMounts:
         - name: kubeconfig-volume
           mountPath: /kubeconfig
@@ -81,7 +81,7 @@ spec:
         command:
         - /bin/sh
         - -c
-        - /kubemark --morph=proxy --name=$(NODE_NAME) --use-real-proxier={{use_real_proxier}} --kubeconfig=/kubeconfig/kubeproxy.kubeconfig $(CONTENT_TYPE) --alsologtostderr {{kubeproxy_verbosity_level}} 1>>/var/log/kubeproxy-$(NODE_NAME).log 2>&1
+        - /kubemark --morph=proxy --name=$(NODE_NAME) {{hollow_proxy_params}} --kubeconfig=/kubeconfig/kubeproxy.kubeconfig $(CONTENT_TYPE) --alsologtostderr 1>>/var/log/kubeproxy-$(NODE_NAME).log 2>&1
         volumeMounts:
         - name: kubeconfig-volume
           mountPath: /kubeconfig
@@ -121,3 +121,12 @@ spec:
             memory: 20Mi
         securityContext:
           privileged: true
+      # Keep the pod running on unreachable node for 15 minutes.
+      # This time should be sufficient for a VM reboot and should
+      # avoid recreating a new hollow node.
+      # See https://github.com/kubernetes/kubernetes/issues/67120 for context.
+      tolerations:
+      - key: "node.kubernetes.io/unreachable"
+        operator: "Exists"
+        effect: "NoExecute"
+        tolerationSeconds: 900
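Note: the added toleration addresses taint-based eviction. When a node stops reporting, the node lifecycle controller taints it with `node.kubernetes.io/unreachable:NoExecute`, and pods are evicted once their toleration window expires (300s by default via the DefaultTolerationSeconds admission plugin); raising it to 900s keeps hollow-node pods in place across a 15-minute outage instead of churning replacements. One way to verify a rendered pod carries the toleration (pod name is illustrative):

    kubectl get pod hollow-node-x7k2p -n kubemark \
      -o jsonpath='{.spec.tolerations[?(@.key=="node.kubernetes.io/unreachable")].tolerationSeconds}'
    # expected output: 900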
2 changes: vendor/k8s.io/kubernetes/test/kubemark/resources/kube_dns_template.yaml (generated, vendored)
@@ -60,7 +60,6 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
-      priorityClassName: system-cluster-critical
       tolerations:
       - key: "CriticalAddonsOnly"
         operator: "Exists"
@@ -141,6 +140,7 @@ spec:
         - -k
         - --cache-size=1000
         - --no-negcache
+        - --dns-loop-detect
         - --log-facility=-
         - --server=/{{dns_domain}}/127.0.0.1#10053
         - --server=/in-addr.arpa/127.0.0.1#10053
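Note: `--dns-loop-detect` makes dnsmasq probe its upstream servers for forwarding loops and disable any server that echoes the probe back, a reasonable guard here since both `{{dns_domain}}` and the reverse zones are forwarded to kubedns on 127.0.0.1#10053.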
2 changes: vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/kube-addon-manager.yaml (generated, vendored)
@@ -9,7 +9,7 @@ spec:
   hostNetwork: true
   containers:
   - name: kube-addon-manager
-    image: {{kube_docker_registry}}/kube-addon-manager:v8.6
+    image: {{kube_docker_registry}}/kube-addon-manager:v8.7
     command:
     - /bin/bash
     - -c
5 changes: vendor/k8s.io/kubernetes/test/kubemark/resources/start-kubemark-master.sh (generated, vendored)
@@ -346,7 +346,7 @@ function create-master-audit-policy {
     - group: "storage.k8s.io"'
 
   cat <<EOF >"${path}"
-apiVersion: audit.k8s.io/v1beta1
+apiVersion: audit.k8s.io/v1
 kind: Policy
 rules:
   # The following requests were manually identified as high-volume and low-risk,
@@ -520,9 +520,6 @@ function compute-kube-apiserver-params {
   if [[ -n "${STORAGE_MEDIA_TYPE:-}" ]]; then
     params+=" --storage-media-type=${STORAGE_MEDIA_TYPE}"
   fi
-  if [[ -n "${ETCD_QUORUM_READ:-}" ]]; then
-    params+=" --etcd-quorum-read=${ETCD_QUORUM_READ}"
-  fi
   if [[ -n "${ETCD_COMPACTION_INTERVAL_SEC:-}" ]]; then
     params+=" --etcd-compaction-interval=${ETCD_COMPACTION_INTERVAL_SEC}s"
   fi
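Both hunks track API churn in the 1.12/1.13 timeframe: `audit.k8s.io/v1` is the GA audit policy version (the rules themselves are unchanged for a file like this), and the `--etcd-quorum-read` flag is dropped because reads against the etcd3 backend are always quorum reads, so the flag no longer has any effect. A minimal policy file in the new API version, written the same way the script writes it; the rules are shortened to a sketch:

    cat <<EOF >"${path}"
    apiVersion: audit.k8s.io/v1
    kind: Policy
    rules:
      # drop known high-volume, low-risk requests first
      - level: None
        users: ["system:kube-proxy"]
        verbs: ["watch"]
      # log everything else at metadata level
      - level: Metadata
    EOF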
18 changes: vendor/k8s.io/kubernetes/test/kubemark/skeleton/util.sh (generated, vendored)
@@ -18,6 +18,21 @@
 # Kubemark must implement to use test/kubemark/start-kubemark.sh and
 # test/kubemark/stop-kubemark.sh scripts.
 
+# This function should authenticate docker to be able to read/write to
+# the right container registry (needed for pushing kubemark image).
+function authenticate-docker {
+  echo "Configuring registry authentication" 1>&2
+}
+
+# This function should get master IP address (creating one if needed).
+# ENV vars that should be defined by the end of this function:
+# - MASTER_IP
+#
+# Recommended for this function to include retrying logic in case of failures.
+function get-or-create-master-ip {
+  echo "MASTER_IP: $MASTER_IP" 1>&2
+}
+
 # This function should create a machine instance for the master along
 # with any/all of the following resources:
 # - Attach a PD to the master (optionally 1 more for storing events)
@@ -26,8 +41,7 @@
 # Note: This step is compulsory in order for kubemark to work
 #
 # ENV vars that should be defined by the end of this function:
-# 1. MASTER_IP
-# 2. MASTER_NAME
+# - MASTER_NAME
 #
 # Recommended for this function to include retrying logic for the above
 # operations in case of failures.
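With these additions the skeleton spells out the full provider contract: `authenticate-docker`, `get-or-create-master-ip` (must leave `MASTER_IP` set), and `create-master-instance-with-resources` (must leave `MASTER_IP` and `MASTER_NAME` set). A compact sketch of what a hypothetical provider's util.sh would override; every `myprovider-*` name below is invented for illustration:

    # test/kubemark/myprovider/util.sh (hypothetical)

    function authenticate-docker {
      docker login "${MYPROVIDER_REGISTRY}"   # registry variable is assumed
    }

    function get-or-create-master-ip {
      # must define MASTER_IP; retrying on transient failures is recommended
      MASTER_IP="$(myprovider-cli address reserve "${MASTER_NAME}-ip")"
    }

    function create-master-instance-with-resources {
      # must define MASTER_IP and MASTER_NAME by the time it returns
      myprovider-cli vm create "${MASTER_NAME}" --address "${MASTER_IP}"
    }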
136 changes: vendor/k8s.io/kubernetes/test/kubemark/start-kubemark.sh (generated, vendored)
@@ -27,6 +27,11 @@ source "${KUBE_ROOT}/test/kubemark/skeleton/util.sh"
 source "${KUBE_ROOT}/test/kubemark/cloud-provider-config.sh"
 source "${KUBE_ROOT}/test/kubemark/${CLOUD_PROVIDER}/util.sh"
 source "${KUBE_ROOT}/cluster/kubemark/${CLOUD_PROVIDER}/config-default.sh"
+
+if [[ -f "${KUBE_ROOT}/test/kubemark/${CLOUD_PROVIDER}/startup.sh" ]] ; then
+  source "${KUBE_ROOT}/test/kubemark/${CLOUD_PROVIDER}/startup.sh"
+fi
+
 source "${KUBE_ROOT}/cluster/kubemark/util.sh"
 
 # hack/lib/init.sh will overwrite ETCD_VERSION if this is unset
@@ -59,7 +64,7 @@ SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-}"
 EVENT_PD="${EVENT_PD:-}"
 
 # Etcd related variables.
-ETCD_IMAGE="${ETCD_IMAGE:-3.2.18-0}"
+ETCD_IMAGE="${ETCD_IMAGE:-3.2.24-1}"
 ETCD_VERSION="${ETCD_VERSION:-}"
 
 # Controller-manager related variables.
@@ -75,7 +80,6 @@ SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_ARGS:-}"
 APISERVER_TEST_ARGS="${APISERVER_TEST_ARGS:-}"
 STORAGE_MEDIA_TYPE="${STORAGE_MEDIA_TYPE:-}"
 STORAGE_BACKEND="${STORAGE_BACKEND:-etcd3}"
-ETCD_QUORUM_READ="${ETCD_QUORUM_READ:-}"
 ETCD_COMPACTION_INTERVAL_SEC="${ETCD_COMPACTION_INTERVAL_SEC:-}"
 RUNTIME_CONFIG="${RUNTIME_CONFIG:-}"
 NUM_NODES="${NUM_NODES:-}"
@@ -189,65 +193,41 @@ function start-master-components {
   echo "The master has started and is now live."
 }
 
-# Finds the right kubemark binary for 'linux/amd64' platform and uses it to
-# create a docker image for hollow-node and upload it to the appropriate
-# docker container registry for the cloud provider.
+# Create a docker image for hollow-node and upload it to the appropriate docker registry.
 function create-and-upload-hollow-node-image {
-  MAKE_DIR="${KUBE_ROOT}/cluster/images/kubemark"
-  KUBEMARK_BIN="$(kube::util::find-binary-for-platform kubemark linux/amd64)"
-  if [[ -z "${KUBEMARK_BIN}" ]]; then
-    echo 'Cannot find cmd/kubemark binary'
-    exit 1
-  fi
-
-  echo "Configuring registry authentication"
-  mkdir -p "${HOME}/.docker"
-  gcloud beta auth configure-docker -q
-
-  echo "Copying kubemark binary to ${MAKE_DIR}"
-  cp "${KUBEMARK_BIN}" "${MAKE_DIR}"
-  CURR_DIR=`pwd`
-  cd "${MAKE_DIR}"
-  RETRIES=3
-  for attempt in $(seq 1 ${RETRIES}); do
-    if ! REGISTRY="${KUBEMARK_IMAGE_REGISTRY}" IMAGE_TAG="${KUBEMARK_IMAGE_TAG}" make "${KUBEMARK_IMAGE_MAKE_TARGET}"; then
-      if [[ $((attempt)) -eq "${RETRIES}" ]]; then
-        echo "${color_red}Make failed. Exiting.${color_norm}"
-        exit 1
-      fi
-      echo -e "${color_yellow}Make attempt $(($attempt)) failed. Retrying.${color_norm}" >& 2
-      sleep $(($attempt * 5))
-    else
-      break
+  authenticate-docker
+  KUBEMARK_IMAGE_REGISTRY="${KUBEMARK_IMAGE_REGISTRY:-${CONTAINER_REGISTRY}/${PROJECT}}"
+  if [[ "${KUBEMARK_BAZEL_BUILD:-}" =~ ^[yY]$ ]]; then
+    # Build+push the image through bazel.
+    touch WORKSPACE # Needed for bazel.
+    build_cmd=("bazel" "run" "//cluster/images/kubemark:push" "--define" "REGISTRY=${KUBEMARK_IMAGE_REGISTRY}" "--define" "IMAGE_TAG=${KUBEMARK_IMAGE_TAG}")
+    run-cmd-with-retries "${build_cmd[@]}"
+  else
+    # Build+push the image through makefile.
+    build_cmd=("make" "${KUBEMARK_IMAGE_MAKE_TARGET}")
+    MAKE_DIR="${KUBE_ROOT}/cluster/images/kubemark"
+    KUBEMARK_BIN="$(kube::util::find-binary-for-platform kubemark linux/amd64)"
+    if [[ -z "${KUBEMARK_BIN}" ]]; then
+      echo 'Cannot find cmd/kubemark binary'
+      exit 1
     fi
-  done
-  rm kubemark
-  cd $CURR_DIR
+    echo "Copying kubemark binary to ${MAKE_DIR}"
+    cp "${KUBEMARK_BIN}" "${MAKE_DIR}"
+    CURR_DIR=`pwd`
+    cd "${MAKE_DIR}"
+    REGISTRY=${KUBEMARK_IMAGE_REGISTRY} IMAGE_TAG=${KUBEMARK_IMAGE_TAG} run-cmd-with-retries "${build_cmd[@]}"
+    rm kubemark
+    cd $CURR_DIR
+  fi
   echo "Created and uploaded the kubemark hollow-node image to docker registry."
+  # Cleanup the kubemark image after the script exits.
+  if [[ "${CLEANUP_KUBEMARK_IMAGE:-}" == "true" ]]; then
+    trap delete-kubemark-image EXIT
+  fi
 }
 
-# Use bazel rule to create a docker image for hollow-node and upload
-# it to the appropriate docker container registry for the cloud provider.
-function create-and-upload-hollow-node-image-bazel {
-  echo "Configuring registry authentication"
-  mkdir -p "${HOME}/.docker"
-  gcloud beta auth configure-docker -q
-
-  RETRIES=3
-  for attempt in $(seq 1 ${RETRIES}); do
-    if ! bazel run //cluster/images/kubemark:push --define REGISTRY="${KUBEMARK_IMAGE_REGISTRY}" --define IMAGE_TAG="${KUBEMARK_IMAGE_TAG}"; then
-      if [[ $((attempt)) -eq "${RETRIES}" ]]; then
-        echo "${color_red}Image push failed. Exiting.${color_norm}"
-        exit 1
-      fi
-      echo -e "${color_yellow}Make attempt $(($attempt)) failed. Retrying.${color_norm}" >& 2
-      sleep $(($attempt * 5))
-    else
-      break
-    fi
-  done
-  echo "Created and uploaded the kubemark hollow-node image to docker registry."
-}
+function delete-kubemark-image {
+  delete-image "${KUBEMARK_IMAGE_REGISTRY}/kubemark:${KUBEMARK_IMAGE_TAG}"
+}
 
 # Generate secret and configMap for the hollow-node pods to work, prepare
@@ -439,9 +419,8 @@ current-context: kubemark-context")
   sed -i'' -e "s'{{kubemark_image_registry}}'${KUBEMARK_IMAGE_REGISTRY}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
   sed -i'' -e "s/{{kubemark_image_tag}}/${KUBEMARK_IMAGE_TAG}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
   sed -i'' -e "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
-  sed -i'' -e "s/{{kubelet_verbosity_level}}/${KUBELET_TEST_LOG_LEVEL}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
-  sed -i'' -e "s/{{kubeproxy_verbosity_level}}/${KUBEPROXY_TEST_LOG_LEVEL}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
-  sed -i'' -e "s/{{use_real_proxier}}/${USE_REAL_PROXIER}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
+  sed -i'' -e "s/{{hollow_kubelet_params}}/${HOLLOW_KUBELET_TEST_ARGS}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
+  sed -i'' -e "s/{{hollow_proxy_params}}/${HOLLOW_PROXY_TEST_ARGS}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
   sed -i'' -e "s'{{kubemark_mig_config}}'${KUBEMARK_MIG_CONFIG:-}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
   "${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/hollow-node.yaml" --namespace="kubemark"
 
@@ -485,30 +464,35 @@ function wait-for-hollow-nodes-to-run-or-timeout {
 ############################### Main Function ########################################
 detect-project &> /dev/null
+find-release-tars
+
+# We need master IP to generate PKI and kubeconfig for cluster.
+get-or-create-master-ip
+generate-pki-config
+write-local-kubeconfig
 
 # Setup for master.
-echo -e "${color_yellow}STARTING SETUP FOR MASTER${color_norm}"
-find-release-tars
-create-master-environment-file
-create-master-instance-with-resources
-generate-pki-config
-wait-for-master-reachability
-write-pki-config-to-master
-write-local-kubeconfig
-copy-resource-files-to-master
-start-master-components
+function start-master {
+  echo -e "${color_yellow}STARTING SETUP FOR MASTER${color_norm}"
+  create-master-environment-file
+  create-master-instance-with-resources
+  wait-for-master-reachability
+  write-pki-config-to-master
+  copy-resource-files-to-master
+  start-master-components
+}
+start-master &
 
 # Setup for hollow-nodes.
-echo ""
-echo -e "${color_yellow}STARTING SETUP FOR HOLLOW-NODES${color_norm}"
-if [[ "${KUBEMARK_BAZEL_BUILD:-}" =~ ^[yY]$ ]]; then
-  create-and-upload-hollow-node-image-bazel
-else
+function start-hollow-nodes {
+  echo -e "${color_yellow}STARTING SETUP FOR HOLLOW-NODES${color_norm}"
   create-and-upload-hollow-node-image
-fi
-create-kube-hollow-node-resources
-wait-for-hollow-nodes-to-run-or-timeout
+  create-kube-hollow-node-resources
+  wait-for-hollow-nodes-to-run-or-timeout
+}
+start-hollow-nodes &
 
+wait
 echo ""
 echo "Master IP: ${MASTER_IP}"
 echo "Password to kubemark master: ${KUBE_PASSWORD}"
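The reason `get-or-create-master-ip`, `generate-pki-config`, and `write-local-kubeconfig` moved above the fork is bash semantics: `start-master &` and `start-hollow-nodes &` each run in a subshell, so any variable they set (such as `MASTER_IP`) would vanish when the subshell exits, while everything computed before the `&` is inherited by both branches. A small demonstration of the pitfall:

    #!/usr/bin/env bash
    value=""
    set-it() { value="filled"; }

    set-it &                        # runs in a subshell
    wait
    echo "background: '${value}'"   # prints: background: ''

    set-it                          # runs in the current shell
    echo "foreground: '${value}'"   # prints: foreground: 'filled'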
5 changes: vendor/k8s.io/kubernetes/test/kubemark/stop-kubemark.sh (generated, vendored)
@@ -22,6 +22,11 @@ source "${KUBE_ROOT}/test/kubemark/skeleton/util.sh"
 source "${KUBE_ROOT}/test/kubemark/cloud-provider-config.sh"
 source "${KUBE_ROOT}/test/kubemark/${CLOUD_PROVIDER}/util.sh"
 source "${KUBE_ROOT}/cluster/kubemark/${CLOUD_PROVIDER}/config-default.sh"
+
+if [[ -f "${KUBE_ROOT}/test/kubemark/${CLOUD_PROVIDER}/shutdown.sh" ]] ; then
+  source "${KUBE_ROOT}/test/kubemark/${CLOUD_PROVIDER}/shutdown.sh"
+fi
+
 source "${KUBE_ROOT}/cluster/kubemark/util.sh"
 
 KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh"