Add generated file

This PR adds generated files under pkg/client and the vendor folder.
xing-yang
2018-07-12 10:55:15 -07:00
parent 36b1de0341
commit e213d1890d
17729 changed files with 5090889 additions and 0 deletions

14 vendor/k8s.io/kubernetes/test/kubemark/BUILD generated vendored Normal file

@@ -0,0 +1,14 @@
package(default_visibility = ["//visibility:public"])
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

8 vendor/k8s.io/kubernetes/test/kubemark/OWNERS generated vendored Normal file

@@ -0,0 +1,8 @@
reviewers:
- gmarek
- shyamjvs
- wojtek-t
approvers:
- gmarek
- shyamjvs
- wojtek-t


@@ -0,0 +1,21 @@
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CLOUD_PROVIDER="${CLOUD_PROVIDER:-gce}"
CONTAINER_REGISTRY="${CONTAINER_REGISTRY:-gcr.io}"
PROJECT="${PROJECT:-}"
KUBEMARK_IMAGE_REGISTRY="${KUBEMARK_IMAGE_REGISTRY:-}"
KUBEMARK_IMAGE_MAKE_TARGET="${KUBEMARK_IMAGE_MAKE_TARGET:-gcloudpush}"
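Every variable in this file uses the `${VAR:-default}` pattern, so each can also be overridden from the environment before the kubemark scripts source it. A hypothetical non-GCE override:

```
export CLOUD_PROVIDER="pre-existing"       # see the provider guide below
export CONTAINER_REGISTRY="docker.io"
export KUBEMARK_IMAGE_MAKE_TARGET="push"
```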

44 vendor/k8s.io/kubernetes/test/kubemark/common/util.sh generated vendored Normal file

@@ -0,0 +1,44 @@
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Runs the given command up to $RETRIES times in case of failures.
function run-cmd-with-retries {
RETRIES="${RETRIES:-3}"
for attempt in $(seq 1 ${RETRIES}); do
local ret_val=0
exec 5>&1 # Duplicate &1 to &5 for use below.
# We don't declare 'result' with 'local': 'local' itself returns 0, so ret_val would never capture the command's exit code.
# We use tee to output to &5 (redirected to stdout) while also storing it in the variable.
result=$("$@" 2>&1 | tee >(cat - >&5)) || ret_val="$?"
if [[ "${ret_val:-0}" -ne "0" ]]; then
if [[ $(echo "${result}" | grep -c "already exists") -gt 0 ]]; then
if [[ "${attempt}" == 1 ]]; then
echo -e "${color_red}Failed to $1 $2 $3 as the resource hasn't been deleted from a previous run.${color_norm}" >& 2
exit 1
fi
echo -e "${color_yellow}Succeeded to $1 $2 $3 in the previous attempt, but status response wasn't received.${color_norm}"
return 0
fi
echo -e "${color_yellow}Attempt $attempt failed to $1 $2 $3. Retrying.${color_norm}" >& 2
sleep $(($attempt * 5))
else
echo -e "${color_green}Succeeded to $1 $2 $3.${color_norm}"
return 0
fi
done
echo -e "${color_red}Failed to $1 $2 $3.${color_norm}" >& 2
exit 1
}
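A minimal usage sketch for this helper (hypothetical; the `color_*` variables and the source path are assumptions, as real callers get both from the cluster scripts that source this file):

```
#!/usr/bin/env bash
# Hypothetical caller of run-cmd-with-retries.
color_red=""; color_yellow=""; color_green=""; color_norm=""
source "vendor/k8s.io/kubernetes/test/kubemark/common/util.sh"

# Override the default of 3 attempts for a flaky call.
RETRIES=5 run-cmd-with-retries gcloud compute networks describe default \
  --project "my-project"
```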

20 vendor/k8s.io/kubernetes/test/kubemark/configure-kubectl.sh generated vendored Executable file

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script assumes that the kubectl binary is present in PATH.
kubectl config set-cluster hollow-cluster --server=http://localhost:8080 --insecure-skip-tls-verify=true
kubectl config set-credentials $(whoami)
kubectl config set-context hollow-context --cluster=hollow-cluster --user=$(whoami)
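The script registers the cluster, credentials, and context but does not switch to the new context; to use it afterwards, one could run (hypothetically):

```
kubectl --context=hollow-context get nodes
```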

127 vendor/k8s.io/kubernetes/test/kubemark/gce/util.sh generated vendored Normal file

@@ -0,0 +1,127 @@
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../../..
source "${KUBE_ROOT}/test/kubemark/common/util.sh"
# Wrapper for gcloud compute, running it $RETRIES times in case of failures.
# Args:
# $@: all stuff that goes after 'gcloud compute'
function run-gcloud-compute-with-retries {
run-cmd-with-retries gcloud compute "$@"
}
function create-master-instance-with-resources {
GCLOUD_COMMON_ARGS="--project ${PROJECT} --zone ${ZONE}"
run-gcloud-compute-with-retries disks create "${MASTER_NAME}-pd" \
${GCLOUD_COMMON_ARGS} \
--type "${MASTER_DISK_TYPE}" \
--size "${MASTER_DISK_SIZE}"
if [ "${EVENT_PD:-}" == "true" ]; then
run-gcloud-compute-with-retries disks create "${MASTER_NAME}-event-pd" \
${GCLOUD_COMMON_ARGS} \
--type "${MASTER_DISK_TYPE}" \
--size "${MASTER_DISK_SIZE}"
fi
run-gcloud-compute-with-retries addresses create "${MASTER_NAME}-ip" \
--project "${PROJECT}" \
--region "${REGION}" -q
MASTER_IP=$(gcloud compute addresses describe "${MASTER_NAME}-ip" \
--project "${PROJECT}" --region "${REGION}" -q --format='value(address)')
# Override the master image project to cos-cloud for COS images, whose names start with the `cos` prefix.
DEFAULT_GCI_PROJECT=google-containers
if [[ "${GCI_VERSION}" == "cos"* ]]; then
DEFAULT_GCI_PROJECT=cos-cloud
fi
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-${DEFAULT_GCI_PROJECT}}
run-gcloud-compute-with-retries instances create "${MASTER_NAME}" \
${GCLOUD_COMMON_ARGS} \
--address "${MASTER_IP}" \
--machine-type "${MASTER_SIZE}" \
--image-project="${MASTER_IMAGE_PROJECT}" \
--image "${MASTER_IMAGE}" \
--tags "${MASTER_TAG}" \
--subnet "${SUBNETWORK:-${NETWORK}}" \
--scopes "storage-ro,logging-write" \
--boot-disk-size "${MASTER_ROOT_DISK_SIZE}" \
--disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no"
run-gcloud-compute-with-retries instances add-metadata "${MASTER_NAME}" \
${GCLOUD_COMMON_ARGS} \
--metadata-from-file startup-script="${KUBE_ROOT}/test/kubemark/resources/start-kubemark-master.sh"
if [ "${EVENT_PD:-}" == "true" ]; then
echo "Attaching ${MASTER_NAME}-event-pd to ${MASTER_NAME}"
run-gcloud-compute-with-retries instances attach-disk "${MASTER_NAME}" \
${GCLOUD_COMMON_ARGS} \
--disk "${MASTER_NAME}-event-pd" \
--device-name="master-event-pd"
fi
run-gcloud-compute-with-retries firewall-rules create "${MASTER_NAME}-https" \
--project "${PROJECT}" \
--network "${NETWORK}" \
--source-ranges "0.0.0.0/0" \
--target-tags "${MASTER_TAG}" \
--allow "tcp:443"
}
# Command to be executed is '$1'.
# No. of retries is '$2' (if provided) or 1 (default).
function execute-cmd-on-master-with-retries() {
RETRIES="${2:-1}" run-gcloud-compute-with-retries ssh "${MASTER_NAME}" --zone="${ZONE}" --project="${PROJECT}" --command="$1"
}
function copy-files() {
run-gcloud-compute-with-retries scp --recurse --zone="${ZONE}" --project="${PROJECT}" "$@"
}
function delete-master-instance-and-resources {
GCLOUD_COMMON_ARGS="--project ${PROJECT} --zone ${ZONE} --quiet"
gcloud compute instances delete "${MASTER_NAME}" \
${GCLOUD_COMMON_ARGS} || true
gcloud compute disks delete "${MASTER_NAME}-pd" \
${GCLOUD_COMMON_ARGS} || true
gcloud compute disks delete "${MASTER_NAME}-event-pd" \
${GCLOUD_COMMON_ARGS} &> /dev/null || true
gcloud compute addresses delete "${MASTER_NAME}-ip" \
--project "${PROJECT}" \
--region "${REGION}" \
--quiet || true
gcloud compute firewall-rules delete "${MASTER_NAME}-https" \
--project "${PROJECT}" \
--quiet || true
if [ "${SEPARATE_EVENT_MACHINE:-false}" == "true" ]; then
gcloud compute instances delete "${EVENT_STORE_NAME}" \
${GCLOUD_COMMON_ARGS} || true
gcloud compute disks delete "${EVENT_STORE_NAME}-pd" \
${GCLOUD_COMMON_ARGS} || true
fi
}
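Hypothetical invocations of the helpers above, assuming PROJECT, ZONE, and MASTER_NAME are already set by the calling scripts:

```
# Run a command on the master, retrying up to 3 times on failure.
execute-cmd-on-master-with-retries "sudo systemctl status kubelet" 3

# Copy local files to the master; the last argument is the scp target.
copy-files "resources/kubeconfig.kubemark" "${MASTER_NAME}:/tmp/"
```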

26 vendor/k8s.io/kubernetes/test/kubemark/master-log-dump.sh generated vendored Executable file

@@ -0,0 +1,26 @@
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
REPORT_DIR="${1:-_artifacts}"
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source ${KUBE_ROOT}/test/kubemark/cloud-provider-config.sh
source ${KUBE_ROOT}/cluster/kubemark/${CLOUD_PROVIDER}/config-default.sh
export KUBEMARK_MASTER_NAME="${MASTER_NAME}"
echo "Dumping logs for kubemark master: ${KUBEMARK_MASTER_NAME}"
DUMP_ONLY_MASTER_LOGS=true ${KUBE_ROOT}/cluster/log-dump/log-dump.sh "${REPORT_DIR}"


@@ -0,0 +1,54 @@
# Kubemark Pre-existing Provider Guide
**Kubemark Master**
- A set of Kubernetes control plane components running in a VM
**Kubernetes Cluster**
- A real Kubernetes Cluster that has master and nodes. The hollow-node pods
are run in this cluster, but appear as nodes to the Kubemark Master
## Introduction
Every running Kubemark setup looks like the following:
1) A running Kubernetes cluster pointed to by the local kubeconfig
2) A separate VM where the kubemark master is running
3) Some hollow-nodes that run on the Kubernetes Cluster from #1
4) The hollow-nodes are configured to talk with the kubemark master at #2
When using the pre-existing provider, the developer is responsible for creating
#1 and #2. Therefore, the kubemark scripts will not create any infrastructure
or start a kubemark master like in other providers. Instead, the existing
resources provided by the VM at $MASTER_IP will serve as the kubemark master.
## Use Case
The goal of the pre-existing provider is to use the kubemark tools with an
existing kubemark master. It's meant to provide the developer with
additional flexibility to customize the cluster infrastructure and still use
the kubemark setup tools. The pre-existing provider is an **advanced** use
case that requires the developer to have knowledge of setting up a kubemark
master.
## Requirements
To use the pre-existing provider, the expectation is that there's a kubemark
master that is reachable at $MASTER_IP. The machine hosting the kubemark
master must be reachable over ssh from the host executing the kubemark
scripts, and the ssh user on that machine must be 'kubernetes'.
Requirement checklist (a quick check is sketched below):
- Set MASTER_IP to the IP address of the kubemark master
- The host where you execute the kubemark scripts must be able to ssh to
kubernetes@$MASTER_IP
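A hypothetical pre-flight check for both requirements; `${MASTER_IP%%:*}` strips the optional port the same way the `cut -f 1 -d ':'` in the pre-existing provider's util.sh does:

```
host="${MASTER_IP%%:*}"   # drop an optional :port suffix
ssh -o ConnectTimeout=5 "kubernetes@${host}" true \
  && echo "kubemark master is reachable" \
  || echo "cannot ssh to kubernetes@${host}" >&2
```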
## Example Configuration
_test/kubemark/cloud-provider-config.sh_
```
CLOUD_PROVIDER="pre-existing"
KUBEMARK_IMAGE_MAKE_TARGET="push"
CONTAINER_REGISTRY=docker.io
PROJECT="rthallisey"
MASTER_IP="192.168.121.29:6443"
```


@@ -0,0 +1,27 @@
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../../..
source "${KUBE_ROOT}/test/kubemark/common/util.sh"
# Leave the skeleton definition of execute-cmd-on-master-with-retries
# so only the pre-existing provider functions will target this.
function execute-cmd-on-pre-existing-master-with-retries() {
IP_WITHOUT_PORT=$(echo "${MASTER_IP}" | cut -f 1 -d ':') || IP_WITHOUT_PORT="${MASTER_IP}"
RETRIES="${2:-1}" run-cmd-with-retries ssh kubernetes@"${IP_WITHOUT_PORT}" "$1"
}


@@ -0,0 +1,94 @@
{
"kind": "Pod",
"apiVersion": "v1",
"metadata": {
"name": "cluster-autoscaler",
"namespace": "kubemark",
"labels": {
"tier": "cluster-management",
"component": "cluster-autoscaler"
}
},
"spec": {
"hostNetwork": true,
"containers": [
{
"name": "cluster-autoscaler",
"image": "k8s.gcr.io/cluster-autoscaler:v1.0.0",
"command": [
"./run.sh",
"--kubernetes=https://{{master_ip}}:443?inClusterConfig=0&useServiceAccount=0&auth=/kubeconfig/cluster_autoscaler.kubeconfig",
"--v=4",
"--logtostderr=true",
"--write-status-configmap=true",
"--cloud-provider=kubemark",
"--nodes={{kubemark_autoscaler_min_nodes}}:{{kubemark_autoscaler_max_nodes}}:{{kubemark_autoscaler_mig_name}}"
],
"env": [
{
"name": "LOG_OUTPUT",
"value": "/var/log/cluster-autoscaler.log"
}
],
"resources": {
"requests": {
"cpu": "10m",
"memory": "300Mi"
}
},
"volumeMounts": [
{"name": "cloudconfigmount","mountPath": "/etc/gce.conf", "readOnly": true},
{
"name": "ssl-certs",
"readOnly": true,
"mountPath": "/etc/ssl/certs"
},
{
"name": "usrsharecacerts",
"readOnly": true,
"mountPath": "/usr/share/ca-certificates"
},
{
"name": "logdir",
"mountPath": "/var/log",
"readOnly": false
},
{
"name": "kubeconfig-volume",
"mountPath": "/kubeconfig"
}
],
"terminationMessagePath": "/dev/termination-log",
"imagePullPolicy": "Always"
}
],
"volumes": [
{"name": "cloudconfigmount","hostPath": {"path": "/etc/gce.conf"}},
{
"name": "ssl-certs",
"hostPath": {
"path": "/etc/ssl/certs"
}
},
{
"name": "usrsharecacerts",
"hostPath": {
"path": "/usr/share/ca-certificates"
}
},
{
"name": "logdir",
"hostPath": {
"path": "/var/log"
}
},
{
"name": "kubeconfig-volume",
"secret": {
"secretName": "kubeconfig"
}
}
],
"restartPolicy": "Always"
}
}


@@ -0,0 +1,82 @@
{
"kind": "ReplicationController",
"apiVersion": "v1",
"metadata": {
"name": "heapster-v1.3.0",
"labels": {
"k8s-app": "heapster",
"version": "v1.3.0"
}
},
"spec": {
"replicas": 1,
"selector": {
"k8s-app": "heapster",
"version": "v1.3.0"
},
"template": {
"metadata": {
"labels": {
"k8s-app": "heapster",
"version": "v1.3.0"
}
},
"spec": {
"volumes": [
{
"name": "kubeconfig-volume",
"secret": {
"secretName": "kubeconfig"
}
}
],
"containers": [
{
"name": "heapster",
"image": "k8s.gcr.io/heapster:v1.3.0",
"resources": {
"requests": {
"cpu": "{{METRICS_CPU}}m",
"memory": "{{METRICS_MEM}}Mi"
}
},
"command": [
"/heapster"
],
"args": [
"--source=kubernetes:https://{{MASTER_IP}}:443?inClusterConfig=0&useServiceAccount=0&auth=/kubeconfig/heapster.kubeconfig"
],
"volumeMounts": [
{
"name": "kubeconfig-volume",
"mountPath": "/kubeconfig"
}
]
},
{
"name": "eventer",
"image": "k8s.gcr.io/heapster:v1.3.0",
"resources": {
"requests": {
"memory": "{{EVENTER_MEM}}Ki"
}
},
"command": [
"/eventer"
],
"args": [
"--source=kubernetes:https://{{MASTER_IP}}:443?inClusterConfig=0&useServiceAccount=0&auth=/kubeconfig/heapster.kubeconfig"
],
"volumeMounts": [
{
"name": "kubeconfig-volume",
"mountPath": "/kubeconfig"
}
]
}]
}
}
}
}


@@ -0,0 +1,123 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: hollow-node
labels:
name: hollow-node
{{kubemark_mig_config}}
spec:
replicas: {{numreplicas}}
selector:
name: hollow-node
template:
metadata:
labels:
name: hollow-node
{{kubemark_mig_config}}
spec:
initContainers:
- name: init-inotify-limit
image: busybox
command: ['sysctl', '-w', 'fs.inotify.max_user_instances=200']
securityContext:
privileged: true
volumes:
- name: kubeconfig-volume
secret:
secretName: kubeconfig
- name: kernelmonitorconfig-volume
configMap:
name: node-configmap
- name: logs-volume
hostPath:
path: /var/log
- name: no-serviceaccount-access-to-real-master
emptyDir: {}
containers:
- name: hollow-kubelet
image: {{kubemark_image_registry}}/kubemark:{{kubemark_image_tag}}
ports:
- containerPort: 4194
- containerPort: 10250
- containerPort: 10255
env:
- name: CONTENT_TYPE
valueFrom:
configMapKeyRef:
name: node-configmap
key: content.type
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
command:
- /bin/sh
- -c
- /kubemark --morph=kubelet --name=$(NODE_NAME) --kubeconfig=/kubeconfig/kubelet.kubeconfig $(CONTENT_TYPE) --alsologtostderr {{kubelet_verbosity_level}} 1>>/var/log/kubelet-$(NODE_NAME).log 2>&1
volumeMounts:
- name: kubeconfig-volume
mountPath: /kubeconfig
readOnly: true
- name: logs-volume
mountPath: /var/log
resources:
requests:
cpu: 40m
memory: 100M
securityContext:
privileged: true
- name: hollow-proxy
image: {{kubemark_image_registry}}/kubemark:{{kubemark_image_tag}}
env:
- name: CONTENT_TYPE
valueFrom:
configMapKeyRef:
name: node-configmap
key: content.type
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
command:
- /bin/sh
- -c
- /kubemark --morph=proxy --name=$(NODE_NAME) --use-real-proxier={{use_real_proxier}} --kubeconfig=/kubeconfig/kubeproxy.kubeconfig $(CONTENT_TYPE) --alsologtostderr {{kubeproxy_verbosity_level}} 1>>/var/log/kubeproxy-$(NODE_NAME).log 2>&1
volumeMounts:
- name: kubeconfig-volume
mountPath: /kubeconfig
readOnly: true
- name: logs-volume
mountPath: /var/log
resources:
requests:
cpu: {{HOLLOW_PROXY_CPU}}m
memory: {{HOLLOW_PROXY_MEM}}Ki
- name: hollow-node-problem-detector
image: k8s.gcr.io/node-problem-detector:v0.4.1
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
command:
- /bin/sh
- -c
- /node-problem-detector --system-log-monitors=/config/kernel.monitor --apiserver-override="https://{{master_ip}}:443?inClusterConfig=false&auth=/kubeconfig/npd.kubeconfig" --alsologtostderr 1>>/var/log/npd-$(NODE_NAME).log 2>&1
volumeMounts:
- name: kubeconfig-volume
mountPath: /kubeconfig
readOnly: true
- name: kernelmonitorconfig-volume
mountPath: /config
readOnly: true
- name: no-serviceaccount-access-to-real-master
mountPath: /var/run/secrets/kubernetes.io/serviceaccount
readOnly: true
- name: logs-volume
mountPath: /var/log
resources:
requests:
cpu: 20m
memory: 20Mi
securityContext:
privileged: true


@@ -0,0 +1,20 @@
{
"plugin": "filelog",
"pluginConfig": {
"timestamp": "dummy",
"message": "dummy",
"timestampFormat": "dummy"
},
"logPath": "/dev/null",
"lookback": "10m",
"bufferSize": 10,
"source": "kernel-monitor",
"conditions": [
{
"type": "KernelDeadlock",
"reason": "KernelHasNoDeadlock",
"message": "kernel has no deadlock"
}
],
"rules": []
}


@@ -0,0 +1,188 @@
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kubemark
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "KubeDNS"
spec:
selector:
k8s-app: kube-dns
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-dns
namespace: kubemark
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-dns
namespace: kubemark
labels:
addonmanager.kubernetes.io/mode: EnsureExists
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-dns
namespace: kubemark
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 0
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
volumes:
- name: kube-dns-config
configMap:
name: kube-dns
optional: true
- name: secret-volume
secret:
secretName: kubeconfig
containers:
- name: kubedns
image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.9
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
livenessProbe:
httpGet:
path: /healthcheck/kubedns
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
initialDelaySeconds: 3
timeoutSeconds: 5
args:
- --domain={{dns_domain}}.
- --dns-port=10053
- --config-dir=/kube-dns-config
- --kubecfg-file=/etc/secret-volume/dns.kubeconfig
- --v=2
env:
- name: PROMETHEUS_PORT
value: "10055"
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
volumeMounts:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: secret-volume
mountPath: /etc/secret-volume
- name: dnsmasq
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.9
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- -v=2
- -logtostderr
- -configDir=/etc/k8s/dns/dnsmasq-nanny
- -restartDnsmasq=true
- --
- -k
- --cache-size=1000
- --no-negcache
- --log-facility=-
- --server=/{{dns_domain}}/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
- --server=/ip6.arpa/127.0.0.1#10053
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
# see: https://github.com/kubernetes/kubernetes/issues/29055 for details
resources:
requests:
cpu: 150m
memory: 20Mi
volumeMounts:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.9
livenessProbe:
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- --v=2
- --logtostderr
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{dns_domain}},5,SRV
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{dns_domain}},5,SRV
ports:
- containerPort: 10054
name: metrics
protocol: TCP
resources:
requests:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.
serviceAccountName: kube-dns


@@ -0,0 +1,7 @@
{
"kind": "Namespace",
"apiVersion": "v1",
"metadata": {
"name": "kubemark"
}
}


@@ -0,0 +1 @@
These resources are used to add extra (non-default) bindings to kubemark for users and groups that are particular to the kubemark environment. Neither these bindings nor the users they are bound to are standard bootstrap ones; they have been adapted from cluster/addons/e2e-rbac-bindings. Tighten or loosen these access rights as required in the future.


@@ -0,0 +1,16 @@
# This is the role binding for the kubemark cluster autoscaler.
# TODO: Use proper Cluster Autoscaler role (github.com/kubernetes/autoscaler/issues/383)
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cluster-autoscaler-view-binding
labels:
kubernetes.io/cluster-service: "true"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:cluster-autoscaler


@@ -0,0 +1,15 @@
# This is the role binding for the kubemark heapster.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: heapster-view-binding
labels:
kubernetes.io/cluster-service: "true"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:heapster
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:heapster


@@ -0,0 +1,15 @@
# This is the role binding for the kubemark kube-dns.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kube-dns-view-binding
labels:
kubernetes.io/cluster-service: "true"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-dns
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:kube-dns


@@ -0,0 +1,18 @@
# This is the role binding for the local kubectl, which is
# used for listing hollow-nodes in start-kubemark.sh and
# sending resource creation requests, etc., in run-e2e-tests.sh.
# Also useful if you want to use the local kubectl manually.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubecfg-cluster-admin
labels:
kubernetes.io/cluster-service: "true"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kubecfg


@@ -0,0 +1,18 @@
# The Kubemark environment currently gives all kubelets a single shared credential.
#
# TODO: give each kubelet a credential in the system:nodes group with username system:node:<nodeName>,
# to exercise the Node authorizer and admission, then remove this binding
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubelet-node
labels:
kubernetes.io/cluster-service: "true"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kubelet


@@ -0,0 +1,15 @@
# This is the role binding for the node-problem-detector.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-problem-detector-binding
labels:
kubernetes.io/cluster-service: "true"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node-problem-detector
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:node-problem-detector


@@ -0,0 +1,51 @@
apiVersion: v1
kind: Pod
metadata:
name: etcd-server-events
namespace: kube-system
spec:
hostNetwork: true
nodeName: {{instance_prefix}}-master
containers:
- name: etcd-container
image: {{kube_docker_registry}}/etcd:{{etcd_image}}
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 100m
command:
- /bin/sh
- -c
- /usr/local/bin/etcd
{{params}}
1>>/var/log/etcd-events.log 2>&1
livenessProbe:
httpGet:
host: 127.0.0.1
path: /health
port: 4002
scheme: HTTP
initialDelaySeconds: 15
timeoutSeconds: 15
ports:
- name: serverport
containerPort: 2381
hostPort: 2381
protocol: TCP
- name: clientport
containerPort: 4002
hostPort: 4002
protocol: TCP
volumeMounts:
- name: varetcd
mountPath: /var/etcd
- name: varlogetcd
mountPath: /var/log/etcd-events.log
volumes:
- name: varetcd
hostPath:
path: /var/etcd/events
- name: varlogetcd
hostPath:
path: /var/log/etcd-events.log
type: FileOrCreate


@@ -0,0 +1,50 @@
apiVersion: v1
kind: Pod
metadata:
name: etcd-server
namespace: kube-system
spec:
hostNetwork: true
containers:
- name: etcd-container
image: {{kube_docker_registry}}/etcd:{{etcd_image}}
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 200m
command:
- /bin/sh
- -c
- /usr/local/bin/etcd
{{params}}
1>>/var/log/etcd.log 2>&1
livenessProbe:
httpGet:
host: 127.0.0.1
path: /health
port: 2379
scheme: HTTP
initialDelaySeconds: 15
timeoutSeconds: 15
ports:
- name: serverport
containerPort: 2380
hostPort: 2380
protocol: TCP
- name: clientport
containerPort: 2379
hostPort: 2379
protocol: TCP
volumeMounts:
- name: varetcd
mountPath: /var/etcd
- name: varlogetcd
mountPath: /var/log/etcd.log
volumes:
- name: varetcd
hostPath:
path: /var/etcd
- name: varlogetcd
hostPath:
path: /var/log/etcd.log
type: FileOrCreate


@@ -0,0 +1,34 @@
apiVersion: v1
kind: Pod
metadata:
name: kube-addon-manager
namespace: kube-system
labels:
component: kube-addon-manager
spec:
hostNetwork: true
containers:
- name: kube-addon-manager
image: {{kube_docker_registry}}/kube-addon-manager:v8.6
command:
- /bin/bash
- -c
- /opt/kube-addons.sh 1>>/var/log/kube-addon-manager.log 2>&1
resources:
requests:
cpu: 5m
memory: 50Mi
volumeMounts:
- name: addons
mountPath: /etc/kubernetes/
readOnly: true
- name: varlog
mountPath: /var/log/kube-addon-manager.log
volumes:
- name: addons
hostPath:
path: /etc/kubernetes/
- name: varlog
hostPath:
path: /var/log/kube-addon-manager.log
type: FileOrCreate


@@ -0,0 +1,70 @@
apiVersion: v1
kind: Pod
metadata:
name: kube-apiserver
namespace: kube-system
spec:
hostNetwork: true
containers:
- name: kube-apiserver
image: {{kube_docker_registry}}/kube-apiserver:{{kube-apiserver_docker_tag}}
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 250m
command:
- /bin/sh
- -c
- /usr/local/bin/kube-apiserver
{{params}}
1>>/var/log/kube-apiserver.log 2>&1
livenessProbe:
httpGet:
host: 127.0.0.1
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 15
timeoutSeconds: 15
ports:
- name: https
containerPort: 443
hostPort: 443
protocol: TCP
- name: local
containerPort: 8080
hostPort: 8080
protocol: TCP
volumeMounts:
{{audit_policy_config_mount}}
- name: srvkube
mountPath: /etc/srv/kubernetes
readOnly: true
- name: logfile
mountPath: /var/log/kube-apiserver.log
- name: etcssl
mountPath: /etc/ssl
readOnly: true
- name: usrsharecacerts
mountPath: /usr/share/ca-certificates
readOnly: true
- name: srvsshproxy
mountPath: /etc/srv/sshproxy
volumes:
{{audit_policy_config_volume}}
- name: srvkube
hostPath:
path: /etc/srv/kubernetes
- name: logfile
hostPath:
path: /var/log/kube-apiserver.log
type: FileOrCreate
- name: etcssl
hostPath:
path: /etc/ssl
- name: usrsharecacerts
hostPath:
path: /usr/share/ca-certificates
- name: srvsshproxy
hostPath:
path: /etc/srv/sshproxy


@@ -0,0 +1,54 @@
apiVersion: v1
kind: Pod
metadata:
name: kube-controller-manager
namespace: kube-system
spec:
hostNetwork: true
containers:
- name: kube-controller-manager
image: {{kube_docker_registry}}/kube-controller-manager:{{kube-controller-manager_docker_tag}}
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 200m
command:
- /bin/sh
- -c
- /usr/local/bin/kube-controller-manager
{{params}}
1>>/var/log/kube-controller-manager.log 2>&1
livenessProbe:
httpGet:
host: 127.0.0.1
path: /healthz
port: 10252
scheme: HTTP
initialDelaySeconds: 15
timeoutSeconds: 15
volumeMounts:
- name: srvkube
mountPath: /etc/srv/kubernetes
readOnly: true
- name: logfile
mountPath: /var/log/kube-controller-manager.log
- name: etcssl
mountPath: /etc/ssl
readOnly: true
- name: usrsharecacerts
mountPath: /usr/share/ca-certificates
readOnly: true
volumes:
- name: srvkube
hostPath:
path: /etc/srv/kubernetes
- name: logfile
hostPath:
path: /var/log/kube-controller-manager.log
type: FileOrCreate
- name: etcssl
hostPath:
path: /etc/ssl
- name: usrsharecacerts
hostPath:
path: /usr/share/ca-certificates


@@ -0,0 +1,43 @@
apiVersion: v1
kind: Pod
metadata:
name: kube-scheduler
namespace: kube-system
spec:
hostNetwork: true
nodeName: {{instance_prefix}}-master
containers:
- name: kube-scheduler
image: {{kube_docker_registry}}/kube-scheduler:{{kube-scheduler_docker_tag}}
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 100m
command:
- /bin/sh
- -c
- /usr/local/bin/kube-scheduler
{{params}}
1>>/var/log/kube-scheduler.log 2>&1
livenessProbe:
httpGet:
host: 127.0.0.1
path: /healthz
port: 10251
scheme: HTTP
initialDelaySeconds: 15
timeoutSeconds: 15
volumeMounts:
- name: srvkube
mountPath: /etc/srv/kubernetes
readOnly: true
- name: logfile
mountPath: /var/log/kube-scheduler.log
volumes:
- name: srvkube
hostPath:
path: /etc/srv/kubernetes
- name: logfile
hostPath:
path: /var/log/kube-scheduler.log
type: FileOrCreate


@@ -0,0 +1,732 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script that starts kubelet on the kubemark master as a systemd service
# and then runs the master components as pods using kubelet.
set -o errexit
set -o nounset
set -o pipefail
# Define key path variables.
KUBE_ROOT="/home/kubernetes"
KUBE_BINDIR="${KUBE_ROOT}/kubernetes/server/bin"
function config-ip-firewall {
echo "Configuring IP firewall rules"
# The GCI image has a host firewall which drops most inbound/forwarded packets.
# We need to add rules to accept all TCP/UDP/ICMP packets.
if iptables -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then
echo "Add rules to accept all inbound TCP/UDP/ICMP packets"
iptables -A INPUT -w -p TCP -j ACCEPT
iptables -A INPUT -w -p UDP -j ACCEPT
iptables -A INPUT -w -p ICMP -j ACCEPT
fi
if iptables -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then
echo "Add rules to accept all forwarded TCP/UDP/ICMP packets"
iptables -A FORWARD -w -p TCP -j ACCEPT
iptables -A FORWARD -w -p UDP -j ACCEPT
iptables -A FORWARD -w -p ICMP -j ACCEPT
fi
}
function create-dirs {
echo "Creating required directories"
mkdir -p /var/lib/kubelet
mkdir -p /etc/kubernetes/manifests
mkdir -p /etc/kubernetes/addons
}
# Setup working directory for kubelet.
function setup-kubelet-dir {
echo "Making /var/lib/kubelet executable for kubelet"
mount -B /var/lib/kubelet /var/lib/kubelet/
mount -B -o remount,exec,suid,dev /var/lib/kubelet
}
# Remove any default etcd config dirs/files.
function delete-default-etcd-configs {
if [[ -d /etc/etcd ]]; then
rm -rf /etc/etcd
fi
if [[ -e /etc/default/etcd ]]; then
rm -f /etc/default/etcd
fi
if [[ -e /etc/systemd/system/etcd.service ]]; then
rm -f /etc/systemd/system/etcd.service
fi
if [[ -e /etc/init.d/etcd ]]; then
rm -f /etc/init.d/etcd
fi
}
# Compute etcd related variables.
function compute-etcd-variables {
ETCD_IMAGE="${ETCD_IMAGE:-}"
ETCD_QUOTA_BYTES=""
if [ "${ETCD_VERSION:0:2}" == "3." ]; then
# TODO: Set larger quota to see if that helps with
# 'mvcc: database space exceeded' errors. If so, pipe
# through our setup scripts.
ETCD_QUOTA_BYTES=" --quota-backend-bytes=4294967296 "
fi
}
# Formats the given device ($1) if needed and mounts it at given mount point
# ($2).
function safe-format-and-mount() {
device=$1
mountpoint=$2
# Format only if the disk is not already formatted.
if ! tune2fs -l "${device}" ; then
echo "Formatting '${device}'"
mkfs.ext4 -F "${device}"
fi
echo "Mounting '${device}' at '${mountpoint}'"
mount -o discard,defaults "${device}" "${mountpoint}"
}
# Finds a PD device with name '$1' attached to the master.
function find-attached-pd() {
local -r pd_name=$1
if [[ ! -e /dev/disk/by-id/${pd_name} ]]; then
echo ""
fi
device_info=$(ls -l /dev/disk/by-id/${pd_name})
relative_path=${device_info##* }
echo "/dev/disk/by-id/${relative_path}"
}
# Mounts a persistent disk (formatting if needed) to store the persistent data
# on the master. safe-format-and-mount only formats an unformatted disk, and
# mkdir -p will leave a directory be if it already exists.
function mount-pd() {
local -r pd_name=$1
local -r mount_point=$2
if [[ -z "${find-attached-pd ${pd_name}}" ]]; then
echo "Can't find ${pd_name}. Skipping mount."
return
fi
local -r pd_path="/dev/disk/by-id/${pd_name}"
echo "Mounting PD '${pd_path}' at '${mount_point}'"
# Format and mount the disk, create directories on it for all of the master's
# persistent data, and link them to where they're used.
mkdir -p "${mount_point}"
safe-format-and-mount "${pd_path}" "${mount_point}"
echo "Mounted PD '${pd_path}' at '${mount_point}'"
# NOTE: These locations on the PD store persistent data, so to maintain
# upgradeability, these locations should not change. If they do, take care
# to maintain a migration path from these locations to whatever new
# locations.
}
# Create kubeconfig for controller-manager's service account authentication.
function create-kubecontrollermanager-kubeconfig {
echo "Creating kube-controller-manager kubeconfig file"
mkdir -p "${KUBE_ROOT}/k8s_auth_data/kube-controller-manager"
cat <<EOF >"${KUBE_ROOT}/k8s_auth_data/kube-controller-manager/kubeconfig"
apiVersion: v1
kind: Config
users:
- name: kube-controller-manager
user:
token: ${KUBE_CONTROLLER_MANAGER_TOKEN}
clusters:
- name: local
cluster:
insecure-skip-tls-verify: true
server: https://localhost:443
contexts:
- context:
cluster: local
user: kube-controller-manager
name: service-account-context
current-context: service-account-context
EOF
}
function create-kubescheduler-kubeconfig {
echo "Creating kube-scheduler kubeconfig file"
mkdir -p "${KUBE_ROOT}/k8s_auth_data/kube-scheduler"
cat <<EOF >"${KUBE_ROOT}/k8s_auth_data/kube-scheduler/kubeconfig"
apiVersion: v1
kind: Config
users:
- name: kube-scheduler
user:
token: ${KUBE_SCHEDULER_TOKEN}
clusters:
- name: local
cluster:
insecure-skip-tls-verify: true
server: https://localhost:443
contexts:
- context:
cluster: local
user: kube-scheduler
name: kube-scheduler
current-context: kube-scheduler
EOF
}
function assemble-docker-flags {
echo "Assemble docker command line flags"
local docker_opts="-p /var/run/docker.pid --iptables=false --ip-masq=false"
docker_opts+=" --log-level=debug" # Since it's a test cluster
# TODO(shyamjvs): Incorporate network plugin options, etc later.
echo "DOCKER_OPTS=\"${docker_opts}\"" > /etc/default/docker
echo "DOCKER_NOFILE=65536" >> /etc/default/docker # For setting ulimit -n
systemctl restart docker
}
# A helper function for loading a docker image. It keeps trying up to 5 times.
#
# $1: Full path of the docker image
function try-load-docker-image {
local -r img=$1
echo "Try to load docker image file ${img}"
# Temporarily turn off errexit, because we don't want to exit on first failure.
set +e
local -r max_attempts=5
local -i attempt_num=1
until timeout 30 docker load -i "${img}"; do
if [[ "${attempt_num}" == "${max_attempts}" ]]; then
echo "Fail to load docker image file ${img} after ${max_attempts} retries. Exit!!"
exit 1
else
attempt_num=$((attempt_num+1))
sleep 5
fi
done
# Re-enable errexit.
set -e
}
# Loads kube-system docker images. It is better to do it before starting kubelet,
# as kubelet will restart docker daemon, which may interfere with loading images.
function load-docker-images {
echo "Start loading kube-system docker images"
local -r img_dir="${KUBE_BINDIR}"
try-load-docker-image "${img_dir}/kube-apiserver.tar"
try-load-docker-image "${img_dir}/kube-controller-manager.tar"
try-load-docker-image "${img_dir}/kube-scheduler.tar"
}
# Computes command line arguments to be passed to kubelet.
function compute-kubelet-params {
local params="${KUBELET_TEST_ARGS:-}"
params+=" --allow-privileged=true"
params+=" --cgroup-root=/"
params+=" --cloud-provider=gce"
params+=" --pod-manifest-path=/etc/kubernetes/manifests"
if [[ -n "${KUBELET_PORT:-}" ]]; then
params+=" --port=${KUBELET_PORT}"
fi
params+=" --enable-debugging-handlers=false"
params+=" --hairpin-mode=none"
echo "${params}"
}
# Creates the systemd config file for kubelet.service.
function create-kubelet-conf() {
local -r kubelet_bin="$1"
local -r kubelet_env_file="/etc/default/kubelet"
local -r flags=$(compute-kubelet-params)
echo "KUBELET_OPTS=\"${flags}\"" > "${kubelet_env_file}"
# Write the systemd service file for kubelet.
cat <<EOF >/etc/systemd/system/kubelet.service
[Unit]
Description=Kubemark kubelet
Requires=network-online.target
After=network-online.target
[Service]
Restart=always
RestartSec=10
EnvironmentFile=${kubelet_env_file}
ExecStart=${kubelet_bin} \$KUBELET_OPTS
[Install]
WantedBy=multi-user.target
EOF
}
# This function assembles the kubelet systemd service file and starts it using
# systemctl, on the kubemark master.
function start-kubelet {
# Create systemd config.
local -r kubelet_bin="/usr/bin/kubelet"
create-kubelet-conf "${kubelet_bin}"
# Flush iptables nat table
iptables -t nat -F || true
# Start the kubelet service.
systemctl start kubelet.service
}
# Create the log file and set its properties.
#
# $1 is the file to create.
function prepare-log-file {
touch "$1"
chmod 644 "$1"
chown root:root "$1"
}
# A helper function for copying addon manifests and setting dir/file
# permissions.
#
# $1: addon category under /etc/kubernetes
# $2: manifest source dir
function setup-addon-manifests {
local -r src_dir="${KUBE_ROOT}/$2"
local -r dst_dir="/etc/kubernetes/$1/$2"
if [[ ! -d "${dst_dir}" ]]; then
mkdir -p "${dst_dir}"
fi
local files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml")
if [[ -n "${files}" ]]; then
cp "${src_dir}/"*.yaml "${dst_dir}"
fi
chown -R root:root "${dst_dir}"
chmod 755 "${dst_dir}"
chmod 644 "${dst_dir}"/*
}
# Write the config for the audit policy.
# Note: This duplicates the function in cluster/gce/gci/configure-helper.sh.
# TODO: Get rid of this function when #53321 is fixed.
function create-master-audit-policy {
local -r path="${1}"
local -r policy="${2:-}"
if [[ -n "${policy}" ]]; then
echo "${policy}" > "${path}"
return
fi
# Known api groups
local -r known_apis='
- group: "" # core
- group: "admissionregistration.k8s.io"
- group: "apiextensions.k8s.io"
- group: "apiregistration.k8s.io"
- group: "apps"
- group: "authentication.k8s.io"
- group: "authorization.k8s.io"
- group: "autoscaling"
- group: "batch"
- group: "certificates.k8s.io"
- group: "extensions"
- group: "metrics"
- group: "networking.k8s.io"
- group: "policy"
- group: "rbac.authorization.k8s.io"
- group: "settings.k8s.io"
- group: "storage.k8s.io"'
cat <<EOF >"${path}"
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
# The following requests were manually identified as high-volume and low-risk,
# so drop them.
- level: None
users: ["system:kube-proxy"]
verbs: ["watch"]
resources:
- group: "" # core
resources: ["endpoints", "services", "services/status"]
- level: None
# Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
# TODO(#46983): Change this to the ingress controller service account.
users: ["system:unsecured"]
namespaces: ["kube-system"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["configmaps"]
- level: None
users: ["kubelet"] # legacy kubelet identity
verbs: ["get"]
resources:
- group: "" # core
resources: ["nodes", "nodes/status"]
- level: None
userGroups: ["system:nodes"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["nodes", "nodes/status"]
- level: None
users:
- system:kube-controller-manager
- system:kube-scheduler
- system:serviceaccount:kube-system:endpoint-controller
verbs: ["get", "update"]
namespaces: ["kube-system"]
resources:
- group: "" # core
resources: ["endpoints"]
- level: None
users: ["system:apiserver"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
# Don't log HPA fetching metrics.
- level: None
users:
- system:kube-controller-manager
verbs: ["get", "list"]
resources:
- group: "metrics"
# Don't log these read-only URLs.
- level: None
nonResourceURLs:
- /healthz*
- /version
- /swagger*
# Don't log events requests.
- level: None
resources:
- group: "" # core
resources: ["events"]
# node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
- level: Request
users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
verbs: ["update","patch"]
resources:
- group: "" # core
resources: ["nodes/status", "pods/status"]
omitStages:
- "RequestReceived"
- level: Request
userGroups: ["system:nodes"]
verbs: ["update","patch"]
resources:
- group: "" # core
resources: ["nodes/status", "pods/status"]
omitStages:
- "RequestReceived"
# deletecollection calls can be large, don't log responses for expected namespace deletions
- level: Request
users: ["system:serviceaccount:kube-system:namespace-controller"]
verbs: ["deletecollection"]
omitStages:
- "RequestReceived"
# Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
# so only log at the Metadata level.
- level: Metadata
resources:
- group: "" # core
resources: ["secrets", "configmaps"]
- group: authentication.k8s.io
resources: ["tokenreviews"]
omitStages:
- "RequestReceived"
# Get responses can be large; skip them.
- level: Request
verbs: ["get", "list", "watch"]
resources: ${known_apis}
omitStages:
- "RequestReceived"
# Default level for known APIs
- level: RequestResponse
resources: ${known_apis}
omitStages:
- "RequestReceived"
# Default level for all other requests.
- level: Metadata
omitStages:
- "RequestReceived"
EOF
}
# Computes command line arguments to be passed to etcd.
function compute-etcd-params {
local params="${ETCD_TEST_ARGS:-}"
params+=" --listen-peer-urls=http://127.0.0.1:2380"
params+=" --advertise-client-urls=http://127.0.0.1:2379"
params+=" --listen-client-urls=http://0.0.0.0:2379"
params+=" --data-dir=/var/etcd/data"
params+=" ${ETCD_QUOTA_BYTES}"
echo "${params}"
}
# Computes command line arguments to be passed to etcd-events.
function compute-etcd-events-params {
local params="${ETCD_TEST_ARGS:-}"
params+=" --listen-peer-urls=http://127.0.0.1:2381"
params+=" --advertise-client-urls=http://127.0.0.1:4002"
params+=" --listen-client-urls=http://0.0.0.0:4002"
params+=" --data-dir=/var/etcd/data-events"
params+=" ${ETCD_QUOTA_BYTES}"
echo "${params}"
}
# Computes command line arguments to be passed to apiserver.
function compute-kube-apiserver-params {
local params="${APISERVER_TEST_ARGS:-}"
params+=" --insecure-bind-address=0.0.0.0"
if [[ -z "${ETCD_SERVERS:-}" ]]; then
params+=" --etcd-servers=http://127.0.0.1:2379"
params+=" --etcd-servers-overrides=/events#${EVENT_STORE_URL}"
else
params+=" --etcd-servers=${ETCD_SERVERS}"
fi
params+=" --tls-cert-file=/etc/srv/kubernetes/server.cert"
params+=" --tls-private-key-file=/etc/srv/kubernetes/server.key"
params+=" --requestheader-client-ca-file=/etc/srv/kubernetes/aggr_ca.crt"
params+=" --requestheader-allowed-names=aggregator"
params+=" --requestheader-extra-headers-prefix=X-Remote-Extra-"
params+=" --requestheader-group-headers=X-Remote-Group"
params+=" --requestheader-username-headers=X-Remote-User"
params+=" --proxy-client-cert-file=/etc/srv/kubernetes/proxy_client.crt"
params+=" --proxy-client-key-file=/etc/srv/kubernetes/proxy_client.key"
params+=" --enable-aggregator-routing=true"
params+=" --client-ca-file=/etc/srv/kubernetes/ca.crt"
params+=" --token-auth-file=/etc/srv/kubernetes/known_tokens.csv"
params+=" --secure-port=443"
params+=" --basic-auth-file=/etc/srv/kubernetes/basic_auth.csv"
params+=" --target-ram-mb=$((${NUM_NODES} * 60))"
params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
params+=" --admission-control=${CUSTOM_ADMISSION_PLUGINS}"
params+=" --authorization-mode=Node,RBAC"
params+=" --allow-privileged=true"
if [[ -n "${STORAGE_BACKEND:-}" ]]; then
params+=" --storage-backend=${STORAGE_BACKEND}"
fi
if [[ -n "${STORAGE_MEDIA_TYPE:-}" ]]; then
params+=" --storage-media-type=${STORAGE_MEDIA_TYPE}"
fi
if [[ -n "${ETCD_QUORUM_READ:-}" ]]; then
params+=" --etcd-quorum-read=${ETCD_QUORUM_READ}"
fi
if [[ -n "${ETCD_COMPACTION_INTERVAL_SEC:-}" ]]; then
params+=" --etcd-compaction-interval=${ETCD_COMPACTION_INTERVAL_SEC}s"
fi
if [[ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}" ]]; then
params+=" --min-request-timeout=${KUBE_APISERVER_REQUEST_TIMEOUT}"
fi
if [[ "${NUM_NODES}" -ge 3000 ]]; then
params+=" --max-requests-inflight=3000 --max-mutating-requests-inflight=1000"
elif [[ "${NUM_NODES}" -ge 1000 ]]; then
params+=" --max-requests-inflight=1500 --max-mutating-requests-inflight=500"
fi
if [[ -n "${RUNTIME_CONFIG:-}" ]]; then
params+=" --runtime-config=${RUNTIME_CONFIG}"
fi
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
fi
if [[ "${ENABLE_APISERVER_ADVANCED_AUDIT:-}" == "true" ]]; then
# Create the audit policy file, and mount it into the apiserver pod.
create-master-audit-policy "${audit_policy_file}" "${ADVANCED_AUDIT_POLICY:-}"
# The config below matches the one in cluster/gce/gci/configure-helper.sh.
# TODO: Currently supporting just log backend. Support webhook if needed.
params+=" --audit-policy-file=${audit_policy_file}"
params+=" --audit-log-path=/var/log/kube-apiserver-audit.log"
params+=" --audit-log-maxage=0"
params+=" --audit-log-maxbackup=0"
params+=" --audit-log-maxsize=2000000000"
fi
echo "${params}"
}
# Computes command line arguments to be passed to controller-manager.
function compute-kube-controller-manager-params {
local params="${CONTROLLER_MANAGER_TEST_ARGS:-}"
params+=" --use-service-account-credentials"
params+=" --kubeconfig=/etc/srv/kubernetes/kube-controller-manager/kubeconfig"
params+=" --service-account-private-key-file=/etc/srv/kubernetes/server.key"
params+=" --root-ca-file=/etc/srv/kubernetes/ca.crt"
params+=" --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}"
params+=" --cluster-cidr=${CLUSTER_IP_RANGE}"
params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
params+=" --terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}"
echo "${params}"
}
# Computes command line arguments to be passed to scheduler.
function compute-kube-scheduler-params {
local params="${SCHEDULER_TEST_ARGS:-}"
params+=" --kubeconfig=/etc/srv/kubernetes/kube-scheduler/kubeconfig"
echo "${params}"
}
# Computes command line arguments to be passed to addon-manager.
function compute-kube-addon-manager-params {
echo ""
}
# Start a kubernetes master component '$1' which can be any of the following:
# 1. etcd
# 2. etcd-events
# 3. kube-apiserver
# 4. kube-controller-manager
# 5. kube-scheduler
# 6. kube-addon-manager
#
# It prepares the log file, loads the docker tag, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars:
# DOCKER_REGISTRY
function start-kubemaster-component() {
echo "Start master component $1"
local -r component=$1
prepare-log-file /var/log/"${component}".log
local -r src_file="${KUBE_ROOT}/${component}.yaml"
local -r params=$(compute-${component}-params)
# Evaluate variables.
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{kube_docker_registry}}@${DOCKER_REGISTRY}@g" "${src_file}"
sed -i -e "s@{{instance_prefix}}@${INSTANCE_PREFIX}@g" "${src_file}"
if [ "${component:0:4}" == "etcd" ]; then
sed -i -e "s@{{etcd_image}}@${ETCD_IMAGE}@g" "${src_file}"
elif [ "${component}" == "kube-addon-manager" ]; then
setup-addon-manifests "addons" "kubemark-rbac-bindings"
else
local -r component_docker_tag=$(cat ${KUBE_BINDIR}/${component}.docker_tag)
sed -i -e "s@{{${component}_docker_tag}}@${component_docker_tag}@g" "${src_file}"
if [ "${component}" == "kube-apiserver" ]; then
local audit_policy_config_mount=""
local audit_policy_config_volume=""
if [[ "${ENABLE_APISERVER_ADVANCED_AUDIT:-}" == "true" ]]; then
read -d '' audit_policy_config_mount << EOF
- name: auditpolicyconfigmount
mountPath: ${audit_policy_file}
readOnly: true
EOF
read -d '' audit_policy_config_volume << EOF
- name: auditpolicyconfigmount
hostPath:
path: ${audit_policy_file}
type: FileOrCreate
EOF
fi
sed -i -e "s@{{audit_policy_config_mount}}@${audit_policy_config_mount}@g" "${src_file}"
sed -i -e "s@{{audit_policy_config_volume}}@${audit_policy_config_volume}@g" "${src_file}"
fi
fi
cp "${src_file}" /etc/kubernetes/manifests
}
############################### Main Function ########################################
echo "Start to configure master instance for kubemark"
# Extract files from the server tar and setup master env variables.
cd "${KUBE_ROOT}"
if [[ ! -d "${KUBE_ROOT}/kubernetes" ]]; then
tar xzf kubernetes-server-linux-amd64.tar.gz
fi
source "${KUBE_ROOT}/kubemark-master-env.sh"
# Setup IP firewall rules, required directory structure and etcd config.
config-ip-firewall
create-dirs
setup-kubelet-dir
delete-default-etcd-configs
compute-etcd-variables
# Setup authentication tokens and kubeconfigs for kube-controller-manager and kube-scheduler,
# only if their kubeconfigs don't already exist as this script could be running on reboot.
if [[ ! -f "${KUBE_ROOT}/k8s_auth_data/kube-controller-manager/kubeconfig" ]]; then
KUBE_CONTROLLER_MANAGER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
echo "${KUBE_CONTROLLER_MANAGER_TOKEN},system:kube-controller-manager,uid:system:kube-controller-manager" >> "${KUBE_ROOT}/k8s_auth_data/known_tokens.csv"
create-kubecontrollermanager-kubeconfig
fi
if [[ ! -f "${KUBE_ROOT}/k8s_auth_data/kube-scheduler/kubeconfig" ]]; then
KUBE_SCHEDULER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
echo "${KUBE_SCHEDULER_TOKEN},system:kube-scheduler,uid:system:kube-scheduler" >> "${KUBE_ROOT}/k8s_auth_data/known_tokens.csv"
create-kubescheduler-kubeconfig
fi
# Mount master PD for etcd and create symbolic links to it.
{
main_etcd_mount_point="/mnt/disks/master-pd"
mount-pd "google-master-pd" "${main_etcd_mount_point}"
# Contains all the data stored in etcd.
mkdir -m 700 -p "${main_etcd_mount_point}/var/etcd"
ln -s -f "${main_etcd_mount_point}/var/etcd" /var/etcd
mkdir -p /etc/srv
# Setup the dynamically generated apiserver auth certs and keys to pd.
mkdir -p "${main_etcd_mount_point}/srv/kubernetes"
ln -s -f "${main_etcd_mount_point}/srv/kubernetes" /etc/srv/kubernetes
# Copy the files to the PD only if they don't exist (so we do it only the first time).
if [[ "$(ls -A ${main_etcd_mount_point}/srv/kubernetes/)" == "" ]]; then
cp -r "${KUBE_ROOT}"/k8s_auth_data/* "${main_etcd_mount_point}/srv/kubernetes/"
fi
# Directory for kube-apiserver to store SSH key (if necessary).
mkdir -p "${main_etcd_mount_point}/srv/sshproxy"
ln -s -f "${main_etcd_mount_point}/srv/sshproxy" /etc/srv/sshproxy
}
# Mount master PD for event-etcd (if required) and create symbolic links to it.
{
EVENT_STORE_IP="${EVENT_STORE_IP:-127.0.0.1}"
EVENT_STORE_URL="${EVENT_STORE_URL:-http://${EVENT_STORE_IP}:4002}"
if [ "${EVENT_PD:-}" == "true" ]; then
event_etcd_mount_point="/mnt/disks/master-event-pd"
mount-pd "google-master-event-pd" "${event_etcd_mount_point}"
# Contains all the data stored in event etcd.
mkdir -m 700 -p "${event_etcd_mount_point}/var/etcd/events"
ln -s -f "${event_etcd_mount_point}/var/etcd/events" /var/etcd/events
fi
}
# Setup docker flags and load images of the master components.
assemble-docker-flags
DOCKER_REGISTRY="k8s.gcr.io"
load-docker-images
readonly audit_policy_file="/etc/audit_policy.config"
# Start kubelet as a supervisord process and master components as pods.
start-kubelet
start-kubemaster-component "etcd"
if [ "${EVENT_STORE_IP:-}" == "127.0.0.1" ]; then
start-kubemaster-component "etcd-events"
fi
start-kubemaster-component "kube-apiserver"
start-kubemaster-component "kube-controller-manager"
start-kubemaster-component "kube-scheduler"
start-kubemaster-component "kube-addon-manager"
# Wait till apiserver is working fine or timeout.
echo -n "Waiting for apiserver to be healthy"
start=$(date +%s)
until [ "$(curl 127.0.0.1:8080/healthz 2> /dev/null)" == "ok" ]; do
echo -n "."
sleep 1
now=$(date +%s)
if [ $((now - start)) -gt 300 ]; then
echo "Timeout!"
exit 1
fi
done
echo "Done for the configuration for kubermark master"

52 vendor/k8s.io/kubernetes/test/kubemark/run-e2e-tests.sh generated vendored Executable file

@@ -0,0 +1,52 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
export KUBERNETES_PROVIDER="kubemark"
export KUBE_CONFIG_FILE="config-default.sh"
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
# We need an absolute path to KUBE_ROOT
ABSOLUTE_ROOT=$(readlink -f ${KUBE_ROOT})
source "${KUBE_ROOT}/cluster/kubemark/util.sh"
echo "Kubemark master name: ${MASTER_NAME}"
detect-master
export KUBE_MASTER_URL="https://${KUBE_MASTER_IP}"
export KUBECONFIG="${ABSOLUTE_ROOT}/test/kubemark/resources/kubeconfig.kubemark"
export E2E_MIN_STARTUP_PODS=0
if [[ -z "$@" ]]; then
ARGS='--ginkgo.focus=[Feature:Performance]'
else
ARGS=$@
fi
if [[ "${ENABLE_KUBEMARK_CLUSTER_AUTOSCALER}" == "true" ]]; then
ARGS="${ARGS} --kubemark-external-kubeconfig=${DEFAULT_KUBECONFIG}"
fi
if [[ -f /.dockerenv ]]; then
# Running inside a dockerized runner.
go run ./hack/e2e.go -- --check-version-skew=false --test --test_args="--e2e-verify-service-account=false --dump-logs-on-failure=false ${ARGS}"
else
# Running locally.
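# Escape '[' and ']' below so that a focus like '[Feature:Performance]'
# reaches ginkgo as '\[Feature:Performance\]' and the brackets are matched
# literally by the focus regex.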
ARGS=$(echo "${ARGS}" | sed -e 's/\[/\\[/g' -e 's/\]/\\]/g')
"${KUBE_ROOT}/hack/ginkgo-e2e.sh" "--e2e-verify-service-account=false" "--dump-logs-on-failure=false" ${ARGS}
fi

62
vendor/k8s.io/kubernetes/test/kubemark/skeleton/util.sh generated vendored Normal file
View File

@@ -0,0 +1,62 @@
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script contains the helper functions that each provider hosting
# Kubemark must implement to use the test/kubemark/start-kubemark.sh and
# test/kubemark/stop-kubemark.sh scripts.
# This function should create a machine instance for the master along
# with any/all of the following resources:
# - A PD attached to the master (optionally one more for storing events)
# - A public IP address for the master ($MASTER_IP)
# - A network firewall rule allowing all TCP traffic on port 443 of the master
# Note: This step is mandatory for kubemark to work.
#
# ENV vars that should be defined by the end of this function:
# 1. MASTER_IP
# 2. MASTER_NAME
#
# It is recommended that this function retry the above operations in case
# of failures.
function create-master-instance-with-resources {
echo "MASTER_IP: $MASTER_IP" 1>&2
echo "MASTER_NAME: $MASTER_NAME" 1>&2
}
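# For illustration, a minimal GCE-flavored sketch of what a provider's
# implementation might look like. The 'example-' prefix, EXAMPLE_ZONE and the
# disk/firewall names are assumptions of this sketch, not part of the contract.
function example-create-master-instance-with-resources {
# EXAMPLE_ZONE is assumed to be set by the provider's config.
MASTER_NAME="${MASTER_NAME:-kubemark-master}"
gcloud compute disks create "${MASTER_NAME}-pd" --zone "${EXAMPLE_ZONE}" --size "20GB"
gcloud compute instances create "${MASTER_NAME}" \
--zone "${EXAMPLE_ZONE}" \
--machine-type "n1-standard-4" \
--disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no"
gcloud compute firewall-rules create "${MASTER_NAME}-https" \
--allow "tcp:443" --source-ranges "0.0.0.0/0"
MASTER_IP=$(gcloud compute instances describe "${MASTER_NAME}" \
--zone "${EXAMPLE_ZONE}" --format='value(networkInterfaces[0].accessConfigs[0].natIP)')
}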
# This function should execute the command ('$1') on the master machine
# (possibly through SSH), retrying in case of failure. The allowed number of
# retries is '$2' (if not provided, it defaults to a single try).
function execute-cmd-on-master-with-retries() {
echo "Executing command on the master" 1>&2
}
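# A minimal sketch, assuming the master is reachable over SSH as the
# 'kubernetes' user; the 'example-' name is illustrative only:
function example-execute-cmd-on-master-with-retries() {
local -r cmd="$1"
local -r retries="${2:-1}"
local attempt
for attempt in $(seq 1 "${retries}"); do
if ssh "kubernetes@${MASTER_NAME}" -- "${cmd}"; then
return 0
fi
echo "Attempt ${attempt} of ${retries} to run '${cmd}' on master failed." >&2
sleep $((attempt * 5))
done
return 1
}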
# This function should act as scp for the kubemark cluster: it copies the
# files given by the first n-1 arguments to the remote location given by
# the n-th argument.
#
# It is recommended that this function retry in case of failures.
function copy-files() {
echo "Copying files" 1>&2
}
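# A minimal sketch using scp, with the same calling convention (the last
# argument is the destination); the retry pattern mirrors the sketch above:
function example-copy-files() {
local -r dest="${*: -1}"
local -a srcs=("${@:1:$#-1}")
local attempt
for attempt in 1 2 3; do
if scp -r "${srcs[@]}" "${dest}"; then
return 0
fi
sleep $((attempt * 5))
done
return 1
}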
# This function should delete the master instance along with all the
# resources that have been allocated inside the function
# 'create-master-instance-with-resources' above.
#
# It is recommended that this function retry in case of failures.
function delete-master-instance-and-resources {
echo "Deleting master instance and its allocated resources" 1>&2
}
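# A minimal GCE-flavored sketch mirroring the create step above; the resource
# names and EXAMPLE_ZONE are the same illustrative assumptions:
function example-delete-master-instance-and-resources {
gcloud compute instances delete "${MASTER_NAME}" --zone "${EXAMPLE_ZONE}" --quiet || true
gcloud compute disks delete "${MASTER_NAME}-pd" --zone "${EXAMPLE_ZONE}" --quiet || true
gcloud compute firewall-rules delete "${MASTER_NAME}-https" --quiet || true
}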

515
vendor/k8s.io/kubernetes/test/kubemark/start-kubemark.sh generated vendored Executable file
View File

@@ -0,0 +1,515 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script that creates a Kubemark cluster for any given cloud provider.
set -o errexit
set -o nounset
set -o pipefail
TMP_ROOT="$(dirname "${BASH_SOURCE}")/../.."
KUBE_ROOT=$(readlink -e ${TMP_ROOT} 2> /dev/null || perl -MCwd -e 'print Cwd::abs_path shift' ${TMP_ROOT})
source "${KUBE_ROOT}/test/kubemark/skeleton/util.sh"
source "${KUBE_ROOT}/test/kubemark/cloud-provider-config.sh"
source "${KUBE_ROOT}/test/kubemark/${CLOUD_PROVIDER}/util.sh"
source "${KUBE_ROOT}/cluster/kubemark/${CLOUD_PROVIDER}/config-default.sh"
source "${KUBE_ROOT}/cluster/kubemark/util.sh"
# hack/lib/init.sh will overwrite ETCD_VERSION, if it is unset, with the
# default from hack/lib/etcd.sh.
# To avoid that, if it is empty we set it to 'avoid-overwrite' and
# clear it again afterwards.
if [ -z "${ETCD_VERSION:-}" ]; then
ETCD_VERSION="avoid-overwrite"
fi
source "${KUBE_ROOT}/hack/lib/init.sh"
if [ "${ETCD_VERSION:-}" == "avoid-overwrite" ]; then
ETCD_VERSION=""
fi
KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh"
KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark"
RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources"
# Generate a random 6-character alphanumeric tag for the kubemark image.
# Used to uniquify image builds across different invocations of this script.
KUBEMARK_IMAGE_TAG=$(head /dev/urandom | tr -dc 'a-z0-9' | fold -w 6 | head -n 1)
# Write all environment variables that we need to pass to the kubemark master,
# locally to the file ${RESOURCE_DIRECTORY}/kubemark-master-env.sh.
function create-master-environment-file {
cat > "${RESOURCE_DIRECTORY}/kubemark-master-env.sh" <<EOF
# Generic variables.
INSTANCE_PREFIX="${INSTANCE_PREFIX:-}"
SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-}"
EVENT_PD="${EVENT_PD:-}"
# Etcd related variables.
ETCD_IMAGE="${ETCD_IMAGE:-3.2.18-0}"
ETCD_VERSION="${ETCD_VERSION:-}"
# Controller-manager related variables.
CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-}"
ALLOCATE_NODE_CIDRS="${ALLOCATE_NODE_CIDRS:-}"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-}"
TERMINATED_POD_GC_THRESHOLD="${TERMINATED_POD_GC_THRESHOLD:-}"
# Scheduler related variables.
SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_ARGS:-}"
# Apiserver related variables.
APISERVER_TEST_ARGS="${APISERVER_TEST_ARGS:-}"
STORAGE_MEDIA_TYPE="${STORAGE_MEDIA_TYPE:-}"
STORAGE_BACKEND="${STORAGE_BACKEND:-etcd3}"
ETCD_QUORUM_READ="${ETCD_QUORUM_READ:-}"
ETCD_COMPACTION_INTERVAL_SEC="${ETCD_COMPACTION_INTERVAL_SEC:-}"
RUNTIME_CONFIG="${RUNTIME_CONFIG:-}"
NUM_NODES="${NUM_NODES:-}"
CUSTOM_ADMISSION_PLUGINS="${CUSTOM_ADMISSION_PLUGINS:-}"
FEATURE_GATES="${FEATURE_GATES:-}"
KUBE_APISERVER_REQUEST_TIMEOUT="${KUBE_APISERVER_REQUEST_TIMEOUT:-}"
ENABLE_APISERVER_ADVANCED_AUDIT="${ENABLE_APISERVER_ADVANCED_AUDIT:-}"
EOF
echo "Created the environment file for master."
}
# Generate certs/keys for the CA, master, kubelet and kubecfg, and tokens for
# the kubelet, kube-proxy, node-problem-detector, heapster, cluster-autoscaler
# and kube-dns.
function generate-pki-config {
kube::util::ensure-temp-dir
gen-kube-bearertoken
gen-kube-basicauth
create-certs ${MASTER_IP}
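# Each token below is 32 characters of base64-encoded randomness: read 128
# random bytes, base64-encode them, strip '=', '+' and '/', and keep the
# first 32 characters.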
KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
NODE_PROBLEM_DETECTOR_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
HEAPSTER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
CLUSTER_AUTOSCALER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_DNS_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
echo "Generated PKI authentication data for kubemark."
}
# Wait for the master to be reachable for executing commands on it. We do this
# by trying to run the bash no-op (':') on the master, with 10 retries.
function wait-for-master-reachability {
execute-cmd-on-master-with-retries ":" 10
echo "Checked master reachability for remote command execution."
}
# Write all the relevant certs/keys/tokens to the master.
function write-pki-config-to-master {
PKI_SETUP_CMD="sudo mkdir /home/kubernetes/k8s_auth_data -p && \
sudo bash -c \"echo ${CA_CERT_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/ca.crt\" && \
sudo bash -c \"echo ${MASTER_CERT_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/server.cert\" && \
sudo bash -c \"echo ${MASTER_KEY_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/server.key\" && \
sudo bash -c \"echo ${REQUESTHEADER_CA_CERT_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/aggr_ca.crt\" && \
sudo bash -c \"echo ${PROXY_CLIENT_CERT_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/proxy_client.crt\" && \
sudo bash -c \"echo ${PROXY_CLIENT_KEY_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/proxy_client.key\" && \
sudo bash -c \"echo ${KUBECFG_CERT_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/kubecfg.crt\" && \
sudo bash -c \"echo ${KUBECFG_KEY_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/kubecfg.key\" && \
sudo bash -c \"echo \"${KUBE_BEARER_TOKEN},admin,admin\" > /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
sudo bash -c \"echo \"${KUBELET_TOKEN},system:node:node-name,uid:kubelet,system:nodes\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
sudo bash -c \"echo \"${KUBE_PROXY_TOKEN},system:kube-proxy,uid:kube_proxy\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
sudo bash -c \"echo \"${HEAPSTER_TOKEN},system:heapster,uid:heapster\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
sudo bash -c \"echo \"${CLUSTER_AUTOSCALER_TOKEN},system:cluster-autoscaler,uid:cluster-autoscaler\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
sudo bash -c \"echo \"${NODE_PROBLEM_DETECTOR_TOKEN},system:node-problem-detector,uid:system:node-problem-detector\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
sudo bash -c \"echo \"${KUBE_DNS_TOKEN},system:kube-dns,uid:kube-dns\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
sudo bash -c \"echo ${KUBE_PASSWORD},admin,admin > /home/kubernetes/k8s_auth_data/basic_auth.csv\""
execute-cmd-on-master-with-retries "${PKI_SETUP_CMD}" 3
echo "Wrote PKI certs, keys, tokens and admin password to master."
}
# Write kubeconfig to ${RESOURCE_DIRECTORY}/kubeconfig.kubemark in order to
# use kubectl locally.
function write-local-kubeconfig {
LOCAL_KUBECONFIG="${RESOURCE_DIRECTORY}/kubeconfig.kubemark"
cat > "${LOCAL_KUBECONFIG}" << EOF
apiVersion: v1
kind: Config
users:
- name: kubecfg
user:
client-certificate-data: "${KUBECFG_CERT_BASE64}"
client-key-data: "${KUBECFG_KEY_BASE64}"
username: admin
password: admin
clusters:
- name: kubemark
cluster:
certificate-authority-data: "${CA_CERT_BASE64}"
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: kubecfg
name: kubemark-context
current-context: kubemark-context
EOF
echo "Kubeconfig file for kubemark master written to ${LOCAL_KUBECONFIG}."
}
# Copy all the necessary resource files (scripts/configs/manifests) to the master.
function copy-resource-files-to-master {
copy-files \
"${SERVER_BINARY_TAR}" \
"${RESOURCE_DIRECTORY}/kubemark-master-env.sh" \
"${RESOURCE_DIRECTORY}/start-kubemark-master.sh" \
"${RESOURCE_DIRECTORY}/kubeconfig.kubemark" \
"${KUBEMARK_DIRECTORY}/configure-kubectl.sh" \
"${RESOURCE_DIRECTORY}/manifests/etcd.yaml" \
"${RESOURCE_DIRECTORY}/manifests/etcd-events.yaml" \
"${RESOURCE_DIRECTORY}/manifests/kube-apiserver.yaml" \
"${RESOURCE_DIRECTORY}/manifests/kube-scheduler.yaml" \
"${RESOURCE_DIRECTORY}/manifests/kube-controller-manager.yaml" \
"${RESOURCE_DIRECTORY}/manifests/kube-addon-manager.yaml" \
"${RESOURCE_DIRECTORY}/manifests/addons/kubemark-rbac-bindings" \
"kubernetes@${MASTER_NAME}":/home/kubernetes/
echo "Copied server binary, master startup scripts, configs and resource manifests to master."
}
# Make startup scripts executable and run start-kubemark-master.sh.
function start-master-components {
echo ""
MASTER_STARTUP_CMD="sudo bash /home/kubernetes/start-kubemark-master.sh"
execute-cmd-on-master-with-retries "${MASTER_STARTUP_CMD}"
echo "The master has started and is now live."
}
# Finds the kubemark binary for the 'linux/amd64' platform, uses it to build a
# docker image for the hollow-node, and uploads that image to the appropriate
# docker container registry for the cloud provider.
function create-and-upload-hollow-node-image {
MAKE_DIR="${KUBE_ROOT}/cluster/images/kubemark"
KUBEMARK_BIN="$(kube::util::find-binary-for-platform kubemark linux/amd64)"
if [[ -z "${KUBEMARK_BIN}" ]]; then
echo 'Cannot find cmd/kubemark binary'
exit 1
fi
echo "Configuring registry authentication"
mkdir -p "${HOME}/.docker"
gcloud beta auth configure-docker -q
echo "Copying kubemark binary to ${MAKE_DIR}"
cp "${KUBEMARK_BIN}" "${MAKE_DIR}"
CURR_DIR=$(pwd)
cd "${MAKE_DIR}"
RETRIES=3
KUBEMARK_IMAGE_REGISTRY="${KUBEMARK_IMAGE_REGISTRY:-${CONTAINER_REGISTRY}/${PROJECT}}"
for attempt in $(seq 1 ${RETRIES}); do
if ! REGISTRY="${KUBEMARK_IMAGE_REGISTRY}" IMAGE_TAG="${KUBEMARK_IMAGE_TAG}" make "${KUBEMARK_IMAGE_MAKE_TARGET}"; then
if [[ "${attempt}" -eq "${RETRIES}" ]]; then
echo "${color_red}Make failed. Exiting.${color_norm}"
exit 1
fi
echo -e "${color_yellow}Make attempt $(($attempt)) failed. Retrying.${color_norm}" >& 2
sleep $(($attempt * 5))
else
break
fi
done
rm kubemark
cd "${CURR_DIR}"
echo "Created and uploaded the kubemark hollow-node image to docker registry."
}
# Use bazel rule to create a docker image for hollow-node and upload
# it to the appropriate docker container registry for the cloud provider.
function create-and-upload-hollow-node-image-bazel {
echo "Configuring registry authentication"
mkdir -p "${HOME}/.docker"
gcloud beta auth configure-docker -q
RETRIES=3
for attempt in $(seq 1 ${RETRIES}); do
if ! bazel run //cluster/images/kubemark:push --define REGISTRY="${KUBEMARK_IMAGE_REGISTRY}" --define IMAGE_TAG="${KUBEMARK_IMAGE_TAG}"; then
if [[ "${attempt}" -eq "${RETRIES}" ]]; then
echo "${color_red}Image push failed. Exiting.${color_norm}"
exit 1
fi
echo -e "${color_yellow}Make attempt $(($attempt)) failed. Retrying.${color_norm}" >& 2
sleep $(($attempt * 5))
else
break
fi
done
echo "Created and uploaded the kubemark hollow-node image to docker registry."
}
# Generate the secret and configMap needed for the hollow-node pods to work,
# prepare manifests for the hollow-node and heapster replication controllers
# from templates, and finally create these resources through kubectl.
function create-kube-hollow-node-resources {
# Create kubeconfig for Kubelet.
KUBELET_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: kubelet
user:
client-certificate-data: "${KUBELET_CERT_BASE64}"
client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
cluster:
certificate-authority-data: "${CA_CERT_BASE64}"
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: kubelet
name: kubemark-context
current-context: kubemark-context")
# Create kubeconfig for Kubeproxy.
KUBEPROXY_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: kube-proxy
user:
token: ${KUBE_PROXY_TOKEN}
clusters:
- name: kubemark
cluster:
insecure-skip-tls-verify: true
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: kube-proxy
name: kubemark-context
current-context: kubemark-context")
# Create kubeconfig for Heapster.
HEAPSTER_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: heapster
user:
token: ${HEAPSTER_TOKEN}
clusters:
- name: kubemark
cluster:
insecure-skip-tls-verify: true
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: heapster
name: kubemark-context
current-context: kubemark-context")
# Create kubeconfig for Cluster Autoscaler.
CLUSTER_AUTOSCALER_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: cluster-autoscaler
user:
token: ${CLUSTER_AUTOSCALER_TOKEN}
clusters:
- name: kubemark
cluster:
insecure-skip-tls-verify: true
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: cluster-autoscaler
name: kubemark-context
current-context: kubemark-context")
# Create kubeconfig for NodeProblemDetector.
NPD_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: node-problem-detector
user:
token: ${NODE_PROBLEM_DETECTOR_TOKEN}
clusters:
- name: kubemark
cluster:
insecure-skip-tls-verify: true
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: node-problem-detector
name: kubemark-context
current-context: kubemark-context")
# Create kubeconfig for Kube DNS.
KUBE_DNS_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: kube-dns
user:
token: ${KUBE_DNS_TOKEN}
clusters:
- name: kubemark
cluster:
insecure-skip-tls-verify: true
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: kube-dns
name: kubemark-context
current-context: kubemark-context")
# Create kubemark namespace.
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/kubemark-ns.json"
# Create the configmap for configuring the hollow kubelet, proxy and npd.
"${KUBECTL}" create configmap "node-configmap" --namespace="kubemark" \
--from-literal=content.type="${TEST_CLUSTER_API_CONTENT_TYPE}" \
--from-file=kernel.monitor="${RESOURCE_DIRECTORY}/kernel-monitor.json"
# Create secret for passing kubeconfigs to kubelet, kubeproxy and npd.
"${KUBECTL}" create secret generic "kubeconfig" --type=Opaque --namespace="kubemark" \
--from-literal=kubelet.kubeconfig="${KUBELET_KUBECONFIG_CONTENTS}" \
--from-literal=kubeproxy.kubeconfig="${KUBEPROXY_KUBECONFIG_CONTENTS}" \
--from-literal=heapster.kubeconfig="${HEAPSTER_KUBECONFIG_CONTENTS}" \
--from-literal=cluster_autoscaler.kubeconfig="${CLUSTER_AUTOSCALER_KUBECONFIG_CONTENTS}" \
--from-literal=npd.kubeconfig="${NPD_KUBECONFIG_CONTENTS}" \
--from-literal=dns.kubeconfig="${KUBE_DNS_KUBECONFIG_CONTENTS}"
# Create addon pods.
# Heapster.
mkdir -p "${RESOURCE_DIRECTORY}/addons"
sed "s/{{MASTER_IP}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/heapster_template.json" > "${RESOURCE_DIRECTORY}/addons/heapster.json"
metrics_mem_per_node=4
metrics_mem=$((200 + ${metrics_mem_per_node}*${NUM_NODES}))
sed -i'' -e "s/{{METRICS_MEM}}/${metrics_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
metrics_cpu_per_node_numerator=${NUM_NODES}
metrics_cpu_per_node_denominator=2
metrics_cpu=$((80 + metrics_cpu_per_node_numerator / metrics_cpu_per_node_denominator))
sed -i'' -e "s/{{METRICS_CPU}}/${metrics_cpu}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
eventer_mem_per_node=500
eventer_mem=$((200 * 1024 + ${eventer_mem_per_node}*${NUM_NODES}))
sed -i'' -e "s/{{EVENTER_MEM}}/${eventer_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
# Cluster Autoscaler.
if [[ "${ENABLE_KUBEMARK_CLUSTER_AUTOSCALER:-}" == "true" ]]; then
echo "Setting up Cluster Autoscaler"
KUBEMARK_AUTOSCALER_MIG_NAME="${KUBEMARK_AUTOSCALER_MIG_NAME:-${NODE_INSTANCE_PREFIX}-group}"
KUBEMARK_AUTOSCALER_MIN_NODES="${KUBEMARK_AUTOSCALER_MIN_NODES:-0}"
KUBEMARK_AUTOSCALER_MAX_NODES="${KUBEMARK_AUTOSCALER_MAX_NODES:-10}"
NUM_NODES=${KUBEMARK_AUTOSCALER_MAX_NODES}
echo "Setting maximum cluster size to ${NUM_NODES}."
KUBEMARK_MIG_CONFIG="autoscaling.k8s.io/nodegroup: ${KUBEMARK_AUTOSCALER_MIG_NAME}"
sed "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/cluster-autoscaler_template.json" > "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
sed -i'' -e "s/{{kubemark_autoscaler_mig_name}}/${KUBEMARK_AUTOSCALER_MIG_NAME}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
sed -i'' -e "s/{{kubemark_autoscaler_min_nodes}}/${KUBEMARK_AUTOSCALER_MIN_NODES}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
sed -i'' -e "s/{{kubemark_autoscaler_max_nodes}}/${KUBEMARK_AUTOSCALER_MAX_NODES}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
fi
# Kube DNS.
if [[ "${ENABLE_KUBEMARK_KUBE_DNS:-}" == "true" ]]; then
echo "Setting up kube-dns"
sed "s/{{dns_domain}}/${KUBE_DNS_DOMAIN}/g" "${RESOURCE_DIRECTORY}/kube_dns_template.yaml" > "${RESOURCE_DIRECTORY}/addons/kube_dns.yaml"
fi
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/addons" --namespace="kubemark"
# Create the replication controller for hollow-nodes.
# We allow NUM_REPLICAS to be overridden when running the Cluster Autoscaler.
NUM_REPLICAS=${NUM_REPLICAS:-${NUM_NODES}}
sed "s/{{numreplicas}}/${NUM_REPLICAS}/g" "${RESOURCE_DIRECTORY}/hollow-node_template.yaml" > "${RESOURCE_DIRECTORY}/hollow-node.yaml"
proxy_cpu=20
if [ "${NUM_NODES}" -gt 1000 ]; then
proxy_cpu=50
fi
proxy_mem_per_node=50
proxy_mem=$((100 * 1024 + ${proxy_mem_per_node}*${NUM_NODES}))
sed -i'' -e "s/{{HOLLOW_PROXY_CPU}}/${proxy_cpu}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s/{{HOLLOW_PROXY_MEM}}/${proxy_mem}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s'{{kubemark_image_registry}}'${KUBEMARK_IMAGE_REGISTRY}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s/{{kubemark_image_tag}}/${KUBEMARK_IMAGE_TAG}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s/{{kubelet_verbosity_level}}/${KUBELET_TEST_LOG_LEVEL}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s/{{kubeproxy_verbosity_level}}/${KUBEPROXY_TEST_LOG_LEVEL}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s/{{use_real_proxier}}/${USE_REAL_PROXIER}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s'{{kubemark_mig_config}}'${KUBEMARK_MIG_CONFIG:-}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/hollow-node.yaml" --namespace="kubemark"
echo "Created secrets, configMaps, replication-controllers required for hollow-nodes."
}
# Wait until all hollow-nodes are running or there is a timeout.
function wait-for-hollow-nodes-to-run-or-timeout {
echo -n "Waiting for all hollow-nodes to become Running"
start=$(date +%s)
nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node 2> /dev/null) || true
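# 'kubectl get node' prints a header line, hence the '- 1'; every line not
# matching "NotReady" (other than the header) counts as a ready node.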
ready=$(($(echo "${nodes}" | grep -v "NotReady" | wc -l) - 1))
until [[ "${ready}" -ge "${NUM_REPLICAS}" ]]; do
echo -n "."
sleep 1
now=$(date +%s)
# Fail if it has already taken more than 30 minutes.
if [ $((now - start)) -gt 1800 ]; then
echo ""
echo -e "${color_red} Timeout waiting for all hollow-nodes to become Running. ${color_norm}"
# Try listing nodes again; if it fails, the API server is not responding.
if "${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node &> /dev/null; then
echo "Found only ${ready} ready hollow-nodes while waiting for ${NUM_NODES}."
else
echo "Got an error while trying to list hollow-nodes. The API server is probably down."
fi
pods=$("${KUBECTL}" get pods -l name=hollow-node --namespace=kubemark) || true
running=$(echo "${pods}" | grep "Running" | wc -l)
echo "${running} hollow-nodes are reported as 'Running'"
not_running=$(($(echo "${pods}" | grep -v "Running" | wc -l) - 1))
echo "${not_running} hollow-nodes are reported as NOT 'Running'"
echo "${pods}" | grep -v "Running"
exit 1
fi
nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node 2> /dev/null) || true
ready=$(($(echo "${nodes}" | grep -v "NotReady" | wc -l) - 1))
done
echo -e "${color_green} Done!${color_norm}"
}
############################### Main Function ########################################
detect-project &> /dev/null
# Setup for master.
echo -e "${color_yellow}STARTING SETUP FOR MASTER${color_norm}"
find-release-tars
create-master-environment-file
create-master-instance-with-resources
generate-pki-config
wait-for-master-reachability
write-pki-config-to-master
write-local-kubeconfig
copy-resource-files-to-master
start-master-components
# Setup for hollow-nodes.
echo ""
echo -e "${color_yellow}STARTING SETUP FOR HOLLOW-NODES${color_norm}"
if [[ "${KUBEMARK_BAZEL_BUILD:-}" =~ ^[yY]$ ]]; then
create-and-upload-hollow-node-image-bazel
else
create-and-upload-hollow-node-image
fi
create-kube-hollow-node-resources
wait-for-hollow-nodes-to-run-or-timeout
echo ""
echo "Master IP: ${MASTER_IP}"
echo "Password to kubemark master: ${KUBE_PASSWORD}"
echo "Kubeconfig for kubemark master is written in ${LOCAL_KUBECONFIG}"

42
vendor/k8s.io/kubernetes/test/kubemark/stop-kubemark.sh generated vendored Executable file
View File

@@ -0,0 +1,42 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script that destroys a Kubemark cluster and deletes all its master resources.
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
source "${KUBE_ROOT}/test/kubemark/skeleton/util.sh"
source "${KUBE_ROOT}/test/kubemark/cloud-provider-config.sh"
source "${KUBE_ROOT}/test/kubemark/${CLOUD_PROVIDER}/util.sh"
source "${KUBE_ROOT}/cluster/kubemark/${CLOUD_PROVIDER}/config-default.sh"
source "${KUBE_ROOT}/cluster/kubemark/util.sh"
KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh"
KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark"
RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources"
detect-project &> /dev/null
"${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/addons" &> /dev/null || true
"${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/hollow-node.yaml" &> /dev/null || true
"${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/kubemark-ns.json" &> /dev/null || true
rm -rf "${RESOURCE_DIRECTORY}/addons" \
"${RESOURCE_DIRECTORY}/kubeconfig.kubemark" \
"${RESOURCE_DIRECTORY}/hollow-node.yaml" \
"${RESOURCE_DIRECTORY}/kubemark-master-env.sh" &> /dev/null || true
delete-master-instance-and-resources