Bumping k8s dependencies to 1.13
vendor/k8s.io/kubernetes/cluster/OWNERS | 3 (generated, vendored)
@@ -3,6 +3,7 @@ reviewers:
 - jbeda
 - mikedanese
 - roberthbailey
+- spiffxp
 - zmerlynn
 approvers:
 - eparis
@@ -10,3 +11,5 @@ approvers:
 - mikedanese
 - roberthbailey
 - zmerlynn
+labels:
+- sig/cluster-lifecycle
vendor/k8s.io/kubernetes/cluster/README.md | 6 (generated, vendored)
@@ -1,14 +1,12 @@
 # Cluster Configuration
 
-##### Deprecation Notice: This directory has entered maintenance mode and will not be accepting new providers. Please submit new automation deployments to [kube-deploy](https://github.com/kubernetes/kube-deploy). Deployments in this directory will continue to be maintained and supported at their current level of support.
+##### Deprecation Notice: This directory has entered maintenance mode and will not be accepting new providers. Deployments in this directory will continue to be maintained and supported at their current level of support.
 
-The scripts and data in this directory automate creation and configuration of a Kubernetes cluster, including networking, DNS, nodes, and master components.
+The scripts and data in this directory automate creation and configuration of a Kubernetes cluster, including networking, DNS, nodes, and control plane components.
 
 See the [getting-started guides](https://kubernetes.io/docs/getting-started-guides) for examples of how to use the scripts.
 
 *cloudprovider*/`config-default.sh` contains a set of tweakable definitions/parameters for the cluster.
 
-The heavy lifting of configuring the VMs is done by [SaltStack](http://www.saltstack.com/).
-
 
 []()
vendor/k8s.io/kubernetes/cluster/addons/addon-manager/CHANGELOG.md | 4 (generated, vendored)
@@ -1,3 +1,7 @@
+### Version 8.7 (Tue September 4 2018 Zihong Zheng <zihongz@google.com>)
+- Support extra `--prune-whitelist` resources in kube-addon-manager.
+- Update kubectl to v1.10.7.
+
 ### Version 8.6 (Tue February 20 2018 Zihong Zheng <zihongz@google.com>)
 - Allow reconcile/ensure loop to work with resource under non-kube-system namespace.
 - Update kubectl to v1.9.3.
vendor/k8s.io/kubernetes/cluster/addons/addon-manager/Makefile | 4 (generated, vendored)
@@ -15,8 +15,8 @@
 IMAGE=staging-k8s.gcr.io/kube-addon-manager
 ARCH?=amd64
 TEMP_DIR:=$(shell mktemp -d)
-VERSION=v8.6
-KUBECTL_VERSION?=v1.9.3
+VERSION=v8.7
+KUBECTL_VERSION?=v1.10.7
 
 ifeq ($(ARCH),amd64)
 BASEIMAGE?=bashell/alpine-bash
vendor/k8s.io/kubernetes/cluster/addons/addon-manager/kube-addons.sh | 46 (generated, vendored)
@@ -28,6 +28,29 @@
 
 KUBECTL=${KUBECTL_BIN:-/usr/local/bin/kubectl}
 KUBECTL_OPTS=${KUBECTL_OPTS:-}
+# KUBECTL_PRUNE_WHITELIST is a list of resources whitelisted by
+# default.
+# This is currently the same with the default in:
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/apply.go
+KUBECTL_PRUNE_WHITELIST=(
+  core/v1/ConfigMap
+  core/v1/Endpoints
+  core/v1/Namespace
+  core/v1/PersistentVolumeClaim
+  core/v1/PersistentVolume
+  core/v1/Pod
+  core/v1/ReplicationController
+  core/v1/Secret
+  core/v1/Service
+  batch/v1/Job
+  batch/v1beta1/CronJob
+  extensions/v1beta1/DaemonSet
+  extensions/v1beta1/Deployment
+  extensions/v1beta1/Ingress
+  extensions/v1beta1/ReplicaSet
+  apps/v1beta1/StatefulSet
+  apps/v1beta1/Deployment
+)
 
 ADDON_CHECK_INTERVAL_SEC=${TEST_ADDON_CHECK_INTERVAL_SEC:-60}
 ADDON_PATH=${ADDON_PATH:-/etc/kubernetes/addons}
@@ -82,6 +105,25 @@ function log() {
   esac
 }
 
+# Generate kubectl prune-whitelist flags from provided resource list.
+function generate_prune_whitelist_flags() {
+  local -r resources=($@)
+  for resource in "${resources[@]}"; do
+    printf "%s" "--prune-whitelist ${resource} "
+  done
+}
+
+# KUBECTL_EXTRA_PRUNE_WHITELIST is a list of extra whitelisted resources
+# besides the default ones.
+extra_prune_whitelist=
+if [ -n "${KUBECTL_EXTRA_PRUNE_WHITELIST:-}" ]; then
+  extra_prune_whitelist=( ${KUBECTL_EXTRA_PRUNE_WHITELIST:-} )
+fi
+prune_whitelist=( ${KUBECTL_PRUNE_WHITELIST[@]} ${extra_prune_whitelist[@]} )
+prune_whitelist_flags=$(generate_prune_whitelist_flags ${prune_whitelist[@]})
+
+log INFO "== Generated kubectl prune whitelist flags: $prune_whitelist_flags =="
+
 # $1 filename of addon to start.
 # $2 count of tries to start the addon.
 # $3 delay in seconds between two consecutive tries
@@ -126,12 +168,12 @@ function reconcile_addons() {
   log INFO "== Reconciling with deprecated label =="
   ${KUBECTL} ${KUBECTL_OPTS} apply -f ${ADDON_PATH} \
     -l ${CLUSTER_SERVICE_LABEL}=true,${ADDON_MANAGER_LABEL}!=EnsureExists \
-    --prune=true --recursive | grep -v configured
+    --prune=true ${prune_whitelist_flags} --recursive | grep -v configured
 
   log INFO "== Reconciling with addon-manager label =="
   ${KUBECTL} ${KUBECTL_OPTS} apply -f ${ADDON_PATH} \
     -l ${CLUSTER_SERVICE_LABEL}!=true,${ADDON_MANAGER_LABEL}=Reconcile \
-    --prune=true --recursive | grep -v configured
+    --prune=true ${prune_whitelist_flags} --recursive | grep -v configured
 
   log INFO "== Kubernetes addon reconcile completed at $(date -Is) =="
 }
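Aside: the whitelist plumbing added above is plain Bash, so its effect can be checked outside the addon manager. A minimal sketch, reusing the function body from the hunk above; the rbac.authorization.k8s.io/v1/ClusterRole value for KUBECTL_EXTRA_PRUNE_WHITELIST is only an illustrative extra resource, not something this commit sets:

#!/usr/bin/env bash
# Sketch: how kube-addons.sh expands resource lists into repeated
# --prune-whitelist flags (function copied from the hunk above).
function generate_prune_whitelist_flags() {
  local -r resources=($@)
  for resource in "${resources[@]}"; do
    printf "%s" "--prune-whitelist ${resource} "
  done
}

KUBECTL_PRUNE_WHITELIST=( core/v1/ConfigMap core/v1/Service )  # trimmed default list
KUBECTL_EXTRA_PRUNE_WHITELIST="rbac.authorization.k8s.io/v1/ClusterRole"  # hypothetical operator-supplied extra

prune_whitelist=( ${KUBECTL_PRUNE_WHITELIST[@]} ${KUBECTL_EXTRA_PRUNE_WHITELIST} )
generate_prune_whitelist_flags ${prune_whitelist[@]}
# Prints: --prune-whitelist core/v1/ConfigMap --prune-whitelist core/v1/Service --prune-whitelist rbac.authorization.k8s.io/v1/ClusterRole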
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/calico-clusterrole.yaml | 17 (generated, vendored)
@@ -10,6 +10,7 @@ rules:
   - apiGroups: [""]
     resources:
       - namespaces
+      - serviceaccounts
     verbs:
       - get
       - list
@@ -36,6 +37,7 @@ rules:
       - get
       - list
      - watch
+      - patch
   - apiGroups: [""]
     resources:
       - nodes
@@ -51,17 +53,28 @@ rules:
       - get
       - list
       - watch
   - apiGroups: ["networking.k8s.io"]
     resources:
       - networkpolicies
     verbs:
       - watch
       - list
   - apiGroups: ["crd.projectcalico.org"]
     resources:
       - globalfelixconfigs
       - felixconfigurations
       - bgppeers
       - globalbgpconfigs
       - bgpconfigurations
       - ippools
       - globalnetworkpolicies
       - globalnetworksets
       - networkpolicies
       - clusterinformations
       - hostendpoints
     verbs:
       - create
       - get
       - list
       - update
       - patch
       - delete
       - watch
@@ -41,18 +41,22 @@ spec:
           value: "none"
         - name: DATASTORE_TYPE
           value: "kubernetes"
-        - name: FELIX_TYPHAK8SSERVICENAME
-          value: "calico-typha"
         - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
           value: "ACCEPT"
-        - name: FELIX_HEALTHENABLED
-          value: "true"
         - name: FELIX_IPV6SUPPORT
           value: "false"
         - name: FELIX_LOGSEVERITYSYS
           value: "none"
+        - name: FELIX_LOGSEVERITYSCREEN
+          value: "info"
         - name: FELIX_PROMETHEUSMETRICSENABLED
           value: "true"
+        - name: FELIX_HEALTHENABLED
+          value: "true"
+        - name: FELIX_REPORTINGINTERVALSECS
+          value: "0"
+        - name: FELIX_TYPHAK8SSERVICENAME
+          value: "calico-typha"
         - name: IP
           value: ""
         - name: NO_DEFAULT_POOLS
@@ -84,6 +88,12 @@ spec:
         - mountPath: /etc/calico
           name: etc-calico
           readOnly: true
+        - mountPath: /var/run/calico
+          name: var-run-calico
+          readOnly: false
+        - mountPath: /var/lib/calico
+          name: var-lib-calico
+          readOnly: false
       # This container installs the Calico CNI binaries
       # and CNI network config file on each node.
       - name: install-cni
@@ -149,6 +159,12 @@ spec:
       - name: cni-net-dir
         hostPath:
           path: /etc/cni/net.d
+      - name: var-run-calico
+        hostPath:
+          path: /var/run/calico
+      - name: var-lib-calico
+        hostPath:
+          path: /var/lib/calico
       tolerations:
       # Make sure calico/node gets scheduled on all nodes.
       - effect: NoSchedule
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/clusterinformations-crd.yaml | 15 (generated, vendored, new file)
@@ -0,0 +1,15 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: clusterinformations.crd.projectcalico.org
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  scope: Cluster
+  group: crd.projectcalico.org
+  version: v1
+  names:
+    kind: ClusterInformation
+    plural: clusterinformations
+    singular: clusterinformation
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/felixconfigurations-crd.yaml | 15 (generated, vendored, new file)
@@ -0,0 +1,15 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: felixconfigurations.crd.projectcalico.org
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  scope: Cluster
+  group: crd.projectcalico.org
+  version: v1
+  names:
+    kind: FelixConfiguration
+    plural: felixconfigurations
+    singular: felixconfiguration
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/globalnetworksets-crd.yaml | 15 (generated, vendored, new file)
@@ -0,0 +1,15 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: globalnetworksets.crd.projectcalico.org
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  scope: Cluster
+  group: crd.projectcalico.org
+  version: v1
+  names:
+    kind: GlobalNetworkSet
+    plural: globalnetworksets
+    singular: globalnetworkset
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/hostendpoints-crd.yaml | 15 (generated, vendored, new file)
@@ -0,0 +1,15 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: hostendpoints.crd.projectcalico.org
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  scope: Cluster
+  group: crd.projectcalico.org
+  version: v1
+  names:
+    kind: HostEndpoint
+    plural: hostendpoints
+    singular: hostendpoint
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/networkpolicies-crd.yaml | 15 (generated, vendored, new file)
@@ -0,0 +1,15 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: networkpolicies.crd.projectcalico.org
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  scope: Namespaced
+  group: crd.projectcalico.org
+  version: v1
+  names:
+    kind: NetworkPolicy
+    plural: networkpolicies
+    singular: networkpolicy
@@ -44,6 +44,8 @@ spec:
           value: "9093"
         - name: TYPHA_DATASTORETYPE
           value: "kubernetes"
+        - name: TYPHA_REPORTINGINTERVALSECS
+          value: "0"
         - name: TYPHA_MAXCONNECTIONSLOWERLIMIT
           value: "1"
         - name: TYPHA_HEALTHENABLED
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-clusterrole.yaml | 14 (generated, vendored, new file)
@@ -0,0 +1,14 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: typha-cpva
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["list"]
+  - apiGroups: ["apps", "extensions"]
+    resources: ["deployments"]
+    verbs: ["patch"]
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-clusterrolebinding.yaml | 15 (generated, vendored, new file)
@@ -0,0 +1,15 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: typha-cpva
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: typha-cpva
+subjects:
+  - kind: ServiceAccount
+    name: typha-cpva
+    namespace: kube-system
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-serviceaccount.yaml | 8 (generated, vendored, new file)
@@ -0,0 +1,8 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: typha-cpva
+  namespace: kube-system
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
@@ -25,7 +25,7 @@ spec:
         # Any image is permissible as long as:
         # 1. It serves a 404 page at /
         # 2. It serves 200 on a /healthz endpoint
-        image: k8s.gcr.io/defaultbackend:1.4
+        image: k8s.gcr.io/defaultbackend-amd64:1.5
         livenessProbe:
           httpGet:
             path: /healthz
@@ -36,31 +36,31 @@ data:
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
-  name: heapster-v1.5.3
+  name: heapster-v1.6.0-beta.1
   namespace: kube-system
   labels:
     k8s-app: heapster
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
-    version: v1.5.3
+    version: v1.6.0-beta.1
 spec:
   replicas: 1
   selector:
     matchLabels:
       k8s-app: heapster
-      version: v1.5.3
+      version: v1.6.0-beta.1
   template:
     metadata:
       labels:
         k8s-app: heapster
-        version: v1.5.3
+        version: v1.6.0-beta.1
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
         seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
       priorityClassName: system-cluster-critical
       containers:
-      - image: k8s.gcr.io/heapster-amd64:v1.5.3
+      - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
         name: heapster
         livenessProbe:
           httpGet:
@@ -73,13 +73,13 @@ spec:
           - /heapster
           - --source=kubernetes.summary_api:''
          - --sink=gcm
-      - image: k8s.gcr.io/heapster-amd64:v1.5.3
+      - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
         name: eventer
         command:
           - /eventer
           - --source=kubernetes:''
           - --sink=gcl
-      - image: k8s.gcr.io/addon-resizer:1.8.1
+      - image: k8s.gcr.io/addon-resizer:1.8.3
         name: heapster-nanny
         resources:
           limits:
@@ -108,11 +108,14 @@ spec:
           - --memory={{ base_metrics_memory }}
           - --extra-memory={{metrics_memory_per_node}}Mi
           - --threshold=5
-          - --deployment=heapster-v1.5.3
+          - --deployment=heapster-v1.6.0-beta.1
           - --container=heapster
           - --poll-period=300000
           - --estimator=exponential
-      - image: k8s.gcr.io/addon-resizer:1.8.1
+          # Specifies the smallest cluster (defined in number of nodes)
+          # resources will be scaled to.
+          - --minClusterSize={{ heapster_min_cluster_size }}
+      - image: k8s.gcr.io/addon-resizer:1.8.2
         name: eventer-nanny
         resources:
           limits:
@@ -141,7 +144,7 @@ spec:
           - --memory={{ base_eventer_memory }}
           - --extra-memory={{eventer_memory_per_node}}Ki
           - --threshold=5
-          - --deployment=heapster-v1.5.3
+          - --deployment=heapster-v1.6.0-beta.1
           - --container=eventer
           - --poll-period=300000
           - --estimator=exponential
@@ -36,31 +36,31 @@ data:
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
-  name: heapster-v1.5.3
+  name: heapster-v1.6.0-beta.1
   namespace: kube-system
   labels:
     k8s-app: heapster
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
-    version: v1.5.3
+    version: v1.6.0-beta.1
 spec:
   replicas: 1
   selector:
     matchLabels:
       k8s-app: heapster
-      version: v1.5.3
+      version: v1.6.0-beta.1
   template:
     metadata:
       labels:
         k8s-app: heapster
-        version: v1.5.3
+        version: v1.6.0-beta.1
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
         seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
       priorityClassName: system-cluster-critical
       containers:
-      - image: k8s.gcr.io/heapster-amd64:v1.5.3
+      - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
         name: heapster
         livenessProbe:
           httpGet:
@@ -74,13 +74,13 @@ spec:
           - --source=kubernetes.summary_api:''
           - --sink=influxdb:http://monitoring-influxdb:8086
           - --sink=gcm:?metrics=autoscaling
-      - image: k8s.gcr.io/heapster-amd64:v1.5.3
+      - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
         name: eventer
         command:
           - /eventer
           - --source=kubernetes:''
           - --sink=gcl
-      - image: k8s.gcr.io/addon-resizer:1.8.1
+      - image: k8s.gcr.io/addon-resizer:1.8.3
         name: heapster-nanny
         resources:
           limits:
@@ -109,11 +109,14 @@ spec:
           - --memory={{ base_metrics_memory }}
           - --extra-memory={{ metrics_memory_per_node }}Mi
           - --threshold=5
-          - --deployment=heapster-v1.5.3
+          - --deployment=heapster-v1.6.0-beta.1
           - --container=heapster
           - --poll-period=300000
           - --estimator=exponential
-      - image: k8s.gcr.io/addon-resizer:1.8.1
+          # Specifies the smallest cluster (defined in number of nodes)
+          # resources will be scaled to.
+          - --minClusterSize={{ heapster_min_cluster_size }}
+      - image: k8s.gcr.io/addon-resizer:1.8.2
         name: eventer-nanny
         resources:
           limits:
@@ -142,7 +145,7 @@ spec:
           - --memory={{ base_eventer_memory }}
           - --extra-memory={{ eventer_memory_per_node }}Ki
           - --threshold=5
-          - --deployment=heapster-v1.5.3
+          - --deployment=heapster-v1.6.0-beta.1
           - --container=eventer
           - --poll-period=300000
           - --estimator=exponential
@@ -36,31 +36,31 @@ data:
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
-  name: heapster-v1.5.3
+  name: heapster-v1.6.0-beta.1
   namespace: kube-system
   labels:
     k8s-app: heapster
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
-    version: v1.5.3
+    version: v1.6.0-beta.1
 spec:
   replicas: 1
   selector:
     matchLabels:
       k8s-app: heapster
-      version: v1.5.3
+      version: v1.6.0-beta.1
   template:
     metadata:
      labels:
         k8s-app: heapster
-        version: v1.5.3
+        version: v1.6.0-beta.1
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
         seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
       priorityClassName: system-cluster-critical
       containers:
-      - image: k8s.gcr.io/heapster-amd64:v1.5.3
+      - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
         name: heapster
         livenessProbe:
           httpGet:
@@ -73,13 +73,13 @@ spec:
           - /heapster
           - --source=kubernetes.summary_api:''
           - --sink=influxdb:http://monitoring-influxdb:8086
-      - image: k8s.gcr.io/heapster-amd64:v1.5.3
+      - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
         name: eventer
         command:
           - /eventer
           - --source=kubernetes:''
           - --sink=influxdb:http://monitoring-influxdb:8086
-      - image: k8s.gcr.io/addon-resizer:1.8.1
+      - image: k8s.gcr.io/addon-resizer:1.8.3
         name: heapster-nanny
         resources:
           limits:
@@ -108,11 +108,14 @@ spec:
           - --memory={{ base_metrics_memory }}
           - --extra-memory={{ metrics_memory_per_node }}Mi
           - --threshold=5
-          - --deployment=heapster-v1.5.3
+          - --deployment=heapster-v1.6.0-beta.1
           - --container=heapster
           - --poll-period=300000
           - --estimator=exponential
-      - image: k8s.gcr.io/addon-resizer:1.8.1
+          # Specifies the smallest cluster (defined in number of nodes)
+          # resources will be scaled to.
+          - --minClusterSize={{ heapster_min_cluster_size }}
+      - image: k8s.gcr.io/addon-resizer:1.8.2
         name: eventer-nanny
         resources:
           limits:
@@ -141,7 +144,7 @@ spec:
           - --memory={{ base_eventer_memory }}
           - --extra-memory={{ eventer_memory_per_node }}Ki
           - --threshold=5
-          - --deployment=heapster-v1.5.3
+          - --deployment=heapster-v1.6.0-beta.1
           - --container=eventer
           - --poll-period=300000
           - --estimator=exponential
@@ -23,31 +23,31 @@ data:
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
-  name: heapster-v1.5.3
+  name: heapster-v1.6.0-beta.1
   namespace: kube-system
   labels:
     k8s-app: heapster
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
-    version: v1.5.3
+    version: v1.6.0-beta.1
 spec:
   replicas: 1
   selector:
     matchLabels:
       k8s-app: heapster
-      version: v1.5.3
+      version: v1.6.0-beta.1
   template:
     metadata:
       labels:
         k8s-app: heapster
-        version: v1.5.3
+        version: v1.6.0-beta.1
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
         seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
       priorityClassName: system-cluster-critical
       containers:
-      - image: k8s.gcr.io/heapster-amd64:v1.5.3
+      - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
         name: heapster
         livenessProbe:
           httpGet:
@@ -57,12 +57,13 @@ spec:
           initialDelaySeconds: 180
           timeoutSeconds: 5
         command:
+          # On GCP, container.googleapis.com/instance_id node annotation is used to provide instance_id label for Stackdriver
           - /heapster
-          - --source=kubernetes.summary_api:''
+          - --source=kubernetes.summary_api:?host_id_annotation=container.googleapis.com/instance_id
           - --sink=stackdriver:?cluster_name={{ cluster_name }}&use_old_resources={{ use_old_resources }}&use_new_resources={{ use_new_resources }}&min_interval_sec=100&batch_export_timeout_sec=110&cluster_location={{ cluster_location }}
       # BEGIN_PROMETHEUS_TO_SD
       - name: prom-to-sd
-        image: k8s.gcr.io/prometheus-to-sd:v0.2.4
+        image: k8s.gcr.io/prometheus-to-sd:v0.3.1
         command:
           - /monitor
           - --source=heapster:http://localhost:8082?whitelisted=stackdriver_requests_count,stackdriver_timeseries_count
@@ -80,7 +81,7 @@ spec:
             fieldRef:
               fieldPath: metadata.namespace
       # END_PROMETHEUS_TO_SD
-      - image: k8s.gcr.io/addon-resizer:1.8.1
+      - image: k8s.gcr.io/addon-resizer:1.8.3
         name: heapster-nanny
         resources:
           limits:
@@ -109,10 +110,13 @@ spec:
           - --memory={{ base_metrics_memory }}
           - --extra-memory={{metrics_memory_per_node}}Mi
           - --threshold=5
-          - --deployment=heapster-v1.5.3
+          - --deployment=heapster-v1.6.0-beta.1
           - --container=heapster
           - --poll-period=300000
           - --estimator=exponential
+          # Specifies the smallest cluster (defined in number of nodes)
+          # resources will be scaled to.
+          - --minClusterSize={{ heapster_min_cluster_size }}
       volumes:
       - name: heapster-config-volume
         configMap:
@@ -23,31 +23,31 @@ data:
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
-  name: heapster-v1.5.3
+  name: heapster-v1.6.0-beta.1
   namespace: kube-system
   labels:
     k8s-app: heapster
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
-    version: v1.5.3
+    version: v1.6.0-beta.1
 spec:
   replicas: 1
   selector:
     matchLabels:
       k8s-app: heapster
-      version: v1.5.3
+      version: v1.6.0-beta.1
   template:
     metadata:
       labels:
         k8s-app: heapster
-        version: v1.5.3
+        version: v1.6.0-beta.1
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
         seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
       priorityClassName: system-cluster-critical
       containers:
-      - image: k8s.gcr.io/heapster-amd64:v1.5.3
+      - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
         name: heapster
         livenessProbe:
           httpGet:
@@ -59,7 +59,7 @@ spec:
         command:
           - /heapster
           - --source=kubernetes.summary_api:''
-      - image: k8s.gcr.io/addon-resizer:1.8.1
+      - image: k8s.gcr.io/addon-resizer:1.8.3
         name: heapster-nanny
         resources:
           limits:
@@ -88,10 +88,13 @@ spec:
           - --memory={{ base_metrics_memory }}
           - --extra-memory={{ metrics_memory_per_node }}Mi
           - --threshold=5
-          - --deployment=heapster-v1.5.3
+          - --deployment=heapster-v1.6.0-beta.1
           - --container=heapster
           - --poll-period=300000
           - --estimator=exponential
+          # Specifies the smallest cluster (defined in number of nodes)
+          # resources will be scaled to.
+          - --minClusterSize={{ heapster_min_cluster_size }}
       volumes:
       - name: heapster-config-volume
         configMap:
vendor/k8s.io/kubernetes/cluster/addons/dashboard/OWNERS | 10 (generated, vendored)
@@ -1,6 +1,12 @@
 approvers:
+- floreks
+- maciaszczykm
 - bryk
 reviewers:
-- cheld
-- cupofcat
+- danielromlein
+- floreks
 - ianlewis
+- konryd
+- maciaszczykm
+- mhenc
+- rf232
@@ -29,7 +29,7 @@ metadata:
 rules:
   - apiGroups: [""]
     resources: ["nodes"]
-    verbs: ["list"]
+    verbs: ["list", "watch"]
   - apiGroups: [""]
     resources: ["replicationcontrollers/scale"]
     verbs: ["get", "update"]
@@ -82,7 +82,7 @@ spec:
       priorityClassName: system-cluster-critical
       containers:
       - name: autoscaler
-        image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2
+        image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.2.0
         resources:
           requests:
             cpu: "20m"
vendor/k8s.io/kubernetes/cluster/addons/dns/coredns/coredns.yaml.base | 5 (generated, vendored)
@@ -66,7 +66,9 @@ data:
         prometheus :9153
         proxy . /etc/resolv.conf
         cache 30
+        loop
+        reload
         loadbalance
     }
 ---
 apiVersion: extensions/v1beta1
@@ -106,7 +108,7 @@ spec:
         operator: "Exists"
       containers:
       - name: coredns
-        image: k8s.gcr.io/coredns:1.1.3
+        image: k8s.gcr.io/coredns:1.2.2
         imagePullPolicy: IfNotPresent
         resources:
           limits:
@@ -161,6 +163,7 @@ metadata:
   name: kube-dns
   namespace: kube-system
   annotations:
+    prometheus.io/port: "9153"
     prometheus.io/scrape: "true"
   labels:
     k8s-app: kube-dns
vendor/k8s.io/kubernetes/cluster/addons/dns/coredns/coredns.yaml.in | 5 (generated, vendored)
@@ -66,7 +66,9 @@ data:
         prometheus :9153
         proxy . /etc/resolv.conf
         cache 30
+        loop
+        reload
         loadbalance
     }
 ---
 apiVersion: extensions/v1beta1
@@ -106,7 +108,7 @@ spec:
         operator: "Exists"
       containers:
       - name: coredns
-        image: k8s.gcr.io/coredns:1.1.3
+        image: k8s.gcr.io/coredns:1.2.2
         imagePullPolicy: IfNotPresent
         resources:
           limits:
@@ -161,6 +163,7 @@ metadata:
   name: kube-dns
   namespace: kube-system
   annotations:
+    prometheus.io/port: "9153"
     prometheus.io/scrape: "true"
   labels:
     k8s-app: kube-dns
vendor/k8s.io/kubernetes/cluster/addons/dns/coredns/coredns.yaml.sed | 5 (generated, vendored)
@@ -66,7 +66,9 @@ data:
         prometheus :9153
         proxy . /etc/resolv.conf
         cache 30
+        loop
+        reload
         loadbalance
     }
 ---
 apiVersion: extensions/v1beta1
@@ -106,7 +108,7 @@ spec:
         operator: "Exists"
       containers:
       - name: coredns
-        image: k8s.gcr.io/coredns:1.1.3
+        image: k8s.gcr.io/coredns:1.2.2
         imagePullPolicy: IfNotPresent
         resources:
           limits:
@@ -161,6 +163,7 @@ metadata:
   name: kube-dns
   namespace: kube-system
   annotations:
+    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns/README.md | 6 (generated, vendored)
@@ -9,7 +9,7 @@ can use the DNS Service’s IP to resolve DNS names.
 ## Manually scale kube-dns Deployment
 
 kube-dns creates only one DNS Pod by default. If
-[dns-horizontal-autoscaler](../dns-horizontal-autoscaler/)
+[dns-horizontal-autoscaler](../../dns-horizontal-autoscaler/)
 is not enabled, you may need to manually scale kube-dns Deployment.
 
 Please use below `kubectl scale` command to scale:
@@ -18,9 +18,9 @@ kubectl --namespace=kube-system scale deployment kube-dns --replicas=<NUM_YOU_WA
 ```
 
 Do not use `kubectl edit` to modify kube-dns Deployment object if it is
-controlled by [Addon Manager](../addon-manager/). Otherwise the modifications
+controlled by [Addon Manager](../../addon-manager/). Otherwise the modifications
 will be clobbered, in addition the replicas count for kube-dns Deployment will
-be reset to 1. See [Cluster add-ons README](../README.md) and
+be reset to 1. See [Cluster add-ons README](../../README.md) and
 [#36411](https://github.com/kubernetes/kubernetes/issues/36411) for reference.
 
 ## kube-dns addon templates
vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns/kube-dns.yaml.base | 7 (generated, vendored)
@@ -96,7 +96,7 @@ spec:
         optional: true
       containers:
       - name: kubedns
-        image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10
+        image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -147,7 +147,7 @@ spec:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
-        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10
+        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.13
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
@@ -166,6 +166,7 @@ spec:
        - -k
        - --cache-size=1000
        - --no-negcache
+        - --dns-loop-detect
        - --log-facility=-
        - --server=/__PILLAR__DNS__DOMAIN__/127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
@@ -186,7 +187,7 @@ spec:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
-        image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10
+        image: k8s.gcr.io/k8s-dns-sidecar:1.14.13
        livenessProbe:
          httpGet:
            path: /metrics
vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns/kube-dns.yaml.in | 7 (generated, vendored)
@@ -96,7 +96,7 @@ spec:
         optional: true
       containers:
       - name: kubedns
-        image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10
+        image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -147,7 +147,7 @@ spec:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
-        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10
+        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.13
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
@@ -166,6 +166,7 @@ spec:
        - -k
        - --cache-size=1000
        - --no-negcache
+        - --dns-loop-detect
        - --log-facility=-
        - --server=/{{ pillar['dns_domain'] }}/127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
@@ -186,7 +187,7 @@ spec:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
-        image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10
+        image: k8s.gcr.io/k8s-dns-sidecar:1.14.13
        livenessProbe:
          httpGet:
            path: /metrics
vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns/kube-dns.yaml.sed | 7 (generated, vendored)
@@ -96,7 +96,7 @@ spec:
         optional: true
       containers:
       - name: kubedns
-        image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10
+        image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -147,7 +147,7 @@ spec:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
-        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10
+        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.13
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
@@ -166,6 +166,7 @@ spec:
        - -k
        - --cache-size=1000
        - --no-negcache
+        - --dns-loop-detect
        - --log-facility=-
        - --server=/$DNS_DOMAIN/127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
@@ -186,7 +187,7 @@ spec:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
-        image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10
+        image: k8s.gcr.io/k8s-dns-sidecar:1.14.13
        livenessProbe:
          httpGet:
            path: /metrics
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/OWNERS | 2 (generated, vendored)
@@ -4,3 +4,5 @@ approvers:
 reviewers:
 - coffeepac
 - piosz
+labels:
+- sig/instrumentation
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/README.md | 19 (generated, vendored)
@@ -19,15 +19,16 @@ a Deployment, but allows for maintaining state on storage volumes.
 
 ### Security
 
-Elasticsearch has capabilities to enable authorization using the
-[X-Pack plugin][xPack]. See configuration parameter `xpack.security.enabled`
-in Elasticsearch and Kibana configurations. It can also be set via the
-`XPACK_SECURITY_ENABLED` env variable. After enabling the feature,
-follow [official documentation][setupCreds] to set up credentials in
-Elasticsearch and Kibana. Don't forget to propagate those credentials also to
-Fluentd in its [configuration][fluentdCreds], using for example
-[environment variables][fluentdEnvVar]. You can utilize [ConfigMaps][configMap]
-and [Secrets][secret] to store credentials in the Kubernetes apiserver.
+Elasticsearch has capabilities to enable authorization using the [X-Pack
+plugin][xPack]. For the sake of simplicity this example uses the fully open
+source prebuild images from elastic that do not contain the X-Pack plugin. If
+you need these features, please consider building the images from either the
+"basic" or "platinum" version. After enabling these features, follow [official
+documentation][setupCreds] to set up credentials in Elasticsearch and Kibana.
+Don't forget to propagate those credentials also to Fluentd in its
+[configuration][fluentdCreds], using for example [environment
+variables][fluentdEnvVar]. You can utilize [ConfigMaps][configMap] and
+[Secrets][secret] to store credentials in the Kubernetes apiserver.
 
 ### Initialization
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image/BUILD | 8 (generated, vendored)
@@ -18,11 +18,11 @@ go_library(
     deps = [
         "//pkg/apis/core:go_default_library",
         "//pkg/client/clientset_generated/internalclientset:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//staging/src/k8s.io/client-go/rest:go_default_library",
-        "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
-        "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//vendor/k8s.io/client-go/rest:go_default_library",
+        "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
+        "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
     ],
 )
 
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image/Dockerfile | 2 (generated, vendored)
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM docker.elastic.co/elasticsearch/elasticsearch:5.6.4
+FROM docker.elastic.co/elasticsearch/elasticsearch-oss:6.3.2
 
 VOLUME ["/data"]
 EXPOSE 9200 9300
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image/Makefile | 2 (generated, vendored)
@@ -16,7 +16,7 @@
 
 PREFIX = staging-k8s.gcr.io
 IMAGE = elasticsearch
-TAG = v5.6.4
+TAG = v6.3.0
 
 build:
 	docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .
@@ -12,6 +12,3 @@ path.data: /data
 network.host: 0.0.0.0
 
 discovery.zen.minimum_master_nodes: ${MINIMUM_MASTER_NODES}
-
-xpack.security.enabled: false
-xpack.monitoring.enabled: false
@@ -20,6 +20,7 @@ import (
 	"flag"
 	"fmt"
 	"os"
+	"strconv"
 	"strings"
 	"time"
 
@@ -104,7 +105,7 @@ func main() {
 	var endpoints *api.Endpoints
 	addrs := []string{}
 	// Wait for some endpoints.
-	count := 0
+	count, _ := strconv.Atoi(os.Getenv("MINIMUM_MASTER_NODES"))
 	for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
 		endpoints, err = client.Core().Endpoints(namespace).Get(serviceName, metav1.GetOptions{})
 		if err != nil {
@@ -115,7 +116,6 @@ func main() {
 		if len(addrs) > 0 && len(addrs) == count {
 			break
 		}
-		count = len(addrs)
 	}
 	// If there was an error finding endpoints then log a warning and quit.
 	if err != nil {
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image/run.sh | 2 (generated, vendored)
@@ -26,4 +26,4 @@ export MINIMUM_MASTER_NODES=${MINIMUM_MASTER_NODES:-2}
 chown -R elasticsearch:elasticsearch /data
 
 ./bin/elasticsearch_logging_discovery >> ./config/elasticsearch.yml
-exec su elasticsearch -c ./bin/es-docker
+exec su elasticsearch -c /usr/local/bin/docker-entrypoint.sh
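Aside: the es-image changes above (Dockerfile base image, the Go discovery binary, and run.sh) fit together at container start. A rough annotated sketch of the resulting startup sequence; the description of what the discovery binary prints is an assumption from the surrounding code, not quoted from the commit:

#!/usr/bin/env bash
# Sketch of the es-image startup flow after this commit.
export MINIMUM_MASTER_NODES=${MINIMUM_MASTER_NODES:-2}  # now read by the Go binary via os.Getenv
chown -R elasticsearch:elasticsearch /data

# The discovery binary waits (up to five minutes) until the logging service
# shows exactly MINIMUM_MASTER_NODES ready endpoints (parsed with
# strconv.Atoi above), then appends its discovery output to the config.
./bin/elasticsearch_logging_discovery >> ./config/elasticsearch.yml

# elasticsearch-oss 6.x ships a stock entrypoint; bin/es-docker no longer exists.
exec su elasticsearch -c /usr/local/bin/docker-entrypoint.sh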
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml | 8 (generated, vendored)
@@ -54,7 +54,7 @@ metadata:
   namespace: kube-system
   labels:
     k8s-app: elasticsearch-logging
-    version: v5.6.4
+    version: v6.2.5
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
 spec:
@@ -63,17 +63,17 @@ spec:
   selector:
     matchLabels:
       k8s-app: elasticsearch-logging
-      version: v5.6.4
+      version: v6.2.5
   template:
     metadata:
       labels:
         k8s-app: elasticsearch-logging
-        version: v5.6.4
+        version: v6.2.5
         kubernetes.io/cluster-service: "true"
     spec:
       serviceAccountName: elasticsearch-logging
       containers:
-      - image: k8s.gcr.io/elasticsearch:v5.6.4
+      - image: k8s.gcr.io/elasticsearch:v6.2.5
         name: elasticsearch-logging
         resources:
           # need more cpu upon initialization, therefore burstable class
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml | 36 (generated, vendored)
@@ -1,7 +1,7 @@
 kind: ConfigMap
 apiVersion: v1
 metadata:
-  name: fluentd-es-config-v0.1.4
+  name: fluentd-es-config-v0.1.5
   namespace: kube-system
   labels:
     addonmanager.kubernetes.io/mode: Reconcile
@@ -115,7 +115,6 @@ data:
       @type tail
       path /var/log/containers/*.log
      pos_file /var/log/es-containers.log.pos
-      time_format %Y-%m-%dT%H:%M:%S.%NZ
      tag raw.kubernetes.*
      read_from_head true
      <parse>
@@ -273,21 +272,6 @@ data:
       tag kube-scheduler
     </source>
 
-    # Example:
-    # I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
-    <source>
-      @id rescheduler.log
-      @type tail
-      format multiline
-      multiline_flush_interval 5s
-      format_firstline /^\w\d{4}/
-      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
-      time_format %m%d %H:%M:%S.%N
-      path /var/log/rescheduler.log
-      pos_file /var/log/es-rescheduler.log.pos
-      tag rescheduler
-    </source>
-
     # Example:
     # I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
     <source>
@@ -323,10 +307,11 @@ data:
     <source>
       @id journald-docker
       @type systemd
-      filters [{ "_SYSTEMD_UNIT": "docker.service" }]
+      matches [{ "_SYSTEMD_UNIT": "docker.service" }]
       <storage>
         @type local
         persistent true
         path /var/log/journald-docker.pos
       </storage>
+      read_from_head true
       tag docker
@@ -335,10 +320,11 @@ data:
     <source>
       @id journald-container-runtime
      @type systemd
-      filters [{ "_SYSTEMD_UNIT": "{{ container_runtime }}.service" }]
+      matches [{ "_SYSTEMD_UNIT": "{{ container_runtime }}.service" }]
      <storage>
        @type local
        persistent true
        path /var/log/journald-container-runtime.pos
      </storage>
+      read_from_head true
      tag container-runtime
@@ -347,10 +333,11 @@ data:
     <source>
       @id journald-kubelet
      @type systemd
-      filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
+      matches [{ "_SYSTEMD_UNIT": "kubelet.service" }]
      <storage>
        @type local
        persistent true
        path /var/log/journald-kubelet.pos
      </storage>
+      read_from_head true
      tag kubelet
@@ -359,22 +346,24 @@ data:
     <source>
       @id journald-node-problem-detector
      @type systemd
-      filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
+      matches [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
      <storage>
        @type local
        persistent true
        path /var/log/journald-node-problem-detector.pos
      </storage>
+      read_from_head true
      tag node-problem-detector
    </source>
 
 
     <source>
       @id kernel
       @type systemd
-      filters [{ "_TRANSPORT": "kernel" }]
+      matches [{ "_TRANSPORT": "kernel" }]
       <storage>
         @type local
         persistent true
         path /var/log/kernel.pos
       </storage>
       <entry>
         fields_strip_underscores true
@@ -435,6 +424,7 @@ data:
       @id elasticsearch
       @type elasticsearch
       @log_level info
+      type_name fluentd
       include_tag_key true
       host elasticsearch-logging
       port 9200
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml | 12 (generated, vendored)
@@ -48,24 +48,24 @@ roleRef:
 apiVersion: apps/v1
 kind: DaemonSet
 metadata:
-  name: fluentd-es-v2.0.4
+  name: fluentd-es-v2.2.0
   namespace: kube-system
   labels:
     k8s-app: fluentd-es
-    version: v2.0.4
+    version: v2.2.0
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
 spec:
   selector:
     matchLabels:
       k8s-app: fluentd-es
-      version: v2.0.4
+      version: v2.2.0
   template:
     metadata:
       labels:
         k8s-app: fluentd-es
         kubernetes.io/cluster-service: "true"
-        version: v2.0.4
+        version: v2.2.0
       # This annotation ensures that fluentd does not get evicted if the node
       # supports critical pod annotation based priority scheme.
       # Note that this does not guarantee admission on the nodes (#40573).
@@ -77,7 +77,7 @@ spec:
       serviceAccountName: fluentd-es
       containers:
       - name: fluentd-es
-        image: k8s.gcr.io/fluentd-elasticsearch:v2.0.4
+        image: k8s.gcr.io/fluentd-elasticsearch:v2.2.0
         env:
         - name: FLUENTD_ARGS
           value: --no-supervisor -q
@@ -107,4 +107,4 @@ spec:
           path: /var/lib/docker/containers
       - name: config-volume
         configMap:
-          name: fluentd-es-config-v0.1.4
+          name: fluentd-es-config-v0.1.5
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Gemfile | 16 (generated, vendored)
@@ -1,11 +1,11 @@
 source 'https://rubygems.org'
 
-gem 'fluentd', '<=1.1.0'
-gem 'activesupport', '~>5.1.4'
-gem 'fluent-plugin-kubernetes_metadata_filter', '~>1.0.0'
-gem 'fluent-plugin-elasticsearch', '~>2.4.1'
-gem 'fluent-plugin-systemd', '~>0.3.1'
-gem 'fluent-plugin-detect-exceptions', '~>0.0.9'
-gem 'fluent-plugin-prometheus', '~>0.3.0'
+gem 'fluentd', '<=1.2.4'
+gem 'activesupport', '~>5.2.1'
+gem 'fluent-plugin-kubernetes_metadata_filter', '~>2.0.0'
+gem 'fluent-plugin-elasticsearch', '~>2.11.5'
+gem 'fluent-plugin-systemd', '~>1.0.1'
+gem 'fluent-plugin-detect-exceptions', '~>0.0.11'
+gem 'fluent-plugin-prometheus', '~>1.0.1'
 gem 'fluent-plugin-multi-format-parser', '~>1.0.0'
-gem 'oj', '~>3.3.1.0'
+gem 'oj', '~>3.6.5'
@@ -16,7 +16,7 @@
 
 PREFIX = staging-k8s.gcr.io
 IMAGE = fluentd-elasticsearch
-TAG = v2.0.4
+TAG = v2.3.1
 
 build:
 	docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .
vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml | 6 (generated, vendored)
@@ -21,7 +21,7 @@ spec:
     spec:
       containers:
       - name: kibana-logging
-        image: docker.elastic.co/kibana/kibana:5.6.4
+        image: docker.elastic.co/kibana/kibana-oss:6.3.2
         resources:
           # need more cpu upon initialization, therefore burstable class
           limits:
@@ -33,10 +33,6 @@ spec:
             value: http://elasticsearch-logging:9200
           - name: SERVER_BASEPATH
             value: /api/v1/namespaces/kube-system/services/kibana-logging/proxy
-          - name: XPACK_MONITORING_ENABLED
-            value: "false"
-          - name: XPACK_SECURITY_ENABLED
-            value: "false"
         ports:
         - containerPort: 5601
           name: ui
vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/OWNERS | 2 (generated, vendored)
@@ -4,3 +4,5 @@ approvers:
 reviewers:
 - piosz
 - x13n
+labels:
+- sig/gcp
vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/event-exporter.yaml | 10 (generated, vendored)
@@ -29,11 +29,11 @@ subjects:
 apiVersion: apps/v1beta1
 kind: Deployment
 metadata:
-  name: event-exporter-v0.2.1
+  name: event-exporter-v0.2.3
   namespace: kube-system
   labels:
     k8s-app: event-exporter
-    version: v0.2.1
+    version: v0.2.3
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
 spec:
@@ -42,18 +42,18 @@ spec:
     metadata:
       labels:
         k8s-app: event-exporter
-        version: v0.2.1
+        version: v0.2.3
     spec:
       serviceAccountName: event-exporter-sa
       containers:
       - name: event-exporter
-        image: k8s.gcr.io/event-exporter:v0.2.1
+        image: k8s.gcr.io/event-exporter:v0.2.3
         command:
         - /event-exporter
        - -sink-opts=-stackdriver-resource-model={{ exporter_sd_resource_model }}
      # BEGIN_PROMETHEUS_TO_SD
      - name: prometheus-to-sd-exporter
-        image: k8s.gcr.io/prometheus-to-sd:v0.2.4
+        image: k8s.gcr.io/prometheus-to-sd:v0.3.1
        command:
        - /monitor
        - --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons
vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/fluentd-gcp-configmap-old.yaml | 14 (generated, vendored)
@@ -210,20 +210,6 @@ data:
       tag kube-scheduler
     </source>
 
-    # Example:
-    # I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
-    <source>
-      @type tail
-      format multiline
-      multiline_flush_interval 5s
-      format_firstline /^\w\d{4}/
-      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
-      time_format %m%d %H:%M:%S.%N
-      path /var/log/rescheduler.log
-      pos_file /var/log/gcp-rescheduler.log.pos
-      tag rescheduler
-    </source>
-
     # Example:
     # I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
     <source>
42
vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/fluentd-gcp-configmap.yaml
generated
vendored
@@ -98,6 +98,8 @@ data:
# instead of jsonPayload after extracting 'time', 'severity' and
# 'stream' from the record.
message ${record['log']}
# If 'severity' is not set, assume stderr is ERROR and stdout is INFO.
severity ${record['severity'] || if record['stream'] == 'stderr' then 'ERROR' else 'INFO' end}
</record>
tag ${if record['stream'] == 'stderr' then 'raw.stderr' else 'raw.stdout' end}
remove_keys stream,log
@@ -108,8 +110,8 @@ data:
@type detect_exceptions

remove_tag_prefix raw
message log
stream stream
message message
stream "logging.googleapis.com/local_resource_id"
multiline_flush_interval 5
max_bytes 500000
max_lines 1000
@@ -223,20 +225,6 @@ data:
tag kube-scheduler
</source>

# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/rescheduler.log
pos_file /var/log/gcp-rescheduler.log.pos
tag rescheduler
</source>

# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
@@ -386,6 +374,12 @@ data:
@type null
</match>

# Add a unique insertId to each log entry that doesn't already have it.
# This helps guarantee the order and prevent log duplication.
<filter **>
@type add_insert_ids
</filter>

# This section is exclusive for k8s_container logs. These logs come with
# 'stderr'/'stdout' tags.
# We use a separate output stanza for 'k8s_node' logs with a smaller buffer
@@ -408,9 +402,9 @@ data:
buffer_queue_full_action block
# Set the chunk limit conservatively to avoid exceeding the recommended
# chunk size of 5MB per write request.
buffer_chunk_limit 1M
buffer_chunk_limit 512k
# Cap the combined memory usage of this buffer and the one below to
# 1MiB/chunk * (6 + 2) chunks = 8 MiB
# 512KiB/chunk * (6 + 2) chunks = 4 MiB
buffer_queue_limit 6
# Never wait more than 5 seconds before flushing logs in the non-error case.
flush_interval 5s
@@ -421,8 +415,9 @@ data:
# Use multiple threads for processing.
num_threads 2
use_grpc true
# Use Metadata Agent to get monitored resource.
enable_metadata_agent true
# Skip timestamp adjustment as this is in a controlled environment with
# known timestamp format. This helps with CPU usage.
adjust_invalid_timestamps false
</match>

# Attach local_resource_id for 'k8s_node' monitored resource.
@@ -450,15 +445,16 @@ data:
buffer_type file
buffer_path /var/log/fluentd-buffers/kubernetes.system.buffer
buffer_queue_full_action block
buffer_chunk_limit 1M
buffer_chunk_limit 512k
buffer_queue_limit 2
flush_interval 5s
max_retry_wait 30
disable_retry_limit
num_threads 2
use_grpc true
# Use Metadata Agent to get monitored resource.
enable_metadata_agent true
# Skip timestamp adjustment as this is in a controlled environment with
# known timestamp format. This helps with CPU usage.
adjust_invalid_timestamps false
</match>
metadata:
name: fluentd-gcp-config-v1.2.5
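A quick check of the buffer arithmetic in the hunk above, as a sketch; the "+2" in the comment is assumed to account for the chunks the two output threads hold in flight:

    chunk_kib=512   # buffer_chunk_limit 512k
    queue_limit=6   # buffer_queue_limit 6
    in_flight=2     # assumed: one chunk per output thread (num_threads 2)
    echo "$(( chunk_kib * (queue_limit + in_flight) / 1024 )) MiB"   # prints: 4 MiB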
16
vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml
generated
vendored
@@ -1,13 +1,13 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: fluentd-gcp-v3.0.0
name: fluentd-gcp-{{ fluentd_gcp_yaml_version }}
namespace: kube-system
labels:
k8s-app: fluentd-gcp
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v3.0.0
version: {{ fluentd_gcp_yaml_version }}
spec:
updateStrategy:
type: RollingUpdate
@@ -16,7 +16,7 @@ spec:
labels:
k8s-app: fluentd-gcp
kubernetes.io/cluster-service: "true"
version: v3.0.0
version: {{ fluentd_gcp_yaml_version }}
# This annotation ensures that fluentd does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that this does not guarantee admission on the nodes (#40573).
@@ -26,6 +26,7 @@ spec:
priorityClassName: system-node-critical
serviceAccountName: fluentd-gcp
dnsPolicy: Default
hostNetwork: true
containers:
- name: fluentd-gcp
image: gcr.io/stackdriver-agents/stackdriver-logging-agent:{{ fluentd_gcp_version }}
@@ -79,7 +80,7 @@ spec:
fi;
# BEGIN_PROMETHEUS_TO_SD
- name: prometheus-to-sd-exporter
image: k8s.gcr.io/prometheus-to-sd:v0.2.4
image: k8s.gcr.io/prometheus-to-sd:v0.3.1
command:
- /monitor
- --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons
@@ -106,7 +107,12 @@ spec:
effect: "NoExecute"
- operator: "Exists"
effect: "NoSchedule"
terminationGracePeriodSeconds: 30
terminationGracePeriodSeconds: 60
tolerations:
- operator: "Exists"
effect: "NoExecute"
- operator: "Exists"
effect: "NoSchedule"
volumes:
- name: varlog
hostPath:
6
vendor/k8s.io/kubernetes/cluster/addons/fluentd-gcp/scaler-deployment.yaml
generated
vendored
@@ -5,7 +5,7 @@ metadata:
namespace: kube-system
labels:
k8s-app: fluentd-gcp-scaler
version: v0.3.0
version: v0.5.0
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
@@ -19,10 +19,10 @@ spec:
serviceAccountName: fluentd-gcp-scaler
containers:
- name: fluentd-gcp-scaler
image: k8s.gcr.io/fluentd-gcp-scaler:0.3
image: k8s.gcr.io/fluentd-gcp-scaler:0.5
command:
- /scaler.sh
- --ds-name=fluentd-gcp-v3.0.0
- --ds-name=fluentd-gcp-{{ fluentd_gcp_yaml_version }}
- --scaling-policy=fluentd-gcp-scaling-policy
env:
# Defaults, used if no overrides are found in fluentd-gcp-scaling-policy
8
vendor/k8s.io/kubernetes/cluster/addons/ip-masq-agent/ip-masq-agent.yaml
generated
vendored
@@ -29,7 +29,9 @@ spec:
hostNetwork: true
containers:
- name: ip-masq-agent
image: k8s.gcr.io/ip-masq-agent-amd64:v2.0.2
image: k8s.gcr.io/ip-masq-agent-amd64:v2.1.1
args:
- --masq-chain=IP-MASQ
resources:
requests:
cpu: 10m
@@ -52,5 +54,9 @@ spec:
- key: config
path: ip-masq-agent
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
- key: "CriticalAddonsOnly"
operator: "Exists"
3945
vendor/k8s.io/kubernetes/cluster/addons/istio/auth/istio-auth.yaml
generated
vendored
File diff suppressed because it is too large
3932
vendor/k8s.io/kubernetes/cluster/addons/istio/noauth/istio.yaml
generated
vendored
File diff suppressed because it is too large
2
vendor/k8s.io/kubernetes/cluster/addons/kube-proxy/kube-proxy-ds.yaml
generated
vendored
@@ -24,7 +24,7 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
{{pod_priority}}
priorityClassName: system-node-critical
hostNetwork: true
nodeSelector:
beta.kubernetes.io/kube-proxy-ds-ready: "true"
@@ -7,9 +7,7 @@ metadata:
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- ""
- "apps"
- "extensions"
- "*"
resources:
- "*"
verbs:
85
vendor/k8s.io/kubernetes/cluster/addons/metadata-agent/stackdriver/metadata-agent.yaml
generated
vendored
@@ -7,22 +7,6 @@ metadata:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
name: metadata-agent-config
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
data:
node_level.conf: |-
KubernetesUseWatch: true
KubernetesClusterLevelMetadata: false
cluster_level.conf: |-
KubernetesUseWatch: true
KubernetesClusterLevelMetadata: true
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
@@ -44,28 +28,24 @@ spec:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
serviceAccountName: metadata-agent
priorityClassName: system-node-critical
containers:
- image: gcr.io/stackdriver-agents/stackdriver-metadata-agent:0.2-0.0.19-1
- image: gcr.io/stackdriver-agents/stackdriver-metadata-agent:0.2-0.0.21-1
imagePullPolicy: IfNotPresent
name: metadata-agent
livenessProbe:
exec:
command:
- /bin/bash
- -c
- |
if [[ -f /var/run/metadata-agent/health/unhealthy ]]; then
exit 1;
fi
periodSeconds: 10
httpGet:
path: /healthz
port: 8000
initialDelaySeconds: 30
periodSeconds: 60
timeoutSeconds: 5
failureThreshold: 1
successThreshold: 1
volumeMounts:
- name: metadata-agent-config-volume
mountPath: /etc/config
command:
- /opt/stackdriver/metadata/sbin/metadatad
- --config-file=/etc/config/node_level.conf
args:
- -o KubernetesUseWatch=true
- -o KubernetesClusterLevelMetadata=false
- -o MetadataReporterPurgeDeleted=true
ports:
- containerPort: 8000
hostPort: 8799
@@ -78,10 +58,11 @@ spec:
restartPolicy: Always
schedulerName: default-scheduler
terminationGracePeriodSeconds: 30
volumes:
- name: metadata-agent-config-volume
configMap:
name: metadata-agent-config
tolerations:
- operator: "Exists"
effect: "NoExecute"
- operator: "Exists"
effect: "NoSchedule"
updateStrategy:
rollingUpdate:
maxUnavailable: 1
@@ -109,28 +90,24 @@ spec:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
serviceAccountName: metadata-agent
priorityClassName: system-cluster-critical
containers:
- image: gcr.io/stackdriver-agents/stackdriver-metadata-agent:0.2-0.0.19-1
- image: gcr.io/stackdriver-agents/stackdriver-metadata-agent:0.2-0.0.21-1
imagePullPolicy: IfNotPresent
name: metadata-agent
livenessProbe:
exec:
command:
- /bin/bash
- -c
- |
if [[ -f /var/run/metadata-agent/health/unhealthy ]]; then
exit 1;
fi
periodSeconds: 10
httpGet:
path: /healthz
port: 8000
initialDelaySeconds: 30
periodSeconds: 60
timeoutSeconds: 5
failureThreshold: 1
successThreshold: 1
volumeMounts:
- name: metadata-agent-config-volume
mountPath: /etc/config
command:
- /opt/stackdriver/metadata/sbin/metadatad
- --config-file=/etc/config/cluster_level.conf
args:
- -o KubernetesUseWatch=true
- -o KubernetesClusterLevelMetadata=true
- -o MetadataReporterPurgeDeleted=true
ports:
- containerPort: 8000
protocol: TCP
@@ -142,10 +119,6 @@ spec:
restartPolicy: Always
schedulerName: default-scheduler
terminationGracePeriodSeconds: 30
volumes:
- name: metadata-agent-config-volume
configMap:
name: metadata-agent-config
strategy:
rollingUpdate:
maxUnavailable: 1
16
vendor/k8s.io/kubernetes/cluster/addons/metadata-agent/stackdriver/podsecuritypolicies/metadata-agent-psp-binding.yaml
generated
vendored
Normal file
@@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: gce:podsecuritypolicy:metadata-agent
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/cluster-service: "true"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: gce:podsecuritypolicy:privileged
subjects:
- kind: ServiceAccount
name: metadata-agent
namespace: kube-system
4
vendor/k8s.io/kubernetes/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml
generated
vendored
@@ -44,7 +44,7 @@ spec:
effect: "NoSchedule"
containers:
- name: metadata-proxy
image: k8s.gcr.io/metadata-proxy:v0.1.9
image: k8s.gcr.io/metadata-proxy:v0.1.10
securityContext:
privileged: true
# Request and limit resources to get guaranteed QoS.
@@ -57,7 +57,7 @@ spec:
cpu: "30m"
# BEGIN_PROMETHEUS_TO_SD
- name: prometheus-to-sd-exporter
image: k8s.gcr.io/prometheus-to-sd:v0.2.4
image: k8s.gcr.io/prometheus-to-sd:v0.3.1
# Request and limit resources to get guaranteed QoS.
resources:
requests:
29
vendor/k8s.io/kubernetes/cluster/addons/metrics-server/metrics-server-deployment.yaml
generated
vendored
@@ -23,24 +23,24 @@ data:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: metrics-server-v0.2.1
name: metrics-server-v0.3.1
namespace: kube-system
labels:
k8s-app: metrics-server
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v0.2.1
version: v0.3.1
spec:
selector:
matchLabels:
k8s-app: metrics-server
version: v0.2.1
version: v0.3.1
template:
metadata:
name: metrics-server
labels:
k8s-app: metrics-server
version: v0.2.1
version: v0.3.1
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
@@ -49,16 +49,20 @@ spec:
serviceAccountName: metrics-server
containers:
- name: metrics-server
image: k8s.gcr.io/metrics-server-amd64:v0.2.1
image: k8s.gcr.io/metrics-server-amd64:v0.3.1
command:
- /metrics-server
- --source=kubernetes.summary_api:''
- --metric-resolution=30s
# These are needed for GKE, which doesn't support secure communication yet.
# Remove these lines for non-GKE clusters, and when GKE supports token-based auth.
- --kubelet-port=10255
- --deprecated-kubelet-completely-insecure=true
ports:
- containerPort: 443
name: https
protocol: TCP
- name: metrics-server-nanny
image: k8s.gcr.io/addon-resizer:1.8.1
image: k8s.gcr.io/addon-resizer:1.8.3
resources:
limits:
cpu: 100m
@@ -81,15 +85,18 @@ spec:
command:
- /pod_nanny
- --config-dir=/etc/config
- --cpu=40m
- --cpu={{ base_metrics_server_cpu }}
- --extra-cpu=0.5m
- --memory=40Mi
- --extra-memory=4Mi
- --memory={{ base_metrics_server_memory }}
- --extra-memory={{ metrics_server_memory_per_node }}Mi
- --threshold=5
- --deployment=metrics-server-v0.2.1
- --deployment=metrics-server-v0.3.1
- --container=metrics-server
- --poll-period=300000
- --estimator=exponential
# Specifies the smallest cluster (defined in number of nodes)
# resources will be scaled to.
- --minClusterSize={{ metrics_server_min_cluster_size }}
volumes:
- name: metrics-server-config-volume
configMap:
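The nanny hunk above swaps the hard-coded --cpu=40m/--memory=40Mi/--extra-memory=4Mi values for template variables. The addon-resizer scales requests roughly as base + extra-per-node * node count; a sketch, assuming the templated defaults keep the old hard-coded values:

    base_mem_mi=40    # assumed default for {{ base_metrics_server_memory }}
    extra_mem_mi=4    # assumed default for {{ metrics_server_memory_per_node }}
    nodes=100
    echo "metrics-server memory request: $(( base_mem_mi + extra_mem_mi * nodes ))Mi"   # -> 440Mi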
@@ -33,6 +33,9 @@ rules:
- apiGroups: ["apps"]
resources: ["statefulsets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["get", "list", "watch"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets"]
verbs: ["get", "list", "watch"]
5
vendor/k8s.io/kubernetes/cluster/addons/runtimeclass/OWNERS
generated
vendored
Normal file
@@ -0,0 +1,5 @@
approvers:
- tallclair
- dchen1107
reviewers:
- sig-node-reviewers
12
vendor/k8s.io/kubernetes/cluster/addons/runtimeclass/README.md
generated
vendored
Normal file
@@ -0,0 +1,12 @@
# RuntimeClass

RuntimeClass is an alpha feature for supporting multiple container runtimes within a cluster. When
enabled, pods can select a RuntimeClass to run with using the `PodSpec.RuntimeClassName` field.

To enable RuntimeClass, set the feature gate `RuntimeClass=true`, and ensure the CRD defined in this
directory is installed.

For more information, see:
https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md

[]()
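A minimal usage sketch to go with the README above, assuming the cluster runs with the RuntimeClass=true feature gate and the CRD from the next file is installed; the class name and the gvisor handler are hypothetical and must match a handler configured in the node's CRI runtime:

    kubectl apply -f runtimeclass_crd.yaml
    cat <<EOF | kubectl apply -f -
    apiVersion: node.k8s.io/v1alpha1
    kind: RuntimeClass
    metadata:
      name: sandboxed            # hypothetical class name
    spec:
      runtimeHandler: gvisor     # hypothetical handler; must exist in the CRI config
    EOF
    # Pods then opt in via spec.runtimeClassName: sandboxed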
26
vendor/k8s.io/kubernetes/cluster/addons/runtimeclass/runtimeclass_crd.yaml
generated
vendored
Normal file
@@ -0,0 +1,26 @@
kind: CustomResourceDefinition
apiVersion: apiextensions.k8s.io/v1beta1
metadata:
name: runtimeclasses.node.k8s.io
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
group: node.k8s.io
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
names:
plural: runtimeclasses
singular: runtimeclass
kind: RuntimeClass
scope: Cluster
validation:
openAPIV3Schema:
properties:
spec:
properties:
runtimeHandler:
type: string
pattern: '^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)?$'
2
vendor/k8s.io/kubernetes/cluster/addons/storage-class/local/default.yaml
generated
vendored
@@ -6,5 +6,5 @@ metadata:
annotations:
storageclass.beta.kubernetes.io/is-default-class: "true"
labels:
addonmanager.kubernetes.io/mode: Reconcile
addonmanager.kubernetes.io/mode: EnsureExists
provisioner: kubernetes.io/host-path
2
vendor/k8s.io/kubernetes/cluster/centos/config-default.sh
generated
vendored
@@ -124,7 +124,7 @@ export FLANNEL_NET=${FLANNEL_NET:-"172.16.0.0/16"}
# modification is overwritten.
# If we included ResourceQuota, we should keep it at the end of the list to
# prevent incrementing quota usage prematurely.
export ADMISSION_CONTROL=${ADMISSION_CONTROL:-"Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeClaimResize,DefaultTolerationSeconds,Priority,StorageObjectInUseProtection,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"}
export ADMISSION_CONTROL=${ADMISSION_CONTROL:-"NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeClaimResize,DefaultTolerationSeconds,Priority,StorageObjectInUseProtection,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"}

# Extra options to set on the Docker command line.
# This is useful for setting --insecure-registry for local registries.
2
vendor/k8s.io/kubernetes/cluster/centos/master/scripts/apiserver.sh
generated
vendored
@@ -66,6 +66,8 @@ KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
# LimitRanger, AlwaysDeny, SecurityContextDeny, NamespaceExists,
# NamespaceLifecycle, NamespaceAutoProvision, AlwaysAdmit,
# ServiceAccount, DefaultStorageClass, DefaultTolerationSeconds, ResourceQuota
# Mark Deprecated. Use --enable-admission-plugins or --disable-admission-plugins instead since v1.10.
# It will be removed in a future version.
KUBE_ADMISSION_CONTROL="--admission-control=${ADMISSION_CONTROL}"

# --client-ca-file="": If set, any request presenting a client certificate signed
3
vendor/k8s.io/kubernetes/cluster/centos/master/scripts/controller-manager.sh
generated
vendored
@@ -30,7 +30,8 @@ KUBE_CONTROLLER_MANAGER_ROOT_CA_FILE="--root-ca-file=/srv/kubernetes/ca.crt"
# RSA key used to sign service account tokens.
KUBE_CONTROLLER_MANAGER_SERVICE_ACCOUNT_PRIVATE_KEY_FILE="--service-account-private-key-file=/srv/kubernetes/server.key"

# --leader-elect
# --leader-elect: Start a leader election client and gain leadership before
# executing the main loop. Enable this when running replicated components for high availability.
KUBE_LEADER_ELECT="--leader-elect"
EOF

4
vendor/k8s.io/kubernetes/cluster/centos/master/scripts/scheduler.sh
generated
vendored
@@ -27,9 +27,11 @@ KUBE_LOGTOSTDERR="--logtostderr=true"
# --v=0: log level for V logs
KUBE_LOG_LEVEL="--v=4"

# --master: The address of the Kubernetes API server (overrides any value in kubeconfig).
KUBE_MASTER="--master=${MASTER_ADDRESS}:8080"

# --leader-elect
# --leader-elect: Start a leader election client and gain leadership before
# executing the main loop. Enable this when running replicated components for high availability.
KUBE_LEADER_ELECT="--leader-elect"

# Add your own!
1
vendor/k8s.io/kubernetes/cluster/centos/node/scripts/kubelet.sh
generated
vendored
@@ -87,6 +87,7 @@ EnvironmentFile=-/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet ${KUBELET_OPTS}
Restart=on-failure
KillMode=process
RestartSec=15s

[Install]
WantedBy=multi-user.target
3
vendor/k8s.io/kubernetes/cluster/clientbin.sh
generated
vendored
@@ -47,6 +47,9 @@ case "$(uname -m)" in
arm*)
host_arch=arm
;;
aarch64*)
host_arch=arm64
;;
i?86*)
host_arch=386
;;
@@ -3,12 +3,12 @@ kind: RoleBinding
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
name: gce:cloud-provider
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cloud-provider
name: gce:cloud-provider
subjects:
- kind: ServiceAccount
name: cloud-provider
@@ -19,11 +19,11 @@ kind: ClusterRoleBinding
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
name: gce:cloud-provider
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cloud-provider
name: gce:cloud-provider
subjects:
- kind: ServiceAccount
name: cloud-provider
48
vendor/k8s.io/kubernetes/cluster/gce/addons/loadbalancing/cloud-provider-role.yaml
generated
vendored
@@ -3,7 +3,7 @@ kind: Role
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
name: gce:cloud-provider
namespace: kube-system
rules:
- apiGroups:
@@ -23,7 +23,51 @@ kind: ClusterRole
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
name: gce:cloud-provider
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
namespace: kube-system
annotations:
kubernetes.io/deprecation: 'cloud-provider role is DEPRECATED in the
concern of potential collisions and will be removed in 1.16. Do not use
this role.'
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- get
- patch
- update
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
annotations:
kubernetes.io/deprecation: 'cloud-provider clusterrole is DEPRECATED in the
concern of potential collisions and will be removed in 1.16. Do not use
this role.'
rules:
- apiGroups:
- ""
4
vendor/k8s.io/kubernetes/cluster/gce/addons/node-termination-handler/README
generated
vendored
Normal file
@@ -0,0 +1,4 @@
# GCE Node Termination Handler

This addon deploys [GCE Node Termination Handler](https://github.com/GoogleCloudPlatform/k8s-node-termination-handler) on to kubernetes clusters on GCP.
It is meant to help translate GCE VM termination notifications into kubernetes graceful terminations.
76
vendor/k8s.io/kubernetes/cluster/gce/addons/node-termination-handler/daemonset.yaml
generated
vendored
Normal file
@@ -0,0 +1,76 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
k8s-app: node-termination-handler
namespace: kube-system
name: node-termination-handler
spec:
selector:
matchLabels:
k8s-app: node-termination-handler
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
k8s-app: node-termination-handler
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-node-critical
# Necessary to reboot node
hostPID: true
affinity:
nodeAffinity:
# Restrict to GPU nodes or preemptible nodes
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: cloud.google.com/gke-accelerator
operator: Exists
- matchExpressions:
- key: cloud.google.com/gke-preemptible
operator: Exists
volumes:
- name: klet-service-account
hostPath:
path: /var/lib/kubelet
- name: klet-ca-crt
hostPath:
path: /etc/srv/kubernetes
tolerations:
# Run regardless of any existing taints.
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
containers:
- image: k8s.gcr.io/gke-node-termination-handler@sha256:e08ca863a547754fa7b75064bdad04f04cbef86c7b0a181ecc7304e747623181
name: node-termination-handler
command: ["./node-termination-handler"]
args: ["--logtostderr", "--exclude-pods=$(POD_NAME):$(POD_NAMESPACE)", "-v=10", "--kubeconfig=/var/lib/kubelet/kubeconfig", "--annotation=cloud.google.com/impending-node-termination"]
securityContext:
capabilities:
# Necessary to reboot node
add: ["SYS_BOOT"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
resources:
limits:
cpu: 50m
memory: 30Mi
volumeMounts:
- name: klet-service-account
mountPath: /var/lib/kubelet
- name: klet-ca-crt
mountPath: /etc/srv/kubernetes
16
vendor/k8s.io/kubernetes/cluster/gce/addons/podsecuritypolicies/unprivileged-addon.yaml
generated
vendored
@@ -19,6 +19,22 @@ metadata:
spec:
privileged: false
allowPrivilegeEscalation: false
# The docker default set of capabilities
allowedCapabilities:
- SETPCAP
- MKNOD
- AUDIT_WRITE
- CHOWN
- NET_RAW
- DAC_OVERRIDE
- FOWNER
- FSETID
- KILL
- SETGID
- SETUID
- NET_BIND_SERVICE
- SYS_CHROOT
- SETFCAP
volumes:
- 'emptyDir'
- 'configMap'
4
vendor/k8s.io/kubernetes/cluster/gce/config-common.sh
generated
vendored
@@ -99,13 +99,13 @@ function get-cluster-ip-range {
}

# Calculate ip alias range based on max number of pods.
# Let pow be the smallest integer which is bigger than log2($1 * 2).
# Let pow be the smallest integer which is bigger or equal to log2($1 * 2).
# (32 - pow) will be returned.
#
# $1: The number of max pods limitation.
function get-alias-range-size() {
for pow in {0..31}; do
if (( 1 << $pow > $1 * 2 )); then
if (( 1 << $pow >= $1 * 2 )); then
echo $((32 - pow))
return 0
fi
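The change from '>' to '>=' above only matters when $1 * 2 lands exactly on a power of two. A worked example of the function, as a sketch:

    # max pods = 110: 110*2 = 220; the smallest pow with 2^pow >= 220 is 8 (256),
    # so the function prints 32-8 = 24, i.e. a /24 alias range per node.
    get-alias-range-size 110    # -> 24
    # max pods = 128: 128*2 = 256 = 2^8. The old strict '>' only stopped at pow=9
    # and returned a /23, doubling the range; '>=' stops at pow=8 and returns /24.
    get-alias-range-size 128    # -> 24 (was 23 before this fix)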
57
vendor/k8s.io/kubernetes/cluster/gce/config-default.sh
generated
vendored
@@ -37,6 +37,14 @@ MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)}
NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
NODE_LOCAL_SSDS=${NODE_LOCAL_SSDS:-0}

# Historically fluentd was a manifest pod and then was migrated to DaemonSet.
# To avoid situation during cluster upgrade when there are two instances
# of fluentd running on a node, kubelet need to mark node on which
# fluentd is not running as a manifest pod with appropriate label.
# TODO(piosz): remove this in 1.8
NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"

# An extension to local SSDs allowing users to specify block/fs and SCSI/NVMe devices
# Format of this variable will be "#,scsi/nvme,block/fs" you can specify multiple
# configurations by separating them by a semi-colon ex. "2,scsi,fs;1,nvme,block"
@@ -51,6 +59,7 @@ PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}
KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-} # default value calculated below
CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false}
MIG_WAIT_UNTIL_STABLE_TIMEOUT=${MIG_WAIT_UNTIL_STABLE_TIMEOUT:-1800}

MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
@@ -73,7 +82,6 @@ fi
# you are updating the os image versions, update this variable.
# Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
CVM_VERSION=${CVM_VERSION:-container-vm-v20170627}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-65-10323-64-0}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
@@ -106,7 +114,6 @@ CLUSTER_NAME="${CLUSTER_NAME:-${INSTANCE_PREFIX}}"
MASTER_NAME="${INSTANCE_PREFIX}-master"
AGGREGATOR_MASTER_NAME="${INSTANCE_PREFIX}-aggregator"
INITIAL_ETCD_CLUSTER="${MASTER_NAME}"
ETCD_QUORUM_READ="${ENABLE_ETCD_QUORUM_READ:-false}"
MASTER_TAG="${INSTANCE_PREFIX}-master"
NODE_TAG="${INSTANCE_PREFIX}-minion"

@@ -164,16 +171,15 @@ ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"
# Useful for scheduling heapster in large clusters with nodes of small size.
HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"

# Historically fluentd was a manifest pod and then was migrated to DaemonSet.
# To avoid situation during cluster upgrade when there are two instances
# of fluentd running on a node, kubelet need to mark node on which
# fluentd is not running as a manifest pod with appropriate label.
# TODO(piosz): remove this in 1.8
NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"

# NON_MASTER_NODE_LABELS are labels will only be applied on non-master nodes.
NON_MASTER_NODE_LABELS="${KUBE_NON_MASTER_NODE_LABELS:-}"

if [[ "${PREEMPTIBLE_MASTER}" == "true" ]]; then
NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-preemptible=true"
elif [[ "${PREEMPTIBLE_NODE}" == "true" ]]; then
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-preemptible=true"
fi

# To avoid running Calico on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
if [[ ${NETWORK_POLICY_PROVIDER:-} == "calico" ]]; then
@@ -183,6 +189,8 @@ fi
# Optional: Enable netd.
ENABLE_NETD="${KUBE_ENABLE_NETD:-false}"
CUSTOM_NETD_YAML="${KUBE_CUSTOM_NETD_YAML:-}"
CUSTOM_CALICO_NODE_DAEMONSET_YAML="${KUBE_CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}"
CUSTOM_TYPHA_DEPLOYMENT_YAML="${KUBE_CUSTOM_TYPHA_DEPLOYMENT_YAML:-}"

# To avoid running netd on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
@@ -195,6 +203,7 @@ fi
#
# TODO(#8867) Enable by default.
ENABLE_METADATA_CONCEALMENT="${ENABLE_METADATA_CONCEALMENT:-false}" # true, false
METADATA_CONCEALMENT_NO_FIREWALL="${METADATA_CONCEALMENT_NO_FIREWALL:-false}" # true, false
if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then
# Put the necessary label on the node so the daemonset gets scheduled.
NODE_LABELS="${NODE_LABELS},beta.kubernetes.io/metadata-proxy-ready=true"
@@ -216,7 +225,7 @@ resources:
- aesgcm:
keys:
- name: key1
secret: $(dd if=/dev/random bs=32 count=1 status=none | base64 | tr -d '\r\n')
secret: $(dd if=/dev/urandom iflag=fullblock bs=32 count=1 2>/dev/null | base64 | tr -d '\r\n')
EOM
)
fi
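The switch from /dev/random to /dev/urandom above avoids blocking on the entropy pool, and iflag=fullblock guards against short reads: without it, dd may return fewer than 32 bytes and silently produce a short AES-GCM key. A quick check, as a sketch:

    dd if=/dev/urandom iflag=fullblock bs=32 count=1 2>/dev/null \
      | base64 | tr -d '\r\n' | wc -c    # always 44: 32 bytes -> 44 base64 chars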
@@ -247,7 +256,7 @@ FEATURE_GATES="${KUBE_FEATURE_GATES:-ExperimentalCriticalPodAnnotation=true}"
if [[ ! -z "${NODE_ACCELERATORS}" ]]; then
FEATURE_GATES="${FEATURE_GATES},DevicePlugins=true"
if [[ "${NODE_ACCELERATORS}" =~ .*type=([a-zA-Z0-9-]+).* ]]; then
NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}"
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}"
fi
fi

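The hunk above moves the accelerator label from NODE_LABELS to NON_MASTER_NODE_LABELS; the regex capture group picks out the value after "type=". A sketch with a hypothetical setting:

    NODE_ACCELERATORS="type=nvidia-tesla-p100,count=4"   # hypothetical value
    if [[ "${NODE_ACCELERATORS}" =~ .*type=([a-zA-Z0-9-]+).* ]]; then
      echo "cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}"
    fi
    # -> cloud.google.com/gke-accelerator=nvidia-tesla-p100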
@@ -286,9 +295,6 @@ if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
AUTOSCALER_EXPANDER_CONFIG="${KUBE_AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
fi

# Optional: Enable Rescheduler
ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"

# Optional: Enable allocation of pod IPs using IP aliases.
#
# BETA FEATURE.
@@ -339,7 +345,7 @@ if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
fi

# Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection
ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection

if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
@@ -391,10 +397,6 @@ METADATA_CLOBBERS_CONFIG="${METADATA_CLOBBERS_CONFIG:-false}"

ENABLE_BIG_CLUSTER_SUBNETS="${ENABLE_BIG_CLUSTER_SUBNETS:-false}"

if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]]; then
echo "Warning: Basic audit logging is deprecated and will be removed. Please use advanced auditing instead."
fi

if [[ -n "${LOGROTATE_FILES_MAX_COUNT:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_FILES_MAX_COUNT"
fi
@@ -403,7 +405,9 @@ if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
fi

# Fluentd requirements
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.2-1.5.30-1-k8s}"
# YAML exists to trigger a configuration refresh when changes are made.
FLUENTD_GCP_YAML_VERSION="v3.1.0"
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.5-1.5.36-1-k8s}"
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}"
@@ -414,11 +418,14 @@ HEAPSTER_GCP_MEMORY_PER_NODE="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
HEAPSTER_GCP_BASE_CPU="${HEAPSTER_GCP_BASE_CPU:-80m}"
HEAPSTER_GCP_CPU_PER_NODE="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"

# Optional: custom system banner for dashboard addon
CUSTOM_KUBE_DASHBOARD_BANNER="${CUSTOM_KUBE_DASHBOARD_BANNER:-}"

# Default Stackdriver resources version exported by Fluentd-gcp addon
LOGGING_STACKDRIVER_RESOURCE_TYPES="${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}"

# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER LOGGING_STACKDRIVER_RESOURCE_TYPES"
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_YAML_VERSION FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER LOGGING_STACKDRIVER_RESOURCE_TYPES"

# Fluentd configuration for node-journal
ENABLE_NODE_JOURNAL="${ENABLE_NODE_JOURNAL:-false}"
@@ -453,3 +460,11 @@ if [[ "${ENABLE_TOKENREQUEST:-}" == "true" ]]; then
SERVICEACCOUNT_ISSUER="https://kubernetes.io/${CLUSTER_NAME}"
SERVICEACCOUNT_API_AUDIENCES="https://kubernetes.default.svc"
fi

# Optional: Enable Node termination Handler for Preemptible and GPU VMs.
# https://github.com/GoogleCloudPlatform/k8s-node-termination-handler
ENABLE_NODE_TERMINATION_HANDLER="${ENABLE_NODE_TERMINATION_HANDLER:-false}"
# Override default Node Termination Handler Image
if [[ "${NODE_TERMINATION_HANDLER_IMAGE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} NODE_TERMINATION_HANDLER_IMAGE"
fi
61
vendor/k8s.io/kubernetes/cluster/gce/config-test.sh
generated
vendored
@@ -37,6 +37,14 @@ MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)}
NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
NODE_LOCAL_SSDS=${NODE_LOCAL_SSDS:-0}

# Historically fluentd was a manifest pod and then was migrated to DaemonSet.
# To avoid situation during cluster upgrade when there are two instances
# of fluentd running on a node, kubelet need to mark node on which
# fluentd is not running as a manifest pod with appropriate label.
# TODO(piosz): remove this in 1.8
NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"

# An extension to local SSDs allowing users to specify block/fs and SCSI/NVMe devices
# Format of this variable will be "#,scsi/nvme,block/fs" you can specify multiple
# configurations by separating them by a semi-colon ex. "2,scsi,fs;1,nvme,block"
@@ -50,6 +58,7 @@ PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}
KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-true}
CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false}
MIG_WAIT_UNTIL_STABLE_TIMEOUT=${MIG_WAIT_UNTIL_STABLE_TIMEOUT:-1800}

MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
@@ -75,7 +84,6 @@ ALLOWED_NOTREADY_NODES="${ALLOWED_NOTREADY_NODES:-$((NUM_NODES / 100))}"
# you are updating the os image versions, update this variable.
# Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
CVM_VERSION=${CVM_VERSION:-container-vm-v20170627}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-65-10323-64-0}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
@@ -101,7 +109,6 @@ CLUSTER_NAME="${CLUSTER_NAME:-${INSTANCE_PREFIX}}"
MASTER_NAME="${INSTANCE_PREFIX}-master"
AGGREGATOR_MASTER_NAME="${INSTANCE_PREFIX}-aggregator"
INITIAL_ETCD_CLUSTER="${MASTER_NAME}"
ETCD_QUORUM_READ="${ENABLE_ETCD_QUORUM_READ:-false}"
MASTER_TAG="${INSTANCE_PREFIX}-master"
NODE_TAG="${INSTANCE_PREFIX}-minion"

@@ -166,7 +173,7 @@ ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"
# Useful for scheduling heapster in large clusters with nodes of small size.
HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"

# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.2.18-0) if you need
# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.2.24-1) if you need
# non-default version.
ETCD_IMAGE="${TEST_ETCD_IMAGE:-}"
ETCD_DOCKER_REPOSITORY="${TEST_ETCD_DOCKER_REPOSITORY:-}"
@@ -202,19 +209,20 @@ CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-} ${TEST_CLUSTER_R
SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
KUBEPROXY_TEST_ARGS="${KUBEPROXY_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"

# Historically fluentd was a manifest pod and then was migrated to DaemonSet.
# To avoid situation during cluster upgrade when there are two instances
# of fluentd running on a node, kubelet need to mark node on which
# fluentd is not running as a manifest pod with appropriate label.
# TODO(piosz): remove this in 1.8
NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"

# NON_MASTER_NODE_LABELS are labels will only be applied on non-master nodes.
NON_MASTER_NODE_LABELS="${KUBE_NON_MASTER_NODE_LABELS:-}"

if [[ "${PREEMPTIBLE_MASTER}" == "true" ]]; then
NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-preemptible=true"
elif [[ "${PREEMPTIBLE_NODE}" == "true" ]]; then
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-preemptible=true"
fi

# Optional: Enable netd.
ENABLE_NETD="${KUBE_ENABLE_NETD:-false}"
CUSTOM_NETD_YAML="${KUBE_CUSTOM_NETD_YAML:-}"
CUSTOM_CALICO_NODE_DAEMONSET_YAML="${KUBE_CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}"
CUSTOM_TYPHA_DEPLOYMENT_YAML="${KUBE_CUSTOM_TYPHA_DEPLOYMENT_YAML:-}"

# To avoid running netd on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
@@ -231,6 +239,7 @@ fi
# Enable metadata concealment by firewalling pod traffic to the metadata server
# and run a proxy daemonset on nodes.
ENABLE_METADATA_CONCEALMENT="${ENABLE_METADATA_CONCEALMENT:-true}" # true, false
METADATA_CONCEALMENT_NO_FIREWALL="${METADATA_CONCEALMENT_NO_FIREWALL:-false}" # true, false
if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then
# Put the necessary label on the node so the daemonset gets scheduled.
NODE_LABELS="${NODE_LABELS},beta.kubernetes.io/metadata-proxy-ready=true"
@@ -254,7 +263,7 @@ fi
if [[ ! -z "${NODE_ACCELERATORS}" ]]; then
FEATURE_GATES="${FEATURE_GATES},DevicePlugins=true"
if [[ "${NODE_ACCELERATORS}" =~ .*type=([a-zA-Z0-9-]+).* ]]; then
NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}"
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}"
fi
fi

@@ -293,9 +302,6 @@ if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
AUTOSCALER_EXPANDER_CONFIG="${KUBE_AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
fi

# Optional: Enable Rescheduler
ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"

# Optional: Enable allocation of pod IPs using IP aliases.
#
# BETA FEATURE.
@@ -346,7 +352,7 @@ if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
fi

if [[ -z "${KUBE_ADMISSION_CONTROL:-}" ]]; then
ADMISSION_CONTROL="Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,PodPreset,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection"
ADMISSION_CONTROL="NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,PodPreset,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection"
if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
fi
@@ -383,10 +389,6 @@ HAIRPIN_MODE="${HAIRPIN_MODE:-hairpin-veth}" # promiscuous-bridge, hairpin-veth,
# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}

# Optional: if set to true, a image puller is deployed. Only for use in e2e clusters.
# TODO: Pipe this through GKE e2e clusters once we know it helps.
PREPULL_E2E_IMAGES="${PREPULL_E2E_IMAGES:-true}"

# Evict pods whenever compute resource availability on the nodes gets below a threshold.
EVICTION_HARD="${EVICTION_HARD:-memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%}"

@@ -405,10 +407,6 @@ ENABLE_LEGACY_ABAC="${ENABLE_LEGACY_ABAC:-false}" # true, false
ENABLE_APISERVER_ADVANCED_AUDIT="${ENABLE_APISERVER_ADVANCED_AUDIT:-true}" # true, false
ADVANCED_AUDIT_LOG_MODE="${ADVANCED_AUDIT_LOG_MODE:-batch}" # batch, blocking

if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]]; then
echo "Warning: Basic audit logging is deprecated and will be removed. Please use advanced auditing instead."
fi

ENABLE_BIG_CLUSTER_SUBNETS="${ENABLE_BIG_CLUSTER_SUBNETS:-false}"

if [[ -n "${LOGROTATE_FILES_MAX_COUNT:-}" ]]; then
@@ -419,7 +417,9 @@ if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
fi

# Fluentd requirements
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.2-1.5.30-1-k8s}"
# YAML exists to trigger a configuration refresh when changes are made.
FLUENTD_GCP_YAML_VERSION="v3.1.0"
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.5-1.5.36-1-k8s}"
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}"
@@ -430,11 +430,14 @@ HEAPSTER_GCP_MEMORY_PER_NODE="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
HEAPSTER_GCP_BASE_CPU="${HEAPSTER_GCP_BASE_CPU:-80m}"
HEAPSTER_GCP_CPU_PER_NODE="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"

# Optional: custom system banner for dashboard addon
CUSTOM_KUBE_DASHBOARD_BANNER="${CUSTOM_KUBE_DASHBOARD_BANNER:-}"

# Default Stackdriver resources version exported by Fluentd-gcp addon
LOGGING_STACKDRIVER_RESOURCE_TYPES="${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}"

# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER LOGGING_STACKDRIVER_RESOURCE_TYPES"
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_YAML_VERSION FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER LOGGING_STACKDRIVER_RESOURCE_TYPES"

# Fluentd configuration for node-journal
ENABLE_NODE_JOURNAL="${ENABLE_NODE_JOURNAL:-false}"
@@ -472,3 +475,11 @@ if [[ "${ENABLE_TOKENREQUEST:-}" == "true" ]]; then
SERVICEACCOUNT_ISSUER="https://kubernetes.io/${CLUSTER_NAME}"
SERVICEACCOUNT_API_AUDIENCES="https://kubernetes.default.svc"
fi

# Optional: Enable Node termination Handler for Preemptible and GPU VMs.
# https://github.com/GoogleCloudPlatform/k8s-node-termination-handler
ENABLE_NODE_TERMINATION_HANDLER="${ENABLE_NODE_TERMINATION_HANDLER:-false}"
# Override default Node Termination Handler Image
if [[ "${NODE_TERMINATION_HANDLER_IMAGE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} NODE_TERMINATION_HANDLER_IMAGE"
fi
5
vendor/k8s.io/kubernetes/cluster/gce/gci/BUILD
generated
vendored
@@ -14,8 +14,8 @@ go_test(
],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
],
)

@@ -35,6 +35,7 @@ release_filegroup(

pkg_tar(
name = "gci-trusty-manifests",
srcs = glob(["gke-internal-configure-helper.sh"]),
files = {
"//cluster/gce/gci/mounter": "gci-mounter",
"configure-helper.sh": "gci-configure-helper.sh",
4
vendor/k8s.io/kubernetes/cluster/gce/gci/OWNERS
generated
vendored
Normal file
@@ -0,0 +1,4 @@
approvers:
- dchen1107
- filbranden
- yguo0905
2
vendor/k8s.io/kubernetes/cluster/gce/gci/apiserver_manifest_test.go
generated
vendored
@@ -51,7 +51,7 @@ readonly DOCKER_REGISTRY="k8s.gcr.io"
readonly ENABLE_LEGACY_ABAC=false
readonly ETC_MANIFESTS=${KUBE_HOME}/etc/kubernetes/manifests
readonly KUBE_API_SERVER_DOCKER_TAG=v1.11.0-alpha.0.1808_3c7452dc11645d-dirty
readonly LOG_OWNER_USER=$(whoami)
readonly LOG_OWNER_USER=$(id -un)
readonly LOG_OWNER_GROUP=$(id -gn)
ENCRYPTION_PROVIDER_CONFIG={{.EncryptionProviderConfig}}
ENCRYPTION_PROVIDER_CONFIG_PATH={{.EncryptionProviderConfigPath}}
246
vendor/k8s.io/kubernetes/cluster/gce/gci/configure-helper.sh
generated
vendored
@@ -30,6 +30,10 @@ readonly UUID_BLOCK_PREFIX="/dev/disk/by-uuid/google-local-ssds"
readonly COREDNS_AUTOSCALER="Deployment/coredns"
readonly KUBEDNS_AUTOSCALER="Deployment/kube-dns"

# Resource requests of master components.
KUBE_CONTROLLER_MANAGER_CPU_REQUEST="${KUBE_CONTROLLER_MANAGER_CPU_REQUEST:-200m}"
KUBE_SCHEDULER_CPU_REQUEST="${KUBE_SCHEDULER_CPU_REQUEST:-75m}"

# Use --retry-connrefused opt only if it's supported by curl.
CURL_RETRY_CONNREFUSED=""
if curl --help | grep -q -- '--retry-connrefused'; then
@@ -43,6 +47,40 @@ function setup-os-params {
echo "core.%e.%p.%t" > /proc/sys/kernel/core_pattern
}

# secure_random generates a secure random string of bytes. This function accepts
# a number of secure bytes desired and returns a base64 encoded string with at
# least the requested entropy. Rather than directly reading from /dev/urandom,
# we use uuidgen which calls getrandom(2). getrandom(2) verifies that the
# entropy pool has been initialized sufficiently for the desired operation
# before reading from /dev/urandom.
#
# ARGS:
# #1: number of secure bytes to generate. We round up to the nearest factor of 32.
function secure_random {
local infobytes="${1}"
if ((infobytes <= 0)); then
echo "Invalid argument to secure_random: infobytes='${infobytes}'" 1>&2
return 1
fi

local out=""
for (( i = 0; i < "${infobytes}"; i += 32 )); do
# uuids have 122 random bits, sha256 sums have 256 bits, so concatenate
# three uuids and take their sum. The sum is encoded in ASCII hex, hence the
# 64 character cut.
out+="$(
(
uuidgen --random;
uuidgen --random;
uuidgen --random;
) | sha256sum \
| head -c 64
)";
done
# Finally, convert the ASCII hex to base64 to increase the density.
echo -n "${out}" | xxd -r -p | base64 -w 0
}

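A usage sketch for secure_random as defined above: each 32-byte block is derived from three concatenated UUIDs hashed with sha256, so the base64 output length is a multiple of 44 characters.

    key="$(secure_random 32)"            # one 32-byte block of entropy
    echo "${#key}"                       # 44 (base64 of 32 bytes)
    secure_random 0 || echo "rejected"   # invalid sizes return non-zero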
function config-ip-firewall {
|
||||
echo "Configuring IP firewall rules"
|
||||
|
||||
@@ -51,18 +89,20 @@ function config-ip-firewall {
|
||||
sysctl -w net.ipv4.conf.all.route_localnet=1
|
||||
|
||||
# The GCI image has host firewall which drop most inbound/forwarded packets.
|
||||
# We need to add rules to accept all TCP/UDP/ICMP packets.
|
||||
# We need to add rules to accept all TCP/UDP/ICMP/SCTP packets.
|
||||
if iptables -w -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then
|
||||
echo "Add rules to accept all inbound TCP/UDP/ICMP packets"
|
||||
iptables -A INPUT -w -p TCP -j ACCEPT
|
||||
iptables -A INPUT -w -p UDP -j ACCEPT
|
||||
iptables -A INPUT -w -p ICMP -j ACCEPT
|
||||
iptables -A INPUT -w -p SCTP -j ACCEPT
|
||||
fi
|
||||
if iptables -w -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then
|
||||
echo "Add rules to accept all forwarded TCP/UDP/ICMP packets"
|
||||
echo "Add rules to accept all forwarded TCP/UDP/ICMP/SCTP packets"
|
||||
iptables -A FORWARD -w -p TCP -j ACCEPT
|
||||
iptables -A FORWARD -w -p UDP -j ACCEPT
|
||||
iptables -A FORWARD -w -p ICMP -j ACCEPT
|
||||
iptables -A FORWARD -w -p SCTP -j ACCEPT
|
||||
fi
|
||||
|
||||
# Flush iptables nat table
|
||||
@@ -568,6 +608,12 @@ EOF
    cat <<EOF >>/etc/gce.conf
token-url = ${TOKEN_URL}
token-body = ${TOKEN_BODY}
EOF
  fi
  if [[ -n "${CONTAINER_API_ENDPOINT:-}" ]]; then
    use_cloud_config="true"
    cat <<EOF >>/etc/gce.conf
container-api-endpoint = ${CONTAINER_API_ENDPOINT}
EOF
  fi
  if [[ -n "${PROJECT_ID:-}" ]]; then

@@ -615,6 +661,15 @@ EOF
    use_cloud_config="true"
    cat <<EOF >>/etc/gce.conf
multizone = ${MULTIZONE}
EOF
  fi
  # Multimaster indicates that the cluster is HA.
  # Currently the only HA clusters are regional.
  # If we introduce zonal multimaster this will need to be revisited.
  if [[ -n "${MULTIMASTER:-}" ]]; then
    use_cloud_config="true"
    cat <<EOF >>/etc/gce.conf
regional = ${MULTIMASTER}
EOF
  fi
  if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then

@@ -1152,6 +1207,7 @@ function start-kubelet {
  local -r kubelet_env_file="/etc/default/kubelet"
  local kubelet_opts="${KUBELET_ARGS} ${KUBELET_CONFIG_FILE_ARG:-}"
  echo "KUBELET_OPTS=\"${kubelet_opts}\"" > "${kubelet_env_file}"
  echo "KUBE_COVERAGE_FILE=\"/var/log/kubelet.cov\"" >> "${kubelet_env_file}"

  # Write the systemd service file for kubelet.
  cat <<EOF >/etc/systemd/system/kubelet.service

@@ -1170,6 +1226,7 @@ ExecStart=${kubelet_bin} \$KUBELET_OPTS
WantedBy=multi-user.target
EOF

  systemctl daemon-reload
  systemctl start kubelet.service
}

@@ -1181,13 +1238,18 @@ function start-node-problem-detector {
  local -r km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor.json"
  # TODO(random-liu): Handle this for alternative container runtime.
  local -r dm_config="${KUBE_HOME}/node-problem-detector/config/docker-monitor.json"
  local -r custom_km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor-counter.json"
  echo "Using node problem detector binary at ${npd_bin}"
  local flags="${NPD_TEST_LOG_LEVEL:-"--v=2"} ${NPD_TEST_ARGS:-}"
  flags+=" --logtostderr"
  flags+=" --system-log-monitors=${km_config},${dm_config}"
  flags+=" --custom-plugin-monitors=${custom_km_config}"
  flags+=" --apiserver-override=https://${KUBERNETES_MASTER_NAME}?inClusterConfig=false&auth=/var/lib/node-problem-detector/kubeconfig"
  local -r npd_port=${NODE_PROBLEM_DETECTOR_PORT:-20256}
  flags+=" --port=${npd_port}"
  if [[ -n "${EXTRA_NPD_ARGS:-}" ]]; then
    flags+=" ${EXTRA_NPD_ARGS}"
  fi

  # Write the systemd service file for node problem detector.
  cat <<EOF >/etc/systemd/system/node-problem-detector.service

@@ -1256,10 +1318,6 @@ function prepare-kube-proxy-manifest-variables {
    kube_cache_mutation_detector_env_name="- name: KUBE_CACHE_MUTATION_DETECTOR"
    kube_cache_mutation_detector_env_value="value: \"${ENABLE_CACHE_MUTATION_DETECTOR}\""
  fi
  local pod_priority=""
  if [[ "${ENABLE_POD_PRIORITY:-}" == "true" ]]; then
    pod_priority="priorityClassName: system-node-critical"
  fi
  sed -i -e "s@{{kubeconfig}}@${kubeconfig}@g" ${src_file}
  sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${kube_docker_registry}@g" ${src_file}
  sed -i -e "s@{{pillar\['kube-proxy_docker_tag'\]}}@${kube_proxy_docker_tag}@g" ${src_file}

@@ -1267,7 +1325,6 @@ function prepare-kube-proxy-manifest-variables {
  sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
  sed -i -e "s@{{kube_cache_mutation_detector_env_name}}@${kube_cache_mutation_detector_env_name}@g" ${src_file}
  sed -i -e "s@{{kube_cache_mutation_detector_env_value}}@${kube_cache_mutation_detector_env_value}@g" ${src_file}
  sed -i -e "s@{{pod_priority}}@${pod_priority}@g" ${src_file}
  sed -i -e "s@{{ cpurequest }}@100m@g" ${src_file}
  sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" ${src_file}
  sed -i -e "s@{{kubernetes_service_host_env_value}}@${KUBERNETES_MASTER_NAME}@g" ${src_file}

@@ -1326,7 +1383,6 @@ function prepare-etcd-manifest {
  sed -i -e "s@{{ *cpulimit *}}@\"$4\"@g" "${temp_file}"
  sed -i -e "s@{{ *hostname *}}@$host_name@g" "${temp_file}"
  sed -i -e "s@{{ *host_ip *}}@$host_ip@g" "${temp_file}"
  sed -i -e "s@{{ *srv_kube_path *}}@/etc/srv/kubernetes@g" "${temp_file}"
  sed -i -e "s@{{ *etcd_cluster *}}@$etcd_cluster@g" "${temp_file}"
  sed -i -e "s@{{ *liveness_probe_initial_delay *}}@${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${temp_file}"
  # Get default storage backend from manifest file.

@@ -1459,8 +1515,12 @@ function start-kube-apiserver {
  params+=" --allow-privileged=true"
  params+=" --cloud-provider=gce"
  params+=" --client-ca-file=${CA_CERT_BUNDLE_PATH}"
  params+=" --etcd-servers=http://127.0.0.1:2379"
  params+=" --etcd-servers-overrides=/events#http://127.0.0.1:4002"
  params+=" --etcd-servers=${ETCD_SERVERS:-http://127.0.0.1:2379}"
  if [[ -z "${ETCD_SERVERS:-}" ]]; then
    params+=" --etcd-servers-overrides=${ETCD_SERVERS_OVERRIDES:-/events#http://127.0.0.1:4002}"
  elif [[ -n "${ETCD_SERVERS_OVERRIDES:-}" ]]; then
    params+=" --etcd-servers-overrides=${ETCD_SERVERS_OVERRIDES:-}"
  fi
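  # Net effect of the branch above: with no ETCD_SERVERS set, the events
  # override keeps its http://127.0.0.1:4002 default; with a custom
  # ETCD_SERVERS, an override flag is only added when explicitly provided.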
  params+=" --secure-port=443"
  params+=" --tls-cert-file=${APISERVER_SERVER_CERT_PATH}"
  params+=" --tls-private-key-file=${APISERVER_SERVER_KEY_PATH}"

@@ -1516,9 +1576,6 @@ function start-kube-apiserver {
  if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then
    params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
  fi
  if [[ -n "${ETCD_QUORUM_READ:-}" ]]; then
    params+=" --etcd-quorum-read=${ETCD_QUORUM_READ}"
  fi
  if [[ -n "${SERVICEACCOUNT_ISSUER:-}" ]]; then
    params+=" --service-account-issuer=${SERVICEACCOUNT_ISSUER}"
    params+=" --service-account-signing-key-file=${SERVICEACCOUNT_KEY_PATH}"

@@ -1529,26 +1586,7 @@ function start-kube-apiserver {
  local audit_policy_config_volume=""
  local audit_webhook_config_mount=""
  local audit_webhook_config_volume=""
  if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]]; then
    # We currently only support enabling with a fixed path and with built-in log
    # rotation "disabled" (large value) so it behaves like kube-apiserver.log.
    # External log rotation should be set up the same as for kube-apiserver.log.
    params+=" --audit-log-path=/var/log/kube-apiserver-audit.log"
    params+=" --audit-log-maxage=0"
    params+=" --audit-log-maxbackup=0"
    # Lumberjack doesn't offer any way to disable size-based rotation. It also
    # has an in-memory counter that doesn't notice if you truncate the file.
    # 2000000000 (in MiB) is a large number that fits in 31 bits. If the log
    # grows at 10MiB/s (~30K QPS), it will rotate after ~6 years if apiserver
    # never restarts. Please manually restart apiserver before this time.
    params+=" --audit-log-maxsize=2000000000"
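    # Illustrative arithmetic for the estimate above: 2000000000 MiB at a
    # sustained 10 MiB/s is 200,000,000 seconds, and
    # 200000000 / 86400 / 365 is roughly 6.3 years between rotations.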
    # Disable AdvancedAuditing, which is enabled by default.
    if [[ -z "${FEATURE_GATES:-}" ]]; then
      FEATURE_GATES="AdvancedAuditing=false"
    else
      FEATURE_GATES="${FEATURE_GATES},AdvancedAuditing=false"
    fi
  elif [[ "${ENABLE_APISERVER_ADVANCED_AUDIT:-}" == "true" ]]; then
  if [[ "${ENABLE_APISERVER_ADVANCED_AUDIT:-}" == "true" ]]; then
    local -r audit_policy_file="/etc/audit_policy.config"
    params+=" --audit-policy-file=${audit_policy_file}"
    # Create the audit policy file, and mount it into the apiserver pod.

@@ -1596,8 +1634,6 @@ function start-kube-apiserver {
    fi
  fi
  if [[ "${ADVANCED_AUDIT_BACKEND:-}" == *"webhook"* ]]; then
    params+=" --audit-webhook-mode=batch"

    # Create the audit webhook config file, and mount it into the apiserver pod.
    local -r audit_webhook_config_file="/etc/audit_webhook.config"
    params+=" --audit-webhook-config-file=${audit_webhook_config_file}"

@@ -1608,6 +1644,8 @@ function start-kube-apiserver {
    # Batching parameters
    if [[ -n "${ADVANCED_AUDIT_WEBHOOK_MODE:-}" ]]; then
      params+=" --audit-webhook-mode=${ADVANCED_AUDIT_WEBHOOK_MODE}"
    else
      params+=" --audit-webhook-mode=batch"
    fi
    if [[ -n "${ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE:-}" ]]; then
      params+=" --audit-webhook-batch-buffer-size=${ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE}"

@@ -1773,7 +1811,6 @@ EOM
  local -r kube_apiserver_docker_tag="${KUBE_API_SERVER_DOCKER_TAG:-$(cat /home/kubernetes/kube-docker-files/kube-apiserver.docker_tag)}"
  sed -i -e "s@{{params}}@${params}@g" "${src_file}"
  sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
  sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
  sed -i -e "s@{{srv_sshproxy_path}}@/etc/srv/sshproxy@g" "${src_file}"
  sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
  sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"

@@ -1943,7 +1980,6 @@ function start-kube-controller-manager {

  local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-controller-manager.manifest"
  # Evaluate variables.
  sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube-controller-manager_docker_tag'\]}}@${kube_rc_docker_tag}@g" "${src_file}"
  sed -i -e "s@{{params}}@${params}@g" "${src_file}"

@@ -1956,6 +1992,7 @@ function start-kube-controller-manager {
  sed -i -e "s@{{pv_recycler_volume}}@${PV_RECYCLER_VOLUME}@g" "${src_file}"
  sed -i -e "s@{{flexvolume_hostpath_mount}}@${FLEXVOLUME_HOSTPATH_MOUNT}@g" "${src_file}"
  sed -i -e "s@{{flexvolume_hostpath}}@${FLEXVOLUME_HOSTPATH_VOLUME}@g" "${src_file}"
  sed -i -e "s@{{cpurequest}}@${KUBE_CONTROLLER_MANAGER_CPU_REQUEST}@g" "${src_file}"

  cp "${src_file}" /etc/kubernetes/manifests
}

@@ -1990,10 +2027,10 @@ function start-kube-scheduler {
  # Remove salt comments and replace variables with values.
  local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-scheduler.manifest"

  sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
  sed -i -e "s@{{params}}@${params}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube-scheduler_docker_tag'\]}}@${kube_scheduler_docker_tag}@g" "${src_file}"
  sed -i -e "s@{{cpurequest}}@${KUBE_SCHEDULER_CPU_REQUEST}@g" "${src_file}"
  cp "${src_file}" /etc/kubernetes/manifests
}

@@ -2042,6 +2079,12 @@ function setup-addon-manifests {
      copy-manifests "${psp_dir}" "${dst_dir}"
    fi
  fi
  if [[ "${ENABLE_NODE_TERMINATION_HANDLER:-}" == "true" ]]; then
    local -r nth_dir="${src_dir}/${3:-$2}/node-termination-handler"
    if [[ -d "${nth_dir}" ]]; then
      copy-manifests "${nth_dir}" "${dst_dir}"
    fi
  fi
}

# A function that downloads extra addons from a URL and puts them in the GCI

@@ -2199,6 +2242,17 @@ function update-prometheus-to-sd-parameters {
  fi
}

# Updates parameters in the yaml file for the prometheus-to-sd configuration in
# daemon sets, or removes the component if it is disabled.
function update-daemon-set-prometheus-to-sd-parameters {
  if [[ "${DISABLE_PROMETHEUS_TO_SD_IN_DS:-}" == "true" ]]; then
    # Removes all lines between the two patterns (throws away prometheus-to-sd).
    sed -i -e "/# BEGIN_PROMETHEUS_TO_SD/,/# END_PROMETHEUS_TO_SD/d" "$1"
  else
    update-prometheus-to-sd-parameters $1
  fi
}
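The sed expression above deletes an inclusive line range between two comment markers. A minimal, self-contained sketch of that behavior (the sample manifest content is illustrative, not the actual fluentd or kube-proxy yaml):

# Illustrative only: demonstrates the inclusive-range delete used above.
cat > /tmp/sample-ds.yaml <<'SAMPLE'
containers:
- name: main-container
# BEGIN_PROMETHEUS_TO_SD
- name: prometheus-to-sd-exporter
# END_PROMETHEUS_TO_SD
SAMPLE
# Removes the whole marker-delimited block, markers included, leaving only
# the main container entry behind.
sed -i -e "/# BEGIN_PROMETHEUS_TO_SD/,/# END_PROMETHEUS_TO_SD/d" /tmp/sample-ds.yaml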
# Updates parameters in the yaml file for the event-exporter configuration.
function update-event-exporter {
  local -r stackdriver_resource_model="${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}"

@@ -2231,6 +2285,7 @@ function setup-coredns-manifest {
function setup-fluentd {
  local -r dst_dir="$1"
  local -r fluentd_gcp_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-ds.yaml"
  local -r fluentd_gcp_scaler_yaml="${dst_dir}/fluentd-gcp/scaler-deployment.yaml"
  # Ingest logs against new resources like "k8s_container" and "k8s_node" if
  # LOGGING_STACKDRIVER_RESOURCE_TYPES is "new".
  # Ingest logs against old resources like "gke_container" and "gce_instance" if

@@ -2243,9 +2298,12 @@ function setup-fluentd {
    fluentd_gcp_configmap_name="fluentd-gcp-config-old"
  fi
  sed -i -e "s@{{ fluentd_gcp_configmap_name }}@${fluentd_gcp_configmap_name}@g" "${fluentd_gcp_yaml}"
  fluentd_gcp_version="${FLUENTD_GCP_VERSION:-0.2-1.5.30-1-k8s}"
  fluentd_gcp_yaml_version="${FLUENTD_GCP_YAML_VERSION:-v3.1.0}"
  sed -i -e "s@{{ fluentd_gcp_yaml_version }}@${fluentd_gcp_yaml_version}@g" "${fluentd_gcp_yaml}"
  sed -i -e "s@{{ fluentd_gcp_yaml_version }}@${fluentd_gcp_yaml_version}@g" "${fluentd_gcp_scaler_yaml}"
  fluentd_gcp_version="${FLUENTD_GCP_VERSION:-0.5-1.5.36-1-k8s}"
  sed -i -e "s@{{ fluentd_gcp_version }}@${fluentd_gcp_version}@g" "${fluentd_gcp_yaml}"
  update-prometheus-to-sd-parameters ${fluentd_gcp_yaml}
  update-daemon-set-prometheus-to-sd-parameters ${fluentd_gcp_yaml}
  start-fluentd-resource-update ${fluentd_gcp_yaml}
  update-container-runtime ${fluentd_gcp_configmap_yaml}
  update-node-journal ${fluentd_gcp_configmap_yaml}

@@ -2258,7 +2316,7 @@ function setup-kube-dns-manifest {
  if [ -n "${CUSTOM_KUBE_DNS_YAML:-}" ]; then
    # Replace with custom GKE kube-dns deployment.
    cat > "${kubedns_file}" <<EOF
$(echo "$CUSTOM_KUBE_DNS_YAML")
$CUSTOM_KUBE_DNS_YAML
EOF
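    # Note: inside an unquoted heredoc, $CUSTOM_KUBE_DNS_YAML already expands
    # verbatim with newlines preserved, so the previous $(echo ...) wrapper was
    # redundant; the same simplification recurs for netd and kube-proxy below.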
    update-prometheus-to-sd-parameters ${kubedns_file}
  fi

@@ -2281,7 +2339,24 @@ function setup-netd-manifest {
  if [ -n "${CUSTOM_NETD_YAML:-}" ]; then
    # Replace with custom GCP netd deployment.
    cat > "${netd_file}" <<EOF
$(echo "$CUSTOM_NETD_YAML")
$CUSTOM_NETD_YAML
EOF
  fi
}

# A helper function to set up a custom yaml for a k8s addon.
#
# $1: addon category under /etc/kubernetes
# $2: manifest source dir
# $3: manifest file
# $4: custom yaml
function setup-addon-custom-yaml {
  local -r manifest_path="/etc/kubernetes/$1/$2/$3"
  local -r custom_yaml="$4"
  if [ -n "${custom_yaml:-}" ]; then
    # Replace with custom manifest.
    cat > "${manifest_path}" <<EOF
$custom_yaml
EOF
  fi
}

@@ -2312,9 +2387,9 @@ function start-kube-addons {
  if [ -n "${CUSTOM_KUBE_PROXY_YAML:-}" ]; then
    # Replace with custom GKE kube proxy.
    cat > "$src_dir/kube-proxy/kube-proxy-ds.yaml" <<EOF
$(echo "$CUSTOM_KUBE_PROXY_YAML")
$CUSTOM_KUBE_PROXY_YAML
EOF
    update-prometheus-to-sd-parameters "$src_dir/kube-proxy/kube-proxy-ds.yaml"
    update-daemon-set-prometheus-to-sd-parameters "$src_dir/kube-proxy/kube-proxy-ds.yaml"
  fi
  prepare-kube-proxy-manifest-variables "$src_dir/kube-proxy/kube-proxy-ds.yaml"
  setup-addon-manifests "addons" "kube-proxy"

@@ -2337,10 +2412,17 @@ EOF
  base_eventer_memory="190Mi"
  base_metrics_cpu="${HEAPSTER_GCP_BASE_CPU:-80m}"
  nanny_memory="90Mi"
  local -r metrics_memory_per_node="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
  local heapster_min_cluster_size="16"
  local metrics_memory_per_node="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
  local -r metrics_cpu_per_node="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
  local -r eventer_memory_per_node="500"
  local -r nanny_memory_per_node="200"
  if [[ "${ENABLE_SYSTEM_ADDON_RESOURCE_OPTIMIZATIONS:-}" == "true" ]]; then
    base_metrics_memory="${HEAPSTER_GCP_BASE_MEMORY:-100Mi}"
    base_metrics_cpu="${HEAPSTER_GCP_BASE_CPU:-10m}"
    metrics_memory_per_node="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
    heapster_min_cluster_size="5"
  fi
  if [[ -n "${NUM_NODES:-}" && "${NUM_NODES}" -ge 1 ]]; then
    num_kube_nodes="$((${NUM_NODES}+1))"
    nanny_memory="$((${num_kube_nodes} * ${nanny_memory_per_node} + 90 * 1024))Ki"
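    # Worked example of the formula above (illustrative numbers): with
    # NUM_NODES=100, num_kube_nodes=101 and
    # nanny_memory = (101 * 200 + 90 * 1024)Ki = 112360Ki, i.e. roughly 110Mi.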
@@ -2361,6 +2443,7 @@ EOF
  sed -i -e "s@{{ *eventer_memory_per_node *}}@${eventer_memory_per_node}@g" "${controller_yaml}"
  sed -i -e "s@{{ *nanny_memory *}}@${nanny_memory}@g" "${controller_yaml}"
  sed -i -e "s@{{ *metrics_cpu_per_node *}}@${metrics_cpu_per_node}@g" "${controller_yaml}"
  sed -i -e "s@{{ *heapster_min_cluster_size *}}@${heapster_min_cluster_size}@g" "${controller_yaml}"
  update-prometheus-to-sd-parameters ${controller_yaml}

  if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "stackdriver" ]]; then

@@ -2388,10 +2471,29 @@ EOF
  fi
  if [[ "${ENABLE_METRICS_SERVER:-}" == "true" ]]; then
    setup-addon-manifests "addons" "metrics-server"
    base_metrics_server_cpu="40m"
    base_metrics_server_memory="40Mi"
    metrics_server_memory_per_node="4"
    metrics_server_min_cluster_size="16"
    if [[ "${ENABLE_SYSTEM_ADDON_RESOURCE_OPTIMIZATIONS:-}" == "true" ]]; then
      base_metrics_server_cpu="40m"
      base_metrics_server_memory="35Mi"
      metrics_server_memory_per_node="4"
      metrics_server_min_cluster_size="5"
    fi
    local -r metrics_server_yaml="${dst_dir}/metrics-server/metrics-server-deployment.yaml"
    sed -i -e "s@{{ base_metrics_server_cpu }}@${base_metrics_server_cpu}@g" "${metrics_server_yaml}"
    sed -i -e "s@{{ base_metrics_server_memory }}@${base_metrics_server_memory}@g" "${metrics_server_yaml}"
    sed -i -e "s@{{ metrics_server_memory_per_node }}@${metrics_server_memory_per_node}@g" "${metrics_server_yaml}"
    sed -i -e "s@{{ metrics_server_min_cluster_size }}@${metrics_server_min_cluster_size}@g" "${metrics_server_yaml}"
  fi
  if [[ "${ENABLE_NVIDIA_GPU_DEVICE_PLUGIN:-}" == "true" ]]; then
    setup-addon-manifests "addons" "device-plugins/nvidia-gpu"
  fi
  if [[ "${ENABLE_NODE_TERMINATION_HANDLER:-}" == "true" ]]; then
    setup-addon-manifests "addons" "node-termination-handler"
    setup-node-termination-handler-manifest
  fi
  if [[ "${ENABLE_CLUSTER_DNS:-}" == "true" ]]; then
    if [[ "${CLUSTER_DNS_CORE_DNS:-}" == "true" ]]; then
      setup-addon-manifests "addons" "dns/coredns"

@@ -2437,6 +2539,9 @@ EOF
  if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
    setup-addon-manifests "addons" "calico-policy-controller"

    setup-addon-custom-yaml "addons" "calico-policy-controller" "calico-node-daemonset.yaml" "${CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}"
    setup-addon-custom-yaml "addons" "calico-policy-controller" "typha-deployment.yaml" "${CUSTOM_TYPHA_DEPLOYMENT_YAML:-}"

    # Configure Calico CNI directory.
    local -r ds_file="${dst_dir}/calico-policy-controller/calico-node-daemonset.yaml"
    sed -i -e "s@__CALICO_CNI_DIR__@/home/kubernetes/bin@g" "${ds_file}"

@@ -2450,7 +2555,7 @@ EOF
  if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]]; then
    setup-addon-manifests "addons" "metadata-proxy/gce"
    local -r metadata_proxy_yaml="${dst_dir}/metadata-proxy/gce/metadata-proxy.yaml"
    update-prometheus-to-sd-parameters ${metadata_proxy_yaml}
    update-daemon-set-prometheus-to-sd-parameters ${metadata_proxy_yaml}
  fi
  if [[ "${ENABLE_ISTIO:-}" == "true" ]]; then
    if [[ "${ISTIO_AUTH_TYPE:-}" == "MUTUAL_TLS" ]]; then

@@ -2459,21 +2564,26 @@ EOF
      setup-addon-manifests "addons" "istio/noauth"
    fi
  fi
  if [[ "${FEATURE_GATES:-}" =~ "RuntimeClass=true" ]]; then
    setup-addon-manifests "addons" "runtimeclass"
  fi
  if [[ -n "${EXTRA_ADDONS_URL:-}" ]]; then
    download-extra-addons
    setup-addon-manifests "addons" "gce-extras"
  fi

  # Place addon manager pod manifest.
  cp "${src_dir}/kube-addon-manager.yaml" /etc/kubernetes/manifests
  src_file="${src_dir}/kube-addon-manager.yaml"
  sed -i -e "s@{{kubectl_extra_prune_whitelist}}@${ADDON_MANAGER_PRUNE_WHITELIST:-}@g" "${src_file}"
  cp "${src_file}" /etc/kubernetes/manifests
}
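# Hypothetical usage of the prune-whitelist hook above (values are examples,
# not defaults from this diff): ADDON_MANAGER_PRUNE_WHITELIST takes extra
# resources in kubectl's --prune-whitelist <group/version/kind> format, e.g.
#   ADDON_MANAGER_PRUNE_WHITELIST="core/v1/ConfigMap extensions/v1beta1/Ingress"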
# Starts an image-puller - used in test clusters.
function start-image-puller {
  echo "Start image-puller"
  local -r e2e_image_puller_manifest="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/e2e-image-puller.manifest"
  update-container-runtime "${e2e_image_puller_manifest}"
  cp "${e2e_image_puller_manifest}" /etc/kubernetes/manifests/
function setup-node-termination-handler-manifest {
  local -r nth_manifest="/etc/kubernetes/$1/$2/daemonset.yaml"
  if [[ -n "${NODE_TERMINATION_HANDLER_IMAGE}" ]]; then
    sed -i "s|image:.*|image: ${NODE_TERMINATION_HANDLER_IMAGE}|" "${nth_manifest}"
  fi
}

# Sets up manifests for the ingress controller and gce-specific policies for the service controller.

@@ -2502,16 +2612,6 @@ function start-lb-controller {
  fi
}

# Starts rescheduler.
function start-rescheduler {
  if [[ "${ENABLE_RESCHEDULER:-}" == "true" ]]; then
    echo "Start Rescheduler"
    prepare-log-file /var/log/rescheduler.log
    cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/rescheduler.manifest" \
      /etc/kubernetes/manifests/
  fi
}

# Set up the working directory for the kubelet.
function setup-kubelet-dir {
  echo "Making /var/lib/kubelet executable for kubelet"

@@ -2519,6 +2619,15 @@ function setup-kubelet-dir {
  mount -B -o remount,exec,suid,dev /var/lib/kubelet
}

# Override for GKE custom master setup scripts (no-op outside of GKE).
function gke-master-start {
  if [[ -e "${KUBE_HOME}/bin/gke-internal-configure-helper.sh" ]]; then
    echo "Running GKE internal configuration script"
    . "${KUBE_HOME}/bin/gke-internal-configure-helper.sh"
    gke-internal-master-start
  fi
}

function reset-motd {
  # kubelet is installed both on the master and nodes, and the version is easy to parse (unlike kubectl)
  local -r version="$("${KUBE_HOME}"/bin/kubelet --version=true | cut -f2 -d " ")"

@@ -2637,9 +2746,9 @@ function main() {
  fi

  # Generate the controller manager, scheduler and cluster autoscaler tokens here, since they are only used on the master.
  KUBE_CONTROLLER_MANAGER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  KUBE_SCHEDULER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  KUBE_CLUSTER_AUTOSCALER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  KUBE_CONTROLLER_MANAGER_TOKEN="$(secure_random 32)"
  KUBE_SCHEDULER_TOKEN="$(secure_random 32)"
  KUBE_CLUSTER_AUTOSCALER_TOKEN="$(secure_random 32)"
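  # secure_random is defined elsewhere in this script (not shown in this
  # hunk). A rough, assumed sketch of an equivalent helper (illustrative, not
  # the vendored definition):
  #   function secure_random {
  #     local -r length="$1"
  #     dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/\n" | head -c "${length}"
  #   }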
  setup-os-params
  config-ip-firewall

@@ -2655,6 +2764,7 @@ function main() {
    create-master-kubelet-auth
    create-master-etcd-auth
    override-pv-recycler
    gke-master-start
  else
    create-node-pki
    create-kubelet-kubeconfig ${KUBERNETES_MASTER_NAME}

@@ -2683,14 +2793,10 @@ function main() {
    start-kube-addons
    start-cluster-autoscaler
    start-lb-controller
    start-rescheduler
  else
    if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
      start-kube-proxy
    fi
    if [[ "${PREPULL_E2E_IMAGES:-}" == "true" ]]; then
      start-image-puller
    fi
    if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
      start-node-problem-detector
    fi
25
vendor/k8s.io/kubernetes/cluster/gce/gci/configure.sh
generated
vendored
@@ -26,10 +26,10 @@ set -o pipefail
### Hardcoded constants
DEFAULT_CNI_VERSION="v0.6.0"
DEFAULT_CNI_SHA1="d595d3ded6499a64e8dac02466e2f5f2ce257c9f"
DEFAULT_NPD_VERSION="v0.4.1"
DEFAULT_NPD_SHA1="a57a3fe64cab8a18ec654f5cef0aec59dae62568"
DEFAULT_CRICTL_VERSION="v1.11.0"
DEFAULT_CRICTL_SHA1="8f5142b985d314cdebb51afd55054d5ec00c442a"
DEFAULT_NPD_VERSION="v0.5.0"
DEFAULT_NPD_SHA1="650ecfb2ae495175ee43706d0bd862a1ea7f1395"
DEFAULT_CRICTL_VERSION="v1.12.0"
DEFAULT_CRICTL_SHA1="82ef8b44849f9da0589c87e9865d4716573eec7f"
DEFAULT_MOUNTER_TAR_SHA="8003b798cf33c7f91320cd6ee5cec4fa22244571"
###

@@ -247,6 +247,11 @@ function install-crictl {
  fi
  local -r crictl="crictl-${crictl_version}-linux-amd64"

  # Create crictl config file.
  cat > /etc/crictl.yaml <<EOF
runtime-endpoint: ${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock}
EOF
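  # Moving the config-file creation above the preload check ensures
  # /etc/crictl.yaml exists even when the crictl binary is preloaded and the
  # download below is skipped.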
  if is-preloaded "${crictl}" "${crictl_sha1}"; then
    echo "crictl is preloaded"
    return

@@ -257,11 +262,6 @@ function install-crictl {
  download-or-bust "${crictl_sha1}" "${crictl_path}/${crictl}"
  mv "${KUBE_HOME}/${crictl}" "${KUBE_BIN}/crictl"
  chmod a+x "${KUBE_BIN}/crictl"

  # Create crictl config file.
  cat > /etc/crictl.yaml <<EOF
runtime-endpoint: ${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock}
EOF
}

function install-exec-auth-plugin {

@@ -273,7 +273,8 @@ function install-exec-auth-plugin {

  echo "Downloading gke-exec-auth-plugin binary"
  download-or-bust "${plugin_sha1}" "${plugin_url}"
  mv "${KUBE_HOME}/gke-exec-auth-plugin" "${KUBE_BIN}"
  mv "${KUBE_HOME}/gke-exec-auth-plugin" "${KUBE_BIN}/gke-exec-auth-plugin"
  chmod a+x "${KUBE_BIN}/gke-exec-auth-plugin"
}

function install-kube-manifests {

@@ -306,6 +307,10 @@ function install-kube-manifests {
      xargs sed -ri "s@(image\":\s+\")k8s.gcr.io@\1${kube_addon_registry}@"
  fi
  cp "${dst_dir}/kubernetes/gci-trusty/gci-configure-helper.sh" "${KUBE_BIN}/configure-helper.sh"
  if [[ -e "${dst_dir}/kubernetes/gci-trusty/gke-internal-configure-helper.sh" ]]; then
    cp "${dst_dir}/kubernetes/gci-trusty/gke-internal-configure-helper.sh" "${KUBE_BIN}/"
  fi

  cp "${dst_dir}/kubernetes/gci-trusty/health-monitor.sh" "${KUBE_BIN}/health-monitor.sh"

  rm -f "${KUBE_HOME}/${manifests_tar}"
4
vendor/k8s.io/kubernetes/cluster/gce/manifests/BUILD
generated
vendored
@@ -15,7 +15,6 @@ filegroup(
    srcs = [
        "abac-authz-policy.jsonl",
        "cluster-autoscaler.manifest",
        "e2e-image-puller.manifest",
        "etcd.manifest",
        "etcd-empty-dir-cleanup.yaml",
        "glbc.manifest",

@@ -25,8 +24,7 @@ filegroup(
        "kube-controller-manager.manifest",
        "kube-proxy.manifest",
        "kube-scheduler.manifest",
        "rescheduler.manifest",
    ],
    ] + glob(["internal-*"]),
)

filegroup(
3
vendor/k8s.io/kubernetes/cluster/gce/manifests/cluster-autoscaler.manifest
generated
vendored
@@ -17,7 +17,7 @@
    "containers": [
      {
        "name": "cluster-autoscaler",
        "image": "k8s.gcr.io/cluster-autoscaler:v1.3.0",
        "image": "k8s.gcr.io/cluster-autoscaler:v1.12.0",
        "livenessProbe": {
          "httpGet": {
            "path": "/health-check",

@@ -33,6 +33,7 @@
          "--logtostderr=true",
          "--write-status-configmap=true",
          "--balance-similar-node-groups=true",
          "--expendable-pods-priority-cutoff=-10",
          "{{params}}"
        ],
        "env": [
117
vendor/k8s.io/kubernetes/cluster/gce/manifests/e2e-image-puller.manifest
generated
vendored
@@ -1,117 +0,0 @@
# e2e-image-puller seeds nodes in an e2e cluster with test images.
apiVersion: v1
kind: Pod
metadata:
  name: e2e-image-puller
  namespace: kube-system
  labels:
    name: e2e-image-puller
spec:
  containers:
  - name: image-puller
    resources:
      requests:
        cpu: 100m
      limits:
        cpu: 100m
    image: k8s.gcr.io/busybox:1.24
    # TODO: Replace this with a go script that pulls in parallel?
    # Currently it takes ~5m to pull all e2e images, so this is OK, and
    # fewer moving parts is always better.
    # TODO: Replace the hardcoded image list with an autogen list; the list is
    # currently hard-coded for static verification. It was generated via:
    # grep -Iiroh "gcr.io/.*" "${KUBE_ROOT}/test/e2e" | \
    #   sed -e "s/[,\")}]//g" | awk '{print $1}' | sort | uniq | tr '\n' ' '
    # We always want the subshell to exit 0 so this pod doesn't end up
    # blocking tests in an Error state.
    command:
    - /bin/sh
    - -c
    - >
      for i in
      k8s.gcr.io/alpine-with-bash:1.0
      k8s.gcr.io/apparmor-loader:0.1
      k8s.gcr.io/busybox:1.24
      k8s.gcr.io/dnsutils:e2e
      k8s.gcr.io/e2e-net-amd64:1.0
      k8s.gcr.io/echoserver:1.10
      k8s.gcr.io/eptest:0.1
      k8s.gcr.io/fakegitserver:0.1
      k8s.gcr.io/galera-install:0.1
      k8s.gcr.io/invalid-image:invalid-tag
      k8s.gcr.io/iperf:e2e
      k8s.gcr.io/jessie-dnsutils:e2e
      k8s.gcr.io/k8s-dns-dnsmasq-amd64:1.14.5
      k8s.gcr.io/liveness:e2e
      k8s.gcr.io/logs-generator:v0.1.0
      k8s.gcr.io/mounttest:0.8
      k8s.gcr.io/mounttest-user:0.5
      k8s.gcr.io/mysql-galera:e2e
      k8s.gcr.io/mysql-healthz:1.0
      k8s.gcr.io/netexec:1.4
      k8s.gcr.io/netexec:1.5
      k8s.gcr.io/netexec:1.7
      k8s.gcr.io/nettest:1.7
      k8s.gcr.io/nginx:1.7.9
      k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.1
      k8s.gcr.io/nginx-slim:0.7
      k8s.gcr.io/nginx-slim:0.8
      k8s.gcr.io/node-problem-detector:v0.3.0
      k8s.gcr.io/pause
      k8s.gcr.io/porter:4524579c0eb935c056c8e75563b4e1eda31587e0
      k8s.gcr.io/portforwardtester:1.2
      k8s.gcr.io/redis-install-3.2.0:e2e
      k8s.gcr.io/resource_consumer:beta4
      k8s.gcr.io/resource_consumer/controller:beta4
      gcr.io/kubernetes-e2e-test-images/serve-hostname-amd64:1.1
      gcr.io/kubernetes-e2e-test-images/hostexec-amd64:1.1
      k8s.gcr.io/servicelb:0.1
      k8s.gcr.io/test-webserver:e2e
      k8s.gcr.io/update-demo:kitten
      k8s.gcr.io/update-demo:nautilus
      gcr.io/kubernetes-e2e-test-images/volume-ceph:0.1
      gcr.io/kubernetes-e2e-test-images/volume-gluster:0.2
      gcr.io/kubernetes-e2e-test-images/volume-iscsi:0.1
      gcr.io/kubernetes-e2e-test-images/volume-nfs:0.8
      gcr.io/kubernetes-e2e-test-images/volume-rbd:0.1
      k8s.gcr.io/zookeeper-install-3.5.0-alpha:e2e
      gcr.io/google_samples/gb-redisslave:nonexistent
      ; do echo $(date '+%X') pulling $i; crictl pull $i 1>/dev/null; done; exit 0;
    securityContext:
      privileged: true
    volumeMounts:
    - mountPath: {{ container_runtime_endpoint }}
      name: socket
    - mountPath: /usr/bin/crictl
      name: crictl
    - mountPath: /etc/crictl.yaml
      name: config
  # Add a container that runs a health-check
  - name: nethealth-check
    resources:
      requests:
        cpu: 100m
      limits:
        cpu: 100m
    image: k8s.gcr.io/kube-nethealth-amd64:1.0
    command:
    - /bin/sh
    - -c
    - "/usr/bin/nethealth || true"
  volumes:
  - hostPath:
      path: {{ container_runtime_endpoint }}
      type: Socket
    name: socket
  - hostPath:
      path: /home/kubernetes/bin/crictl
      type: File
    name: crictl
  - hostPath:
      path: /etc/crictl.yaml
      type: File
    name: config
  # This pod is really fire-and-forget.
  restartPolicy: OnFailure
  # This pod needs hostNetworking for true VM perf measurement as well as avoiding cbr0 issues
  hostNetwork: true
2
vendor/k8s.io/kubernetes/cluster/gce/manifests/etcd-empty-dir-cleanup.yaml
generated
vendored
@@ -14,4 +14,4 @@ spec:
  dnsPolicy: Default
  containers:
  - name: etcd-empty-dir-cleanup
    image: k8s.gcr.io/etcd-empty-dir-cleanup:3.2.18.0
    image: k8s.gcr.io/etcd-empty-dir-cleanup:3.2.24.0
8
vendor/k8s.io/kubernetes/cluster/gce/manifests/etcd.manifest
generated
vendored
@@ -14,7 +14,7 @@
"containers":[
    {
    "name": "etcd-container",
    "image": "{{ pillar.get('etcd_docker_repository', 'k8s.gcr.io/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.2.18-0') }}",
    "image": "{{ pillar.get('etcd_docker_repository', 'k8s.gcr.io/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.2.24-1') }}",
    "resources": {
      "requests": {
        "cpu": {{ cpulimit }}

@@ -30,7 +30,7 @@
      "value": "{{ pillar.get('storage_backend', 'etcd3') }}"
    },
    { "name": "TARGET_VERSION",
      "value": "{{ pillar.get('etcd_version', '3.2.18') }}"
      "value": "{{ pillar.get('etcd_version', '3.2.24') }}"
    },
    { "name": "DATA_DIRECTORY",
      "value": "/var/etcd/data{{ suffix }}"

@@ -80,7 +80,7 @@
      "readOnly": false
    },
    { "name": "etc",
      "mountPath": "{{ srv_kube_path }}",
      "mountPath": "/etc/srv/kubernetes",
      "readOnly": false
    }
  ]

@@ -98,7 +98,7 @@
    },
    { "name": "etc",
      "hostPath": {
        "path": "{{ srv_kube_path }}"}
        "path": "/etc/srv/kubernetes"}
    }
  ]
}}
8
vendor/k8s.io/kubernetes/cluster/gce/manifests/glbc.manifest
generated
vendored
@@ -1,20 +1,20 @@
apiVersion: v1
kind: Pod
metadata:
  name: l7-lb-controller-v1.1.1
  name: l7-lb-controller-v1.2.3
  namespace: kube-system
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ''
    seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
  labels:
    k8s-app: gcp-lb-controller
    version: v1.1.1
    version: v1.2.3
    kubernetes.io/name: "GLBC"
spec:
  terminationGracePeriodSeconds: 600
  hostNetwork: true
  containers:
  - image: k8s.gcr.io/ingress-gce-glbc-amd64:v1.1.1
  - image: k8s.gcr.io/ingress-gce-glbc-amd64:v1.2.3
    livenessProbe:
      httpGet:
        path: /healthz

@@ -45,7 +45,7 @@ spec:
    # TODO: split this out into args when we no longer need to pipe stdout to a file #6428
    - sh
    - -c
    - 'exec /glbc --gce-ratelimit=ga.Operations.Get,qps,10,100 --gce-ratelimit=alpha.Operations.Get,qps,10,100 --gce-ratelimit=ga.BackendServices.Get,qps,1.8,1 --gce-ratelimit=ga.HealthChecks.Get,qps,1.8,1 --gce-ratelimit=alpha.HealthChecks.Get,qps,1.8,1 --verbose --apiserver-host=http://localhost:8080 --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1'
    - 'exec /glbc --gce-ratelimit=ga.Operations.Get,qps,10,100 --gce-ratelimit=alpha.Operations.Get,qps,10,100 --gce-ratelimit=ga.BackendServices.Get,qps,1.8,1 --gce-ratelimit=ga.HealthChecks.Get,qps,1.8,1 --gce-ratelimit=alpha.HealthChecks.Get,qps,1.8,1 --gce-ratelimit=beta.NetworkEndpointGroups.Get,qps,1.8,1 --gce-ratelimit=beta.NetworkEndpointGroups.AttachNetworkEndpoints,qps,1.8,1 --gce-ratelimit=beta.NetworkEndpointGroups.DetachNetworkEndpoints,qps,1.8,1 --gce-ratelimit=beta.NetworkEndpointGroups.ListNetworkEndpoints,qps,1.8,1 --verbose --apiserver-host=http://localhost:8080 --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1'
  volumes:
  - hostPath:
      path: /etc/gce.conf
5
vendor/k8s.io/kubernetes/cluster/gce/manifests/kube-addon-manager.yaml
generated
vendored
@@ -14,7 +14,7 @@ spec:
  - name: kube-addon-manager
    # When updating version also bump it in:
    # - test/kubemark/resources/manifests/kube-addon-manager.yaml
    image: k8s.gcr.io/kube-addon-manager:v8.6
    image: k8s.gcr.io/kube-addon-manager:v8.7
    command:
    - /bin/bash
    - -c

@@ -30,6 +30,9 @@ spec:
    - mountPath: /var/log
      name: varlog
      readOnly: false
    env:
    - name: KUBECTL_EXTRA_PRUNE_WHITELIST
      value: {{kubectl_extra_prune_whitelist}}
  volumes:
  - hostPath:
      path: /etc/kubernetes/
4
vendor/k8s.io/kubernetes/cluster/gce/manifests/kube-apiserver.manifest
generated
vendored
@@ -60,7 +60,7 @@
    {{admission_controller_config_mount}}
    {{image_policy_webhook_config_mount}}
    { "name": "srvkube",
      "mountPath": "{{srv_kube_path}}",
      "mountPath": "/etc/srv/kubernetes",
      "readOnly": true},
    { "name": "logfile",
      "mountPath": "/var/log/kube-apiserver.log",

@@ -102,7 +102,7 @@
    {{image_policy_webhook_config_volume}}
    { "name": "srvkube",
      "hostPath": {
        "path": "{{srv_kube_path}}"}
        "path": "/etc/srv/kubernetes"}
    },
    { "name": "logfile",
      "hostPath": {
6
vendor/k8s.io/kubernetes/cluster/gce/manifests/kube-controller-manager.manifest
generated
vendored
@@ -21,7 +21,7 @@
    "image": "{{pillar['kube_docker_registry']}}/kube-controller-manager:{{pillar['kube-controller-manager_docker_tag']}}",
    "resources": {
      "requests": {
        "cpu": "200m"
        "cpu": "{{cpurequest}}"
      }
    },
    "command": [

@@ -44,7 +44,7 @@
    {{additional_cloud_config_mount}}
    {{pv_recycler_mount}}
    { "name": "srvkube",
      "mountPath": "{{srv_kube_path}}",
      "mountPath": "/etc/srv/kubernetes",
      "readOnly": true},
    {{flexvolume_hostpath_mount}}
    { "name": "logfile",

@@ -74,7 +74,7 @@
    {{pv_recycler_volume}}
    { "name": "srvkube",
      "hostPath": {
        "path": "{{srv_kube_path}}"}
        "path": "/etc/srv/kubernetes"}
    },
    {{flexvolume_hostpath}}
    { "name": "logfile",
5
vendor/k8s.io/kubernetes/cluster/gce/manifests/kube-proxy.manifest
generated
vendored
@@ -6,15 +6,14 @@ metadata:
  # This annotation ensures that kube-proxy does not get evicted if the node
  # supports critical pod annotation based priority scheme.
  # Note that kube-proxy runs as a static pod so this annotation does NOT have
  # any effect on rescheduler (default scheduler and rescheduler are not
  # involved in scheduling kube-proxy).
  # any effect on the default scheduler, which does not schedule kube-proxy.
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ''
  labels:
    tier: node
    component: kube-proxy
spec:
  {{pod_priority}}
  priorityClassName: system-node-critical
  hostNetwork: true
  tolerations:
  - operator: "Exists"
6
vendor/k8s.io/kubernetes/cluster/gce/manifests/kube-scheduler.manifest
generated
vendored
@@ -21,7 +21,7 @@
    "image": "{{pillar['kube_docker_registry']}}/kube-scheduler:{{pillar['kube-scheduler_docker_tag']}}",
    "resources": {
      "requests": {
        "cpu": "75m"
        "cpu": "{{cpurequest}}"
      }
    },
    "command": [

@@ -46,7 +46,7 @@
    },
    {
      "name": "srvkube",
      "mountPath": "{{srv_kube_path}}",
      "mountPath": "/etc/srv/kubernetes",
      "readOnly": true
    }
  ]

@@ -55,7 +55,7 @@
  "volumes":[
    {
      "name": "srvkube",
      "hostPath": {"path": "{{srv_kube_path}}"}
      "hostPath": {"path": "/etc/srv/kubernetes"}
    },
    {
      "name": "logfile",
36
vendor/k8s.io/kubernetes/cluster/gce/manifests/rescheduler.manifest
generated
vendored
@@ -1,36 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: rescheduler-v0.4.0
  namespace: kube-system
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ''
  labels:
    k8s-app: rescheduler
    version: v0.4.0
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "Rescheduler"
spec:
  hostNetwork: true
  containers:
  - image: k8s.gcr.io/rescheduler:v0.4.0
    name: rescheduler
    volumeMounts:
    - mountPath: /var/log/rescheduler.log
      name: logfile
      readOnly: false
    # TODO: Make resource requirements depend on the size of the cluster
    resources:
      requests:
        cpu: 10m
        memory: 100Mi
    command:
    # TODO: split this out into args when we no longer need to pipe stdout to a file #6428
    - sh
    - -c
    - 'exec /rescheduler --running-in-cluster=false 1>>/var/log/rescheduler.log 2>&1'
  volumes:
  - hostPath:
      path: /var/log/rescheduler.log
      type: FileOrCreate
    name: logfile
4
vendor/k8s.io/kubernetes/cluster/gce/upgrade-aliases.sh
generated
vendored
@@ -161,8 +161,8 @@ export KUBE_GCE_ENABLE_IP_ALIASES=true
export SECONDARY_RANGE_NAME="pods-default"
export STORAGE_BACKEND="etcd3"
export STORAGE_MEDIA_TYPE="application/vnd.kubernetes.protobuf"
export ETCD_IMAGE=3.2.18-0
export ETCD_VERSION=3.2.18
export ETCD_IMAGE=3.2.24-1
export ETCD_VERSION=3.2.24

# Upgrade master with updated kube envs
${KUBE_ROOT}/cluster/gce/upgrade.sh -M -l
126
vendor/k8s.io/kubernetes/cluster/gce/util.sh
generated
vendored
@@ -109,6 +109,20 @@ function split_csv() {
# Verify prereqs
function verify-prereqs() {
  local cmd

  # we use openssl to generate certs
  kube::util::test_openssl_installed

  # ensure a version supported by easyrsa is installed
  if [ "$(openssl version | cut -d\ -f1)" == "LibreSSL" ]; then
    echo "LibreSSL is not supported. Please ensure openssl points to an OpenSSL binary"
    if [ "$(uname -s)" == "Darwin" ]; then
      echo 'On macOS we recommend using homebrew and adding "$(brew --prefix openssl)/bin" to your PATH'
    fi
    exit 1
  fi

  # we use gcloud to create the cluster, gsutil to stage binaries and data
  for cmd in gcloud gsutil; do
    if ! which "${cmd}" >/dev/null; then
      local resp="n"

@@ -830,7 +844,6 @@ ENABLE_NODE_PROBLEM_DETECTOR: $(yaml-quote ${ENABLE_NODE_PROBLEM_DETECTOR:-none}
NODE_PROBLEM_DETECTOR_VERSION: $(yaml-quote ${NODE_PROBLEM_DETECTOR_VERSION:-})
NODE_PROBLEM_DETECTOR_TAR_HASH: $(yaml-quote ${NODE_PROBLEM_DETECTOR_TAR_HASH:-})
ENABLE_NODE_LOGGING: $(yaml-quote ${ENABLE_NODE_LOGGING:-false})
ENABLE_RESCHEDULER: $(yaml-quote ${ENABLE_RESCHEDULER:-false})
LOGGING_DESTINATION: $(yaml-quote ${LOGGING_DESTINATION:-})
ELASTICSEARCH_LOGGING_REPLICAS: $(yaml-quote ${ELASTICSEARCH_LOGGING_REPLICAS:-})
ENABLE_CLUSTER_DNS: $(yaml-quote ${ENABLE_CLUSTER_DNS:-false})

@@ -851,7 +864,6 @@ KUBELET_CERT: $(yaml-quote ${KUBELET_CERT_BASE64:-})
KUBELET_KEY: $(yaml-quote ${KUBELET_KEY_BASE64:-})
NETWORK_PROVIDER: $(yaml-quote ${NETWORK_PROVIDER:-})
NETWORK_POLICY_PROVIDER: $(yaml-quote ${NETWORK_POLICY_PROVIDER:-})
PREPULL_E2E_IMAGES: $(yaml-quote ${PREPULL_E2E_IMAGES:-})
HAIRPIN_MODE: $(yaml-quote ${HAIRPIN_MODE:-})
E2E_STORAGE_TEST_ENVIRONMENT: $(yaml-quote ${E2E_STORAGE_TEST_ENVIRONMENT:-})
KUBE_DOCKER_REGISTRY: $(yaml-quote ${KUBE_DOCKER_REGISTRY:-})

@@ -859,13 +871,12 @@ KUBE_ADDON_REGISTRY: $(yaml-quote ${KUBE_ADDON_REGISTRY:-})
MULTIZONE: $(yaml-quote ${MULTIZONE:-})
NON_MASQUERADE_CIDR: $(yaml-quote ${NON_MASQUERADE_CIDR:-})
ENABLE_DEFAULT_STORAGE_CLASS: $(yaml-quote ${ENABLE_DEFAULT_STORAGE_CLASS:-})
ENABLE_APISERVER_BASIC_AUDIT: $(yaml-quote ${ENABLE_APISERVER_BASIC_AUDIT:-})
ENABLE_APISERVER_ADVANCED_AUDIT: $(yaml-quote ${ENABLE_APISERVER_ADVANCED_AUDIT:-})
ENABLE_CACHE_MUTATION_DETECTOR: $(yaml-quote ${ENABLE_CACHE_MUTATION_DETECTOR:-false})
ENABLE_PATCH_CONVERSION_DETECTOR: $(yaml-quote ${ENABLE_PATCH_CONVERSION_DETECTOR:-false})
ADVANCED_AUDIT_POLICY: $(yaml-quote ${ADVANCED_AUDIT_POLICY:-})
ADVANCED_AUDIT_BACKEND: $(yaml-quote ${ADVANCED_AUDIT_BACKEND:-log})
ADVANCED_AUDIT_TRUNCATING_BACKEND: $(yaml-quote ${ADVANCED_AUDIT_TRUNCATING_BACKEND:-})
ADVANCED_AUDIT_TRUNCATING_BACKEND: $(yaml-quote ${ADVANCED_AUDIT_TRUNCATING_BACKEND:-true})
ADVANCED_AUDIT_LOG_MODE: $(yaml-quote ${ADVANCED_AUDIT_LOG_MODE:-})
ADVANCED_AUDIT_LOG_BUFFER_SIZE: $(yaml-quote ${ADVANCED_AUDIT_LOG_BUFFER_SIZE:-})
ADVANCED_AUDIT_LOG_MAX_BATCH_SIZE: $(yaml-quote ${ADVANCED_AUDIT_LOG_MAX_BATCH_SIZE:-})

@@ -886,6 +897,7 @@ ENABLE_NODE_JOURNAL: $(yaml-quote ${ENABLE_NODE_JOURNAL:-false})
PROMETHEUS_TO_SD_ENDPOINT: $(yaml-quote ${PROMETHEUS_TO_SD_ENDPOINT:-})
PROMETHEUS_TO_SD_PREFIX: $(yaml-quote ${PROMETHEUS_TO_SD_PREFIX:-})
ENABLE_PROMETHEUS_TO_SD: $(yaml-quote ${ENABLE_PROMETHEUS_TO_SD:-false})
DISABLE_PROMETHEUS_TO_SD_IN_DS: $(yaml-quote ${DISABLE_PROMETHEUS_TO_SD_IN_DS:-false})
ENABLE_POD_PRIORITY: $(yaml-quote ${ENABLE_POD_PRIORITY:-})
CONTAINER_RUNTIME: $(yaml-quote ${CONTAINER_RUNTIME:-})
CONTAINER_RUNTIME_ENDPOINT: $(yaml-quote ${CONTAINER_RUNTIME_ENDPOINT:-})

@@ -898,8 +910,13 @@ VOLUME_PLUGIN_DIR: $(yaml-quote ${VOLUME_PLUGIN_DIR})
KUBELET_ARGS: $(yaml-quote ${KUBELET_ARGS})
REQUIRE_METADATA_KUBELET_CONFIG_FILE: $(yaml-quote true)
ENABLE_NETD: $(yaml-quote ${ENABLE_NETD:-false})
ENABLE_NODE_TERMINATION_HANDLER: $(yaml-quote ${ENABLE_NODE_TERMINATION_HANDLER:-false})
CUSTOM_NETD_YAML: |
$(echo "${CUSTOM_NETD_YAML:-}" | sed -e "s/'/''/g")
CUSTOM_CALICO_NODE_DAEMONSET_YAML: |
$(echo "${CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}" | sed -e "s/'/''/g")
CUSTOM_TYPHA_DEPLOYMENT_YAML: |
$(echo "${CUSTOM_TYPHA_DEPLOYMENT_YAML:-}" | sed -e "s/'/''/g")
EOF
  if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "gci" ]] || \
     [[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "gci" ]] || \

@@ -1053,6 +1070,16 @@ EOF
  if [ -n "${ETCD_EXTRA_ARGS:-}" ]; then
    cat >>$file <<EOF
ETCD_EXTRA_ARGS: $(yaml-quote ${ETCD_EXTRA_ARGS})
EOF
  fi
  if [ -n "${ETCD_SERVERS:-}" ]; then
    cat >>$file <<EOF
ETCD_SERVERS: $(yaml-quote ${ETCD_SERVERS})
EOF
  fi
  if [ -n "${ETCD_SERVERS_OVERRIDES:-}" ]; then
    cat >>$file <<EOF
ETCD_SERVERS_OVERRIDES: $(yaml-quote ${ETCD_SERVERS_OVERRIDES})
EOF
  fi
  if [ -n "${APISERVER_TEST_ARGS:-}" ]; then

@@ -1088,11 +1115,6 @@ EOF
  if [ -n "${INITIAL_ETCD_CLUSTER_STATE:-}" ]; then
    cat >>$file <<EOF
INITIAL_ETCD_CLUSTER_STATE: $(yaml-quote ${INITIAL_ETCD_CLUSTER_STATE})
EOF
  fi
  if [ -n "${ETCD_QUORUM_READ:-}" ]; then
    cat >>$file <<EOF
ETCD_QUORUM_READ: $(yaml-quote ${ETCD_QUORUM_READ})
EOF
  fi
  if [ -n "${CLUSTER_SIGNING_DURATION:-}" ]; then

@@ -1594,7 +1616,7 @@ function validate-node-local-ssds-ext(){
  ssdopts="${1}"

  if [[ -z "${ssdopts[0]}" || -z "${ssdopts[1]}" || -z "${ssdopts[2]}" ]]; then
    echo -e "${color_red}Local SSD: NODE_LOCAL_SSDS_EXT is malformed, found ${ssdopts[0]-_},${ssdopts[1]-_},${ssdopts[2]-_} ${color_norm}" >&2
    exit 2
  fi
  if [[ "${ssdopts[1]}" != "scsi" && "${ssdopts[1]}" != "nvme" ]]; then
@@ -2317,6 +2339,8 @@ function create-nodes() {
  if [[ -z "${HEAPSTER_MACHINE_TYPE:-}" ]]; then
    local -r nodes="${NUM_NODES}"
  else
    echo "Creating a special node for heapster with machine-type ${HEAPSTER_MACHINE_TYPE}"
    create-heapster-node
    local -r nodes=$(( NUM_NODES - 1 ))
  fi

@@ -2344,13 +2368,9 @@ function create-nodes() {
    gcloud compute instance-groups managed wait-until-stable \
      "${group_name}" \
      --zone "${ZONE}" \
      --project "${PROJECT}" || true;
      --project "${PROJECT}" \
      --timeout "${MIG_WAIT_UNTIL_STABLE_TIMEOUT}" || true;
  done

  if [[ -n "${HEAPSTER_MACHINE_TYPE:-}" ]]; then
    echo "Creating a special node for heapster with machine-type ${HEAPSTER_MACHINE_TYPE}"
    create-heapster-node
  fi
}

# Assumes:

@@ -2943,75 +2963,6 @@ function check-resources() {
  return 0
}

# Prepare to push new binaries to a kubernetes cluster
# $1 - whether to prepare the push to nodes
function prepare-push() {
  local node="${1-}"
  # TODO(dawnchen): figure out how to upgrade a Container Linux node
  if [[ "${node}" == "true" && "${NODE_OS_DISTRIBUTION}" != "debian" ]]; then
    echo "Updating nodes in a kubernetes cluster with ${NODE_OS_DISTRIBUTION} is not supported yet." >&2
    exit 1
  fi
  if [[ "${node}" != "true" && "${MASTER_OS_DISTRIBUTION}" != "debian" ]]; then
    echo "Updating the master in a kubernetes cluster with ${MASTER_OS_DISTRIBUTION} is not supported yet." >&2
    exit 1
  fi

  OUTPUT=${KUBE_ROOT}/_output/logs
  mkdir -p ${OUTPUT}

  kube::util::ensure-temp-dir
  detect-project
  detect-master
  detect-node-names
  get-kubeconfig-basicauth
  get-kubeconfig-bearertoken

  # Make sure we have the tar files staged on Google Storage
  tars_from_version

  # Prepare node env vars and update MIG template
  if [[ "${node}" == "true" ]]; then
    write-node-env

    local scope_flags=$(get-scope-flags)

    # Ugly hack: since it is not possible to delete an instance-template that
    # is currently in use, create a temp one, then delete the old one and
    # recreate it once again.
    local tmp_template_name="${NODE_INSTANCE_PREFIX}-template-tmp"
    create-node-instance-template $tmp_template_name

    local template_name="${NODE_INSTANCE_PREFIX}-template"
    for group in ${INSTANCE_GROUPS[@]:-}; do
      gcloud compute instance-groups managed \
        set-instance-template "${group}" \
        --template "$tmp_template_name" \
        --zone "${ZONE}" \
        --project "${PROJECT}" || true;
    done

    gcloud compute instance-templates delete \
      --project "${PROJECT}" \
      --quiet \
      "$template_name" || true

    create-node-instance-template "$template_name"

    for group in ${INSTANCE_GROUPS[@]:-}; do
      gcloud compute instance-groups managed \
        set-instance-template "${group}" \
        --template "$template_name" \
        --zone "${ZONE}" \
        --project "${PROJECT}" || true;
    done

    gcloud compute instance-templates delete \
      --project "${PROJECT}" \
      --quiet \
      "$tmp_template_name" || true
  fi
}

# -----------------------------------------------------------------------------
# Cluster specific test helpers used from hack/e2e.go

@@ -3120,3 +3071,8 @@ function ssh-to-node() {
function prepare-e2e() {
  detect-project
}

# Delete the image given by $1.
function delete-image() {
  gcloud container images delete --quiet "$1"
}
8
vendor/k8s.io/kubernetes/cluster/get-kube-local.sh
generated
vendored
@@ -55,8 +55,9 @@ function run {
}

# Creates a kubeconfig file for the kubelet.
# Args: destination file path
# Args: the IP address of the API server (e.g. "http://localhost:8080"), destination file path
function create-kubelet-kubeconfig() {
  local api_addr="${1}"
  local destination="${2}"
  if [[ -z "${destination}" ]]; then
    echo "Must provide destination path to create Kubelet kubeconfig file!"

@@ -85,12 +86,13 @@ EOF
function create_cluster {
  echo "Creating a local cluster:"
  echo -e -n "\tStarting kubelet..."
  create-kubelet-kubeconfig "${KUBELET_KUBECONFIG}"
  create-kubelet-kubeconfig "http://localhost:8080" "${KUBELET_KUBECONFIG}"
  run "docker run \
    --volume=/:/rootfs:ro \
    --volume=/sys:/sys:ro \
    --volume=/var/lib/docker/:/var/lib/docker:rw \
    --volume=/var/lib/kubelet/:/var/lib/kubelet:rw \
    --volume=/usr/libexec/kubernetes/kubelet-plugins/volume/exec:/usr/libexec/kubernetes/kubelet-plugins/volume/exec:rw \
    --volume=/var/run:/var/run:rw \
    --volume=/run/xtables.lock:/run/xtables.lock:rw \
    --net=host \

@@ -102,7 +104,7 @@ function create_cluster {
    --containerized \
    --hostname-override="127.0.0.1" \
    --address="0.0.0.0" \
    --kubeconfig=${KUBELET_KUBECONFIG}/kubelet.kubeconfig \
    --kubeconfig=${KUBELET_KUBECONFIG} \
    --pod-manifest-path=/etc/kubernetes/manifests \
    --allow-privileged=true \
    --cluster-dns=10.0.0.10 \
4
vendor/k8s.io/kubernetes/cluster/images/etcd-empty-dir-cleanup/Makefile
generated
vendored
@@ -14,13 +14,13 @@

.PHONY: build push

ETCD_VERSION = 3.2.18
ETCD_VERSION = 3.2.24
# Image should be pulled from k8s.gcr.io, which will auto-detect
# region (us, eu, asia, ...) and pull from the closest.
REGISTRY = k8s.gcr.io
# Images should be pushed to staging-k8s.gcr.io.
PUSH_REGISTRY = staging-k8s.gcr.io
TAG = 3.2.18.0
TAG = 3.2.24.0

clean:
	rm -rf etcdctl etcd-v$(ETCD_VERSION)-linux-amd64 etcd-v$(ETCD_VERSION)-linux-amd64.tar.gz
Some files were not shown because too many files have changed in this diff.