Bumping k8s dependencies to 1.13

This commit is contained in:
Cheng Xing
2018-11-16 14:08:25 -08:00
parent 305407125c
commit b4c0b68ec7
8002 changed files with 884099 additions and 276228 deletions

View File

@@ -1,3 +1,7 @@
### Version 8.7 (Tue September 4 2018 Zihong Zheng <zihongz@google.com>)
- Support extra `--prune-whitelist` resources in kube-addon-manager.
- Update kubectl to v1.10.7.
### Version 8.6 (Tue February 20 2018 Zihong Zheng <zihongz@google.com>)
- Allow reconcile/ensure loop to work with resources under non-kube-system namespaces.
- Update kubectl to v1.9.3.

View File

@@ -15,8 +15,8 @@
IMAGE=staging-k8s.gcr.io/kube-addon-manager
ARCH?=amd64
TEMP_DIR:=$(shell mktemp -d)
VERSION=v8.6
KUBECTL_VERSION?=v1.9.3
VERSION=v8.7
KUBECTL_VERSION?=v1.10.7
ifeq ($(ARCH),amd64)
BASEIMAGE?=bashell/alpine-bash

View File

@@ -28,6 +28,29 @@
KUBECTL=${KUBECTL_BIN:-/usr/local/bin/kubectl}
KUBECTL_OPTS=${KUBECTL_OPTS:-}
# KUBECTL_PRUNE_WHITELIST is a list of resources whitelisted by
# default.
# This is currently the same as the default in:
# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/apply.go
KUBECTL_PRUNE_WHITELIST=(
  core/v1/ConfigMap
  core/v1/Endpoints
  core/v1/Namespace
  core/v1/PersistentVolumeClaim
  core/v1/PersistentVolume
  core/v1/Pod
  core/v1/ReplicationController
  core/v1/Secret
  core/v1/Service
  batch/v1/Job
  batch/v1beta1/CronJob
  extensions/v1beta1/DaemonSet
  extensions/v1beta1/Deployment
  extensions/v1beta1/Ingress
  extensions/v1beta1/ReplicaSet
  apps/v1beta1/StatefulSet
  apps/v1beta1/Deployment
)
ADDON_CHECK_INTERVAL_SEC=${TEST_ADDON_CHECK_INTERVAL_SEC:-60}
ADDON_PATH=${ADDON_PATH:-/etc/kubernetes/addons}
@@ -82,6 +105,25 @@ function log() {
esac
}
# Generate kubectl prune-whitelist flags from provided resource list.
function generate_prune_whitelist_flags() {
  local -r resources=($@)
  for resource in "${resources[@]}"; do
    printf "%s" "--prune-whitelist ${resource} "
  done
}
# KUBECTL_EXTRA_PRUNE_WHITELIST is a list of extra whitelisted resources
# besides the default ones.
extra_prune_whitelist=
if [ -n "${KUBECTL_EXTRA_PRUNE_WHITELIST:-}" ]; then
  extra_prune_whitelist=( ${KUBECTL_EXTRA_PRUNE_WHITELIST:-} )
fi
prune_whitelist=( ${KUBECTL_PRUNE_WHITELIST[@]} ${extra_prune_whitelist[@]} )
prune_whitelist_flags=$(generate_prune_whitelist_flags ${prune_whitelist[@]})
log INFO "== Generated kubectl prune whitelist flags: $prune_whitelist_flags =="
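For illustration, a minimal sketch of how the block above consumes `KUBECTL_EXTRA_PRUNE_WHITELIST` (the two resource types are hypothetical examples, not defaults):

```
# Hypothetical example: whitelist two extra resource types for pruning.
# Entries use the same <group>/<version>/<Kind> form as the defaults above.
export KUBECTL_EXTRA_PRUNE_WHITELIST="rbac.authorization.k8s.io/v1/ClusterRole storage.k8s.io/v1/StorageClass"

# generate_prune_whitelist_flags then expands the combined list to:
#   ... --prune-whitelist rbac.authorization.k8s.io/v1/ClusterRole --prune-whitelist storage.k8s.io/v1/StorageClass
```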
# $1 filename of addon to start.
# $2 count of tries to start the addon.
# $3 delay in seconds between two consecutive tries
@@ -126,12 +168,12 @@ function reconcile_addons() {
log INFO "== Reconciling with deprecated label =="
${KUBECTL} ${KUBECTL_OPTS} apply -f ${ADDON_PATH} \
-l ${CLUSTER_SERVICE_LABEL}=true,${ADDON_MANAGER_LABEL}!=EnsureExists \
--prune=true --recursive | grep -v configured
--prune=true ${prune_whitelist_flags} --recursive | grep -v configured
log INFO "== Reconciling with addon-manager label =="
${KUBECTL} ${KUBECTL_OPTS} apply -f ${ADDON_PATH} \
-l ${CLUSTER_SERVICE_LABEL}!=true,${ADDON_MANAGER_LABEL}=Reconcile \
--prune=true --recursive | grep -v configured
--prune=true ${prune_whitelist_flags} --recursive | grep -v configured
log INFO "== Kubernetes addon reconcile completed at $(date -Is) =="
}
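Expanded, each reconcile pass now runs a command of roughly this shape (a sketch assuming the script's usual label values and two of the default whitelist entries):

```
kubectl apply -f /etc/kubernetes/addons \
  -l kubernetes.io/cluster-service=true,addonmanager.kubernetes.io/mode!=EnsureExists \
  --prune=true \
  --prune-whitelist core/v1/ConfigMap \
  --prune-whitelist core/v1/Service \
  --recursive
```

The `grep -v configured` in the script merely keeps the log quiet about resources that did not change.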

View File

@@ -10,6 +10,7 @@ rules:
- apiGroups: [""]
resources:
- namespaces
- serviceaccounts
verbs:
- get
- list
@@ -36,6 +37,7 @@ rules:
- get
- list
- watch
- patch
- apiGroups: [""]
resources:
- nodes
@@ -51,17 +53,28 @@ rules:
- get
- list
- watch
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- clusterinformations
- hostendpoints
verbs:
- create
- get
- list
- update
- patch
- delete
- watch

View File

@@ -41,18 +41,22 @@ spec:
value: "none"
- name: DATASTORE_TYPE
value: "kubernetes"
- name: FELIX_TYPHAK8SSERVICENAME
value: "calico-typha"
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
- name: FELIX_HEALTHENABLED
value: "true"
- name: FELIX_IPV6SUPPORT
value: "false"
- name: FELIX_LOGSEVERITYSYS
value: "none"
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "true"
- name: FELIX_HEALTHENABLED
value: "true"
- name: FELIX_REPORTINGINTERVALSECS
value: "0"
- name: FELIX_TYPHAK8SSERVICENAME
value: "calico-typha"
- name: IP
value: ""
- name: NO_DEFAULT_POOLS
@@ -84,6 +88,12 @@ spec:
- mountPath: /etc/calico
name: etc-calico
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
@@ -149,6 +159,12 @@ spec:
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
tolerations:
# Make sure calico/node gets scheduled on all nodes.
- effect: NoSchedule

View File

@@ -0,0 +1,15 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: clusterinformations.crd.projectcalico.org
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: ClusterInformation
    plural: clusterinformations
    singular: clusterinformation

View File

@@ -0,0 +1,15 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: felixconfigurations.crd.projectcalico.org
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: FelixConfiguration
    plural: felixconfigurations
    singular: felixconfiguration

View File

@@ -0,0 +1,15 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: globalnetworksets.crd.projectcalico.org
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalNetworkSet
    plural: globalnetworksets
    singular: globalnetworkset

View File

@@ -0,0 +1,15 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: hostendpoints.crd.projectcalico.org
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: HostEndpoint
    plural: hostendpoints
    singular: hostendpoint

View File

@@ -0,0 +1,15 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: networkpolicies.crd.projectcalico.org
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  scope: Namespaced
  group: crd.projectcalico.org
  version: v1
  names:
    kind: NetworkPolicy
    plural: networkpolicies
    singular: networkpolicy
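Once the addon manager applies these manifests, the new Calico CRDs should be listable; a quick check:

```
# List the Calico CRDs registered by the manifests above.
kubectl get crd | grep crd.projectcalico.org
```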

View File

@@ -44,6 +44,8 @@ spec:
value: "9093"
- name: TYPHA_DATASTORETYPE
value: "kubernetes"
- name: TYPHA_REPORTINGINTERVALSECS
value: "0"
- name: TYPHA_MAXCONNECTIONSLOWERLIMIT
value: "1"
- name: TYPHA_HEALTHENABLED

View File

@@ -0,0 +1,14 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: typha-cpva
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["list"]
  - apiGroups: ["apps", "extensions"]
    resources: ["deployments"]
    verbs: ["patch"]

View File

@@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: typha-cpva
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: typha-cpva
subjects:
  - kind: ServiceAccount
    name: typha-cpva
    namespace: kube-system

View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: typha-cpva
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
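A hedged way to confirm this RBAC trio grants what the Typha autoscaler needs is `kubectl auth can-i` impersonating the new ServiceAccount:

```
# Both commands should print "yes" once the ClusterRole/Binding are applied.
kubectl auth can-i list nodes --as=system:serviceaccount:kube-system:typha-cpva
kubectl auth can-i patch deployments --as=system:serviceaccount:kube-system:typha-cpva
```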

View File

@@ -25,7 +25,7 @@ spec:
# Any image is permissible as long as:
# 1. It serves a 404 page at /
# 2. It serves 200 on a /healthz endpoint
image: k8s.gcr.io/defaultbackend:1.4
image: k8s.gcr.io/defaultbackend-amd64:1.5
livenessProbe:
httpGet:
path: /healthz
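The two-point contract above is easy to spot-check; a sketch assuming the backend is port-forwarded to localhost:8080 (the deployment name is an assumption):

```
# e.g. kubectl -n kube-system port-forward deploy/l7-default-backend 8080:8080
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:8080/         # expect 404
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:8080/healthz  # expect 200
```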

View File

@@ -36,31 +36,31 @@ data:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.5.3
name: heapster-v1.6.0-beta.1
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.5.3
version: v1.6.0-beta.1
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.5.3
version: v1.6.0-beta.1
template:
metadata:
labels:
k8s-app: heapster
version: v1.5.3
version: v1.6.0-beta.1
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-cluster-critical
containers:
- image: k8s.gcr.io/heapster-amd64:v1.5.3
- image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
name: heapster
livenessProbe:
httpGet:
@@ -73,13 +73,13 @@ spec:
- /heapster
- --source=kubernetes.summary_api:''
- --sink=gcm
- image: k8s.gcr.io/heapster-amd64:v1.5.3
- image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
name: eventer
command:
- /eventer
- --source=kubernetes:''
- --sink=gcl
- image: k8s.gcr.io/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.3
name: heapster-nanny
resources:
limits:
@@ -108,11 +108,14 @@ spec:
- --memory={{ base_metrics_memory }}
- --extra-memory={{metrics_memory_per_node}}Mi
- --threshold=5
- --deployment=heapster-v1.5.3
- --deployment=heapster-v1.6.0-beta.1
- --container=heapster
- --poll-period=300000
- --estimator=exponential
- image: k8s.gcr.io/addon-resizer:1.8.1
# Specifies the smallest cluster (defined in number of nodes)
# resources will be scaled to.
- --minClusterSize={{ heapster_min_cluster_size }}
- image: k8s.gcr.io/addon-resizer:1.8.2
name: eventer-nanny
resources:
limits:
@@ -141,7 +144,7 @@ spec:
- --memory={{base_eventer_memory}}
- --extra-memory={{eventer_memory_per_node}}Ki
- --threshold=5
- --deployment=heapster-v1.5.3
- --deployment=heapster-v1.6.0-beta.1
- --container=eventer
- --poll-period=300000
- --estimator=exponential
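The new `--minClusterSize` flag sets a floor on the node count the nanny sizes for; a sketch of the intended arithmetic (values invented for illustration, formula per my reading of addon-resizer):

```
# addon-resizer sizes resources as base + per_node * max(nodes, minClusterSize).
nodes=3; min_cluster_size=5; base_mb=140; per_node_mb=4
effective=$(( nodes > min_cluster_size ? nodes : min_cluster_size ))
echo "memory request: $(( base_mb + per_node_mb * effective ))Mi"   # -> 160Mi
```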

View File

@@ -36,31 +36,31 @@ data:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.5.3
name: heapster-v1.6.0-beta.1
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.5.3
version: v1.6.0-beta.1
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.5.3
version: v1.6.0-beta.1
template:
metadata:
labels:
k8s-app: heapster
version: v1.5.3
version: v1.6.0-beta.1
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-cluster-critical
containers:
- image: k8s.gcr.io/heapster-amd64:v1.5.3
- image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
name: heapster
livenessProbe:
httpGet:
@@ -74,13 +74,13 @@ spec:
- --source=kubernetes.summary_api:''
- --sink=influxdb:http://monitoring-influxdb:8086
- --sink=gcm:?metrics=autoscaling
- image: k8s.gcr.io/heapster-amd64:v1.5.3
- image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
name: eventer
command:
- /eventer
- --source=kubernetes:''
- --sink=gcl
- image: k8s.gcr.io/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.3
name: heapster-nanny
resources:
limits:
@@ -109,11 +109,14 @@ spec:
- --memory={{ base_metrics_memory }}
- --extra-memory={{ metrics_memory_per_node }}Mi
- --threshold=5
- --deployment=heapster-v1.5.3
- --deployment=heapster-v1.6.0-beta.1
- --container=heapster
- --poll-period=300000
- --estimator=exponential
- image: k8s.gcr.io/addon-resizer:1.8.1
# Specifies the smallest cluster (defined in number of nodes)
# resources will be scaled to.
- --minClusterSize={{ heapster_min_cluster_size }}
- image: k8s.gcr.io/addon-resizer:1.8.2
name: eventer-nanny
resources:
limits:
@@ -142,7 +145,7 @@ spec:
- --memory={{ base_eventer_memory }}
- --extra-memory={{ eventer_memory_per_node }}Ki
- --threshold=5
- --deployment=heapster-v1.5.3
- --deployment=heapster-v1.6.0-beta.1
- --container=eventer
- --poll-period=300000
- --estimator=exponential

View File

@@ -36,31 +36,31 @@ data:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.5.3
name: heapster-v1.6.0-beta.1
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.5.3
version: v1.6.0-beta.1
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.5.3
version: v1.6.0-beta.1
template:
metadata:
labels:
k8s-app: heapster
version: v1.5.3
version: v1.6.0-beta.1
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-cluster-critical
containers:
- image: k8s.gcr.io/heapster-amd64:v1.5.3
- image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
name: heapster
livenessProbe:
httpGet:
@@ -73,13 +73,13 @@ spec:
- /heapster
- --source=kubernetes.summary_api:''
- --sink=influxdb:http://monitoring-influxdb:8086
- image: k8s.gcr.io/heapster-amd64:v1.5.3
- image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
name: eventer
command:
- /eventer
- --source=kubernetes:''
- --sink=influxdb:http://monitoring-influxdb:8086
- image: k8s.gcr.io/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.3
name: heapster-nanny
resources:
limits:
@@ -108,11 +108,14 @@ spec:
- --memory={{ base_metrics_memory }}
- --extra-memory={{ metrics_memory_per_node }}Mi
- --threshold=5
- --deployment=heapster-v1.5.3
- --deployment=heapster-v1.6.0-beta.1
- --container=heapster
- --poll-period=300000
- --estimator=exponential
- image: k8s.gcr.io/addon-resizer:1.8.1
# Specifies the smallest cluster (defined in number of nodes)
# resources will be scaled to.
- --minClusterSize={{ heapster_min_cluster_size }}
- image: k8s.gcr.io/addon-resizer:1.8.2
name: eventer-nanny
resources:
limits:
@@ -141,7 +144,7 @@ spec:
- --memory={{ base_eventer_memory }}
- --extra-memory={{ eventer_memory_per_node }}Ki
- --threshold=5
- --deployment=heapster-v1.5.3
- --deployment=heapster-v1.6.0-beta.1
- --container=eventer
- --poll-period=300000
- --estimator=exponential

View File

@@ -23,31 +23,31 @@ data:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.5.3
name: heapster-v1.6.0-beta.1
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.5.3
version: v1.6.0-beta.1
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.5.3
version: v1.6.0-beta.1
template:
metadata:
labels:
k8s-app: heapster
version: v1.5.3
version: v1.6.0-beta.1
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-cluster-critical
containers:
- image: k8s.gcr.io/heapster-amd64:v1.5.3
- image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
name: heapster
livenessProbe:
httpGet:
@@ -57,12 +57,13 @@ spec:
initialDelaySeconds: 180
timeoutSeconds: 5
command:
# On GCP, container.googleapis.com/instance_id node annotation is used to provide instance_id label for Stackdriver
- /heapster
- --source=kubernetes.summary_api:''
- --source=kubernetes.summary_api:?host_id_annotation=container.googleapis.com/instance_id
- --sink=stackdriver:?cluster_name={{ cluster_name }}&use_old_resources={{ use_old_resources }}&use_new_resources={{ use_new_resources }}&min_interval_sec=100&batch_export_timeout_sec=110&cluster_location={{ cluster_location }}
# BEGIN_PROMETHEUS_TO_SD
- name: prom-to-sd
image: k8s.gcr.io/prometheus-to-sd:v0.2.4
image: k8s.gcr.io/prometheus-to-sd:v0.3.1
command:
- /monitor
- --source=heapster:http://localhost:8082?whitelisted=stackdriver_requests_count,stackdriver_timeseries_count
@@ -80,7 +81,7 @@ spec:
fieldRef:
fieldPath: metadata.namespace
# END_PROMETHEUS_TO_SD
- image: k8s.gcr.io/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.3
name: heapster-nanny
resources:
limits:
@@ -109,10 +110,13 @@ spec:
- --memory={{ base_metrics_memory }}
- --extra-memory={{metrics_memory_per_node}}Mi
- --threshold=5
- --deployment=heapster-v1.5.3
- --deployment=heapster-v1.6.0-beta.1
- --container=heapster
- --poll-period=300000
- --estimator=exponential
# Specifies the smallest cluster (defined in number of nodes)
# resources will be scaled to.
- --minClusterSize={{ heapster_min_cluster_size }}
volumes:
- name: heapster-config-volume
configMap:

View File

@@ -23,31 +23,31 @@ data:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.5.3
name: heapster-v1.6.0-beta.1
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.5.3
version: v1.6.0-beta.1
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.5.3
version: v1.6.0-beta.1
template:
metadata:
labels:
k8s-app: heapster
version: v1.5.3
version: v1.6.0-beta.1
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-cluster-critical
containers:
- image: k8s.gcr.io/heapster-amd64:v1.5.3
- image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
name: heapster
livenessProbe:
httpGet:
@@ -59,7 +59,7 @@ spec:
command:
- /heapster
- --source=kubernetes.summary_api:''
- image: k8s.gcr.io/addon-resizer:1.8.1
- image: k8s.gcr.io/addon-resizer:1.8.3
name: heapster-nanny
resources:
limits:
@@ -88,10 +88,13 @@ spec:
- --memory={{ base_metrics_memory }}
- --extra-memory={{ metrics_memory_per_node }}Mi
- --threshold=5
- --deployment=heapster-v1.5.3
- --deployment=heapster-v1.6.0-beta.1
- --container=heapster
- --poll-period=300000
- --estimator=exponential
# Specifies the smallest cluster (defined in number of nodes)
# resources will be scaled to.
- --minClusterSize={{ heapster_min_cluster_size }}
volumes:
- name: heapster-config-volume
configMap:

View File

@@ -1,6 +1,12 @@
approvers:
- floreks
- maciaszczykm
- bryk
reviewers:
- cheld
- cupofcat
- danielromlein
- floreks
- ianlewis
- konryd
- maciaszczykm
- mhenc
- rf232

View File

@@ -29,7 +29,7 @@ metadata:
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list"]
verbs: ["list", "watch"]
- apiGroups: [""]
resources: ["replicationcontrollers/scale"]
verbs: ["get", "update"]
@@ -82,7 +82,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: autoscaler
image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2
image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.2.0
resources:
requests:
cpu: "20m"

View File

@@ -66,7 +66,9 @@ data:
prometheus :9153
proxy . /etc/resolv.conf
cache 30
loop
reload
loadbalance
}
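The three added Corefile plugins are `loop` (detect forwarding loops and halt), `reload` (pick up Corefile changes without a restart), and `loadbalance` (round-robin the order of A/AAAA answers). To see the rendered Corefile in a live cluster (the ConfigMap name is an assumption):

```
kubectl -n kube-system get configmap coredns -o jsonpath='{.data.Corefile}'
```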
---
apiVersion: extensions/v1beta1
@@ -106,7 +108,7 @@ spec:
operator: "Exists"
containers:
- name: coredns
image: k8s.gcr.io/coredns:1.1.3
image: k8s.gcr.io/coredns:1.2.2
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -161,6 +163,7 @@ metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns

View File

@@ -66,7 +66,9 @@ data:
prometheus :9153
proxy . /etc/resolv.conf
cache 30
loop
reload
loadbalance
}
---
apiVersion: extensions/v1beta1
@@ -106,7 +108,7 @@ spec:
operator: "Exists"
containers:
- name: coredns
image: k8s.gcr.io/coredns:1.1.3
image: k8s.gcr.io/coredns:1.2.2
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -161,6 +163,7 @@ metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns

View File

@@ -66,7 +66,9 @@ data:
prometheus :9153
proxy . /etc/resolv.conf
cache 30
loop
reload
loadbalance
}
---
apiVersion: extensions/v1beta1
@@ -106,7 +108,7 @@ spec:
operator: "Exists"
containers:
- name: coredns
image: k8s.gcr.io/coredns:1.1.3
image: k8s.gcr.io/coredns:1.2.2
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -161,6 +163,7 @@ metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns

View File

@@ -9,7 +9,7 @@ can use the DNS Services IP to resolve DNS names.
## Manually scale kube-dns Deployment
kube-dns creates only one DNS Pod by default. If
[dns-horizontal-autoscaler](../dns-horizontal-autoscaler/)
[dns-horizontal-autoscaler](../../dns-horizontal-autoscaler/)
is not enabled, you may need to manually scale kube-dns Deployment.
Please use the `kubectl scale` command below to scale:
@@ -18,9 +18,9 @@ kubectl --namespace=kube-system scale deployment kube-dns --replicas=<NUM_YOU_WA
```
Do not use `kubectl edit` to modify kube-dns Deployment object if it is
controlled by [Addon Manager](../addon-manager/). Otherwise the modifications
controlled by [Addon Manager](../../addon-manager/). Otherwise the modifications
will be clobbered; in addition, the replicas count for kube-dns Deployment will
be reset to 1. See [Cluster add-ons README](../README.md) and
be reset to 1. See [Cluster add-ons README](../../README.md) and
[#36411](https://github.com/kubernetes/kubernetes/issues/36411) for reference.
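For example, the scale command above with an arbitrary replica count:

```
# Scale kube-dns to three replicas (count chosen only for illustration).
kubectl --namespace=kube-system scale deployment kube-dns --replicas=3
```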
## kube-dns addon templates

View File

@@ -96,7 +96,7 @@ spec:
optional: true
containers:
- name: kubedns
image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10
image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@@ -147,7 +147,7 @@ spec:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.13
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
@@ -166,6 +166,7 @@ spec:
- -k
- --cache-size=1000
- --no-negcache
- --dns-loop-detect
- --log-facility=-
- --server=/__PILLAR__DNS__DOMAIN__/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
@@ -186,7 +187,7 @@ spec:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10
image: k8s.gcr.io/k8s-dns-sidecar:1.14.13
livenessProbe:
httpGet:
path: /metrics

View File

@@ -96,7 +96,7 @@ spec:
optional: true
containers:
- name: kubedns
image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10
image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@@ -147,7 +147,7 @@ spec:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.13
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
@@ -166,6 +166,7 @@ spec:
- -k
- --cache-size=1000
- --no-negcache
- --dns-loop-detect
- --log-facility=-
- --server=/{{ pillar['dns_domain'] }}/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
@@ -186,7 +187,7 @@ spec:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10
image: k8s.gcr.io/k8s-dns-sidecar:1.14.13
livenessProbe:
httpGet:
path: /metrics

View File

@@ -96,7 +96,7 @@ spec:
optional: true
containers:
- name: kubedns
image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10
image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@@ -147,7 +147,7 @@ spec:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.13
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
@@ -166,6 +166,7 @@ spec:
- -k
- --cache-size=1000
- --no-negcache
- --dns-loop-detect
- --log-facility=-
- --server=/$DNS_DOMAIN/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
@@ -186,7 +187,7 @@ spec:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10
image: k8s.gcr.io/k8s-dns-sidecar:1.14.13
livenessProbe:
httpGet:
path: /metrics

View File

@@ -4,3 +4,5 @@ approvers:
reviewers:
- coffeepac
- piosz
labels:
- sig/instrumentation

View File

@@ -19,15 +19,16 @@ a Deployment, but allows for maintaining state on storage volumes.
### Security
Elasticsearch has capabilities to enable authorization using the
[X-Pack plugin][xPack]. See configuration parameter `xpack.security.enabled`
in Elasticsearch and Kibana configurations. It can also be set via the
`XPACK_SECURITY_ENABLED` env variable. After enabling the feature,
follow [official documentation][setupCreds] to set up credentials in
Elasticsearch and Kibana. Don't forget to propagate those credentials also to
Fluentd in its [configuration][fluentdCreds], using for example
[environment variables][fluentdEnvVar]. You can utilize [ConfigMaps][configMap]
and [Secrets][secret] to store credentials in the Kubernetes apiserver.
Elasticsearch has capabilities to enable authorization using the [X-Pack
plugin][xPack]. For the sake of simplicity, this example uses the fully open
source prebuilt images from Elastic that do not contain the X-Pack plugin. If
you need these features, please consider building the images from either the
"basic" or "platinum" version. After enabling these features, follow [official
documentation][setupCreds] to set up credentials in Elasticsearch and Kibana.
Don't forget to propagate those credentials also to Fluentd in its
[configuration][fluentdCreds], using for example [environment
variables][fluentdEnvVar]. You can utilize [ConfigMaps][configMap] and
[Secrets][secret] to store credentials in the Kubernetes apiserver.
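As a sketch of the ConfigMap/Secret approach mentioned above (the secret name and keys are invented for illustration):

```
# Store Elasticsearch credentials in a Secret; containers can then surface
# them to Fluentd/Kibana as environment variables.
kubectl -n kube-system create secret generic elasticsearch-credentials \
  --from-literal=username=elastic \
  --from-literal=password='change-me'
```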
### Initialization

View File

@@ -18,11 +18,11 @@ go_library(
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
],
)

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM docker.elastic.co/elasticsearch/elasticsearch:5.6.4
FROM docker.elastic.co/elasticsearch/elasticsearch-oss:6.3.2
VOLUME ["/data"]
EXPOSE 9200 9300

View File

@@ -16,7 +16,7 @@
PREFIX = staging-k8s.gcr.io
IMAGE = elasticsearch
TAG = v5.6.4
TAG = v6.3.0
build:
docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .
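Given the variables above, building the image is just the `build` target:

```
# Produces staging-k8s.gcr.io/elasticsearch:v6.3.0 per the Makefile variables.
make build
```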

View File

@@ -12,6 +12,3 @@ path.data: /data
network.host: 0.0.0.0
discovery.zen.minimum_master_nodes: ${MINIMUM_MASTER_NODES}
xpack.security.enabled: false
xpack.monitoring.enabled: false

View File

@@ -20,6 +20,7 @@ import (
"flag"
"fmt"
"os"
"strconv"
"strings"
"time"
@@ -104,7 +105,7 @@ func main() {
var endpoints *api.Endpoints
addrs := []string{}
// Wait for some endpoints.
count := 0
count, _ := strconv.Atoi(os.Getenv("MINIMUM_MASTER_NODES"))
for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
endpoints, err = client.Core().Endpoints(namespace).Get(serviceName, metav1.GetOptions{})
if err != nil {
@@ -115,7 +116,6 @@ func main() {
if len(addrs) > 0 && len(addrs) == count {
break
}
count = len(addrs)
}
// If there was an error finding endpoints then log a warning and quit.
if err != nil {

View File

@@ -26,4 +26,4 @@ export MINIMUM_MASTER_NODES=${MINIMUM_MASTER_NODES:-2}
chown -R elasticsearch:elasticsearch /data
./bin/elasticsearch_logging_discovery >> ./config/elasticsearch.yml
exec su elasticsearch -c ./bin/es-docker
exec su elasticsearch -c /usr/local/bin/docker-entrypoint.sh

View File

@@ -54,7 +54,7 @@ metadata:
namespace: kube-system
labels:
k8s-app: elasticsearch-logging
version: v5.6.4
version: v6.2.5
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
@@ -63,17 +63,17 @@ spec:
selector:
matchLabels:
k8s-app: elasticsearch-logging
version: v5.6.4
version: v6.2.5
template:
metadata:
labels:
k8s-app: elasticsearch-logging
version: v5.6.4
version: v6.2.5
kubernetes.io/cluster-service: "true"
spec:
serviceAccountName: elasticsearch-logging
containers:
- image: k8s.gcr.io/elasticsearch:v5.6.4
- image: k8s.gcr.io/elasticsearch:v6.2.5
name: elasticsearch-logging
resources:
# need more cpu upon initialization, therefore burstable class

View File

@@ -1,7 +1,7 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: fluentd-es-config-v0.1.4
name: fluentd-es-config-v0.1.5
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile
@@ -115,7 +115,6 @@ data:
@type tail
path /var/log/containers/*.log
pos_file /var/log/es-containers.log.pos
time_format %Y-%m-%dT%H:%M:%S.%NZ
tag raw.kubernetes.*
read_from_head true
<parse>
@@ -273,21 +272,6 @@ data:
tag kube-scheduler
</source>
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
@id rescheduler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/rescheduler.log
pos_file /var/log/es-rescheduler.log.pos
tag rescheduler
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
@@ -323,10 +307,11 @@ data:
<source>
@id journald-docker
@type systemd
filters [{ "_SYSTEMD_UNIT": "docker.service" }]
matches [{ "_SYSTEMD_UNIT": "docker.service" }]
<storage>
@type local
persistent true
path /var/log/journald-docker.pos
</storage>
read_from_head true
tag docker
@@ -335,10 +320,11 @@ data:
<source>
@id journald-container-runtime
@type systemd
filters [{ "_SYSTEMD_UNIT": "{{ container_runtime }}.service" }]
matches [{ "_SYSTEMD_UNIT": "{{ container_runtime }}.service" }]
<storage>
@type local
persistent true
path /var/log/journald-container-runtime.pos
</storage>
read_from_head true
tag container-runtime
@@ -347,10 +333,11 @@ data:
<source>
@id journald-kubelet
@type systemd
filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
matches [{ "_SYSTEMD_UNIT": "kubelet.service" }]
<storage>
@type local
persistent true
path /var/log/journald-kubelet.pos
</storage>
read_from_head true
tag kubelet
@@ -359,22 +346,24 @@ data:
<source>
@id journald-node-problem-detector
@type systemd
filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
matches [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
<storage>
@type local
persistent true
path /var/log/journald-node-problem-detector.pos
</storage>
read_from_head true
tag node-problem-detector
</source>
<source>
@id kernel
@type systemd
filters [{ "_TRANSPORT": "kernel" }]
matches [{ "_TRANSPORT": "kernel" }]
<storage>
@type local
persistent true
path /var/log/kernel.pos
</storage>
<entry>
fields_strip_underscores true
@@ -435,6 +424,7 @@ data:
@id elasticsearch
@type elasticsearch
@log_level info
type_name fluentd
include_tag_key true
host elasticsearch-logging
port 9200

View File

@@ -48,24 +48,24 @@ roleRef:
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fluentd-es-v2.0.4
name: fluentd-es-v2.2.0
namespace: kube-system
labels:
k8s-app: fluentd-es
version: v2.0.4
version: v2.2.0
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: fluentd-es
version: v2.0.4
version: v2.2.0
template:
metadata:
labels:
k8s-app: fluentd-es
kubernetes.io/cluster-service: "true"
version: v2.0.4
version: v2.2.0
# This annotation ensures that fluentd does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that this does not guarantee admission on the nodes (#40573).
@@ -77,7 +77,7 @@ spec:
serviceAccountName: fluentd-es
containers:
- name: fluentd-es
image: k8s.gcr.io/fluentd-elasticsearch:v2.0.4
image: k8s.gcr.io/fluentd-elasticsearch:v2.2.0
env:
- name: FLUENTD_ARGS
value: --no-supervisor -q
@@ -107,4 +107,4 @@ spec:
path: /var/lib/docker/containers
- name: config-volume
configMap:
name: fluentd-es-config-v0.1.4
name: fluentd-es-config-v0.1.5

View File

@@ -1,11 +1,11 @@
source 'https://rubygems.org'
gem 'fluentd', '<=1.1.0'
gem 'activesupport', '~>5.1.4'
gem 'fluent-plugin-kubernetes_metadata_filter', '~>1.0.0'
gem 'fluent-plugin-elasticsearch', '~>2.4.1'
gem 'fluent-plugin-systemd', '~>0.3.1'
gem 'fluent-plugin-detect-exceptions', '~>0.0.9'
gem 'fluent-plugin-prometheus', '~>0.3.0'
gem 'fluentd', '<=1.2.4'
gem 'activesupport', '~>5.2.1'
gem 'fluent-plugin-kubernetes_metadata_filter', '~>2.0.0'
gem 'fluent-plugin-elasticsearch', '~>2.11.5'
gem 'fluent-plugin-systemd', '~>1.0.1'
gem 'fluent-plugin-detect-exceptions', '~>0.0.11'
gem 'fluent-plugin-prometheus', '~>1.0.1'
gem 'fluent-plugin-multi-format-parser', '~>1.0.0'
gem 'oj', '~>3.3.1.0'
gem 'oj', '~>3.6.5'

View File

@@ -16,7 +16,7 @@
PREFIX = staging-k8s.gcr.io
IMAGE = fluentd-elasticsearch
TAG = v2.0.4
TAG = v2.3.1
build:
docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .

View File

@@ -21,7 +21,7 @@ spec:
spec:
containers:
- name: kibana-logging
image: docker.elastic.co/kibana/kibana:5.6.4
image: docker.elastic.co/kibana/kibana-oss:6.3.2
resources:
# need more cpu upon initialization, therefore burstable class
limits:
@@ -33,10 +33,6 @@ spec:
value: http://elasticsearch-logging:9200
- name: SERVER_BASEPATH
value: /api/v1/namespaces/kube-system/services/kibana-logging/proxy
- name: XPACK_MONITORING_ENABLED
value: "false"
- name: XPACK_SECURITY_ENABLED
value: "false"
ports:
- containerPort: 5601
name: ui

View File

@@ -4,3 +4,5 @@ approvers:
reviewers:
- piosz
- x13n
labels:
- sig/gcp

View File

@@ -29,11 +29,11 @@ subjects:
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: event-exporter-v0.2.1
name: event-exporter-v0.2.3
namespace: kube-system
labels:
k8s-app: event-exporter
version: v0.2.1
version: v0.2.3
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
@@ -42,18 +42,18 @@ spec:
metadata:
labels:
k8s-app: event-exporter
version: v0.2.1
version: v0.2.3
spec:
serviceAccountName: event-exporter-sa
containers:
- name: event-exporter
image: k8s.gcr.io/event-exporter:v0.2.1
image: k8s.gcr.io/event-exporter:v0.2.3
command:
- /event-exporter
- -sink-opts=-stackdriver-resource-model={{ exporter_sd_resource_model }}
# BEGIN_PROMETHEUS_TO_SD
- name: prometheus-to-sd-exporter
image: k8s.gcr.io/prometheus-to-sd:v0.2.4
image: k8s.gcr.io/prometheus-to-sd:v0.3.1
command:
- /monitor
- --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons

View File

@@ -210,20 +210,6 @@ data:
tag kube-scheduler
</source>
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/rescheduler.log
pos_file /var/log/gcp-rescheduler.log.pos
tag rescheduler
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>

View File

@@ -98,6 +98,8 @@ data:
# instead of jsonPayload after extracting 'time', 'severity' and
# 'stream' from the record.
message ${record['log']}
# If 'severity' is not set, assume stderr is ERROR and stdout is INFO.
severity ${record['severity'] || if record['stream'] == 'stderr' then 'ERROR' else 'INFO' end}
</record>
tag ${if record['stream'] == 'stderr' then 'raw.stderr' else 'raw.stdout' end}
remove_keys stream,log
@@ -108,8 +110,8 @@ data:
@type detect_exceptions
remove_tag_prefix raw
message log
stream stream
message message
stream "logging.googleapis.com/local_resource_id"
multiline_flush_interval 5
max_bytes 500000
max_lines 1000
@@ -223,20 +225,6 @@ data:
tag kube-scheduler
</source>
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/rescheduler.log
pos_file /var/log/gcp-rescheduler.log.pos
tag rescheduler
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
@@ -386,6 +374,12 @@ data:
@type null
</match>
# Add a unique insertId to each log entry that doesn't already have it.
# This helps guarantee the order and prevent log duplication.
<filter **>
@type add_insert_ids
</filter>
# This section is exclusive for k8s_container logs. These logs come with
# 'stderr'/'stdout' tags.
# We use a separate output stanza for 'k8s_node' logs with a smaller buffer
@@ -408,9 +402,9 @@ data:
buffer_queue_full_action block
# Set the chunk limit conservatively to avoid exceeding the recommended
# chunk size of 5MB per write request.
buffer_chunk_limit 1M
buffer_chunk_limit 512k
# Cap the combined memory usage of this buffer and the one below to
# 1MiB/chunk * (6 + 2) chunks = 8 MiB
# 512KiB/chunk * (6 + 2) chunks = 4 MiB
buffer_queue_limit 6
# Never wait more than 5 seconds before flushing logs in the non-error case.
flush_interval 5s
@@ -421,8 +415,9 @@ data:
# Use multiple threads for processing.
num_threads 2
use_grpc true
# Use Metadata Agent to get monitored resource.
enable_metadata_agent true
# Skip timestamp adjustment as this is in a controlled environment with
# known timestamp format. This helps with CPU usage.
adjust_invalid_timestamps false
</match>
# Attach local_resource_id for 'k8s_node' monitored resource.
@@ -450,15 +445,16 @@ data:
buffer_type file
buffer_path /var/log/fluentd-buffers/kubernetes.system.buffer
buffer_queue_full_action block
buffer_chunk_limit 1M
buffer_chunk_limit 512k
buffer_queue_limit 2
flush_interval 5s
max_retry_wait 30
disable_retry_limit
num_threads 2
use_grpc true
# Use Metadata Agent to get monitored resource.
enable_metadata_agent true
# Skip timestamp adjustment as this is in a controlled environment with
# known timestamp format. This helps with CPU usage.
adjust_invalid_timestamps false
</match>
metadata:
name: fluentd-gcp-config-v1.2.5

View File

@@ -1,13 +1,13 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: fluentd-gcp-v3.0.0
name: fluentd-gcp-{{ fluentd_gcp_yaml_version }}
namespace: kube-system
labels:
k8s-app: fluentd-gcp
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v3.0.0
version: {{ fluentd_gcp_yaml_version }}
spec:
updateStrategy:
type: RollingUpdate
@@ -16,7 +16,7 @@ spec:
labels:
k8s-app: fluentd-gcp
kubernetes.io/cluster-service: "true"
version: v3.0.0
version: {{ fluentd_gcp_yaml_version }}
# This annotation ensures that fluentd does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that this does not guarantee admission on the nodes (#40573).
@@ -26,6 +26,7 @@ spec:
priorityClassName: system-node-critical
serviceAccountName: fluentd-gcp
dnsPolicy: Default
hostNetwork: true
containers:
- name: fluentd-gcp
image: gcr.io/stackdriver-agents/stackdriver-logging-agent:{{ fluentd_gcp_version }}
@@ -79,7 +80,7 @@ spec:
fi;
# BEGIN_PROMETHEUS_TO_SD
- name: prometheus-to-sd-exporter
image: k8s.gcr.io/prometheus-to-sd:v0.2.4
image: k8s.gcr.io/prometheus-to-sd:v0.3.1
command:
- /monitor
- --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons
@@ -106,7 +107,12 @@ spec:
effect: "NoExecute"
- operator: "Exists"
effect: "NoSchedule"
terminationGracePeriodSeconds: 30
terminationGracePeriodSeconds: 60
tolerations:
- operator: "Exists"
effect: "NoExecute"
- operator: "Exists"
effect: "NoSchedule"
volumes:
- name: varlog
hostPath:

View File

@@ -5,7 +5,7 @@ metadata:
namespace: kube-system
labels:
k8s-app: fluentd-gcp-scaler
version: v0.3.0
version: v0.5.0
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
@@ -19,10 +19,10 @@ spec:
serviceAccountName: fluentd-gcp-scaler
containers:
- name: fluentd-gcp-scaler
image: k8s.gcr.io/fluentd-gcp-scaler:0.3
image: k8s.gcr.io/fluentd-gcp-scaler:0.5
command:
- /scaler.sh
- --ds-name=fluentd-gcp-v3.0.0
- --ds-name=fluentd-gcp-{{ fluentd_gcp_yaml_version }}
- --scaling-policy=fluentd-gcp-scaling-policy
env:
# Defaults, used if no overrides are found in fluentd-gcp-scaling-policy

View File

@@ -29,7 +29,9 @@ spec:
hostNetwork: true
containers:
- name: ip-masq-agent
image: k8s.gcr.io/ip-masq-agent-amd64:v2.0.2
image: k8s.gcr.io/ip-masq-agent-amd64:v2.1.1
args:
- --masq-chain=IP-MASQ
resources:
requests:
cpu: 10m
@@ -52,5 +54,9 @@ spec:
- key: config
path: ip-masq-agent
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
- key: "CriticalAddonsOnly"
operator: "Exists"
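With the new `--masq-chain` flag, the agent programs a NAT chain named `IP-MASQ`; on a node it can be inspected directly (a sketch):

```
# Run on a node; shows the masquerade rules managed by ip-masq-agent.
sudo iptables -t nat -L IP-MASQ -n
```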

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -24,7 +24,7 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
{{pod_priority}}
priorityClassName: system-node-critical
hostNetwork: true
nodeSelector:
beta.kubernetes.io/kube-proxy-ds-ready: "true"

View File

@@ -7,9 +7,7 @@ metadata:
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- ""
- "apps"
- "extensions"
- "*"
resources:
- "*"
verbs:

View File

@@ -7,22 +7,6 @@ metadata:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
name: metadata-agent-config
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
data:
node_level.conf: |-
KubernetesUseWatch: true
KubernetesClusterLevelMetadata: false
cluster_level.conf: |-
KubernetesUseWatch: true
KubernetesClusterLevelMetadata: true
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
@@ -44,28 +28,24 @@ spec:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
serviceAccountName: metadata-agent
priorityClassName: system-node-critical
containers:
- image: gcr.io/stackdriver-agents/stackdriver-metadata-agent:0.2-0.0.19-1
- image: gcr.io/stackdriver-agents/stackdriver-metadata-agent:0.2-0.0.21-1
imagePullPolicy: IfNotPresent
name: metadata-agent
livenessProbe:
exec:
command:
- /bin/bash
- -c
- |
if [[ -f /var/run/metadata-agent/health/unhealthy ]]; then
exit 1;
fi
periodSeconds: 10
httpGet:
path: /healthz
port: 8000
initialDelaySeconds: 30
periodSeconds: 60
timeoutSeconds: 5
failureThreshold: 1
successThreshold: 1
volumeMounts:
- name: metadata-agent-config-volume
mountPath: /etc/config
command:
- /opt/stackdriver/metadata/sbin/metadatad
- --config-file=/etc/config/node_level.conf
args:
- -o KubernetesUseWatch=true
- -o KubernetesClusterLevelMetadata=false
- -o MetadataReporterPurgeDeleted=true
ports:
- containerPort: 8000
hostPort: 8799
@@ -78,10 +58,11 @@ spec:
restartPolicy: Always
schedulerName: default-scheduler
terminationGracePeriodSeconds: 30
volumes:
- name: metadata-agent-config-volume
configMap:
name: metadata-agent-config
tolerations:
- operator: "Exists"
effect: "NoExecute"
- operator: "Exists"
effect: "NoSchedule"
updateStrategy:
rollingUpdate:
maxUnavailable: 1
@@ -109,28 +90,24 @@ spec:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
serviceAccountName: metadata-agent
priorityClassName: system-cluster-critical
containers:
- image: gcr.io/stackdriver-agents/stackdriver-metadata-agent:0.2-0.0.19-1
- image: gcr.io/stackdriver-agents/stackdriver-metadata-agent:0.2-0.0.21-1
imagePullPolicy: IfNotPresent
name: metadata-agent
livenessProbe:
exec:
command:
- /bin/bash
- -c
- |
if [[ -f /var/run/metadata-agent/health/unhealthy ]]; then
exit 1;
fi
periodSeconds: 10
httpGet:
path: /healthz
port: 8000
initialDelaySeconds: 30
periodSeconds: 60
timeoutSeconds: 5
failureThreshold: 1
successThreshold: 1
volumeMounts:
- name: metadata-agent-config-volume
mountPath: /etc/config
command:
- /opt/stackdriver/metadata/sbin/metadatad
- --config-file=/etc/config/cluster_level.conf
args:
- -o KubernetesUseWatch=true
- -o KubernetesClusterLevelMetadata=true
- -o MetadataReporterPurgeDeleted=true
ports:
- containerPort: 8000
protocol: TCP
@@ -142,10 +119,6 @@ spec:
restartPolicy: Always
schedulerName: default-scheduler
terminationGracePeriodSeconds: 30
volumes:
- name: metadata-agent-config-volume
configMap:
name: metadata-agent-config
strategy:
rollingUpdate:
maxUnavailable: 1
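The liveness check above moved from an exec probe to plain HTTP, so it can also be exercised by hand from the node (hostPort per the manifest; a sketch):

```
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:8799/healthz   # expect 200
```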

View File

@@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gce:podsecuritypolicy:metadata-agent
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gce:podsecuritypolicy:privileged
subjects:
  - kind: ServiceAccount
    name: metadata-agent
    namespace: kube-system

View File

@@ -44,7 +44,7 @@ spec:
effect: "NoSchedule"
containers:
- name: metadata-proxy
image: k8s.gcr.io/metadata-proxy:v0.1.9
image: k8s.gcr.io/metadata-proxy:v0.1.10
securityContext:
privileged: true
# Request and limit resources to get guaranteed QoS.
@@ -57,7 +57,7 @@ spec:
cpu: "30m"
# BEGIN_PROMETHEUS_TO_SD
- name: prometheus-to-sd-exporter
image: k8s.gcr.io/prometheus-to-sd:v0.2.4
image: k8s.gcr.io/prometheus-to-sd:v0.3.1
# Request and limit resources to get guaranteed QoS.
resources:
requests:

View File

@@ -23,24 +23,24 @@ data:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: metrics-server-v0.2.1
name: metrics-server-v0.3.1
namespace: kube-system
labels:
k8s-app: metrics-server
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v0.2.1
version: v0.3.1
spec:
selector:
matchLabels:
k8s-app: metrics-server
version: v0.2.1
version: v0.3.1
template:
metadata:
name: metrics-server
labels:
k8s-app: metrics-server
version: v0.2.1
version: v0.3.1
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
@@ -49,16 +49,20 @@ spec:
serviceAccountName: metrics-server
containers:
- name: metrics-server
image: k8s.gcr.io/metrics-server-amd64:v0.2.1
image: k8s.gcr.io/metrics-server-amd64:v0.3.1
command:
- /metrics-server
- --source=kubernetes.summary_api:''
- --metric-resolution=30s
# These are needed for GKE, which doesn't support secure communication yet.
# Remove these lines for non-GKE clusters, and when GKE supports token-based auth.
- --kubelet-port=10255
- --deprecated-kubelet-completely-insecure=true
ports:
- containerPort: 443
name: https
protocol: TCP
- name: metrics-server-nanny
image: k8s.gcr.io/addon-resizer:1.8.1
image: k8s.gcr.io/addon-resizer:1.8.3
resources:
limits:
cpu: 100m
@@ -81,15 +85,18 @@ spec:
command:
- /pod_nanny
- --config-dir=/etc/config
- --cpu=40m
- --cpu={{ base_metrics_server_cpu }}
- --extra-cpu=0.5m
- --memory=40Mi
- --extra-memory=4Mi
- --memory={{ base_metrics_server_memory }}
- --extra-memory={{ metrics_server_memory_per_node }}Mi
- --threshold=5
- --deployment=metrics-server-v0.2.1
- --deployment=metrics-server-v0.3.1
- --container=metrics-server
- --poll-period=300000
- --estimator=exponential
# Specifies the smallest cluster (defined in number of nodes)
# resources will be scaled to.
- --minClusterSize={{ metrics_server_min_cluster_size }}
volumes:
- name: metrics-server-config-volume
configMap:
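After the new metrics-server rolls out, the resource-metrics API it serves should answer directly (API path per metrics-server v0.3.x):

```
# Should return a NodeMetricsList JSON document.
kubectl get --raw /apis/metrics.k8s.io/v1beta1/nodes | head -c 300; echo
```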

View File

@@ -33,6 +33,9 @@ rules:
- apiGroups: ["apps"]
resources: ["statefulsets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["get", "list", "watch"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets"]
verbs: ["get", "list", "watch"]

View File

@@ -0,0 +1,5 @@
approvers:
- tallclair
- dchen1107
reviewers:
- sig-node-reviewers

View File

@@ -0,0 +1,12 @@
# RuntimeClass
RuntimeClass is an alpha feature for supporting multiple container runtimes within a cluster. When
enabled, pods can select a RuntimeClass to run with using the `PodSpec.RuntimeClassName` field.
To enable RuntimeClass, set the feature gate `RuntimeClass=true`, and ensure the CRD defined in this
directory is installed.
For more information, see:
https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/runtimeclass/README.md?pixel)]()
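A minimal sketch of the feature in use, assuming the gate is enabled and the CRD below is installed (the names and handler are hypothetical):

```
# Create a RuntimeClass (v1alpha1 keeps the handler under spec), then
# reference it from a Pod via spec.runtimeClassName.
kubectl apply -f - <<'EOF'
apiVersion: node.k8s.io/v1alpha1
kind: RuntimeClass
metadata:
  name: sandboxed            # hypothetical name
spec:
  runtimeHandler: gvisor     # hypothetical handler; must exist in the CRI runtime
EOF
```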

View File

@@ -0,0 +1,26 @@
kind: CustomResourceDefinition
apiVersion: apiextensions.k8s.io/v1beta1
metadata:
  name: runtimeclasses.node.k8s.io
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  group: node.k8s.io
  version: v1alpha1
  versions:
    - name: v1alpha1
      served: true
      storage: true
  names:
    plural: runtimeclasses
    singular: runtimeclass
    kind: RuntimeClass
  scope: Cluster
  validation:
    openAPIV3Schema:
      properties:
        spec:
          properties:
            runtimeHandler:
              type: string
              pattern: '^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)?$'

View File

@@ -6,5 +6,5 @@ metadata:
annotations:
storageclass.beta.kubernetes.io/is-default-class: "true"
labels:
addonmanager.kubernetes.io/mode: Reconcile
addonmanager.kubernetes.io/mode: EnsureExists
provisioner: kubernetes.io/host-path