Bumping k8s dependencies to 1.13

Cheng Xing
2018-11-16 14:08:25 -08:00
parent 305407125c
commit b4c0b68ec7
8002 changed files with 884099 additions and 276228 deletions


@@ -3,12 +3,12 @@ kind: RoleBinding
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
name: gce:cloud-provider
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cloud-provider
name: gce:cloud-provider
subjects:
- kind: ServiceAccount
name: cloud-provider
@@ -19,11 +19,11 @@ kind: ClusterRoleBinding
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
name: gce:cloud-provider
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cloud-provider
name: gce:cloud-provider
subjects:
- kind: ServiceAccount
name: cloud-provider


@@ -3,7 +3,7 @@ kind: Role
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
name: gce:cloud-provider
namespace: kube-system
rules:
- apiGroups:
@@ -23,7 +23,51 @@ kind: ClusterRole
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
name: gce:cloud-provider
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
namespace: kube-system
annotations:
kubernetes.io/deprecation: 'cloud-provider role is DEPRECATED due to
potential naming collisions and will be removed in 1.16. Do not use
this role.'
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- get
- patch
- update
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: cloud-provider
annotations:
kubernetes.io/deprecation: 'cloud-provider clusterrole is DEPRECATED due to
potential naming collisions and will be removed in 1.16. Do not use
this role.'
rules:
- apiGroups:
- ""


@@ -0,0 +1,4 @@
# GCE Node Termination Handler
This addon deploys [GCE Node Termination Handler](https://github.com/GoogleCloudPlatform/k8s-node-termination-handler) onto Kubernetes clusters on GCP.
It translates GCE VM termination notifications into graceful Kubernetes terminations.
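A minimal sketch of turning the addon on, assuming a cluster brought up with kube-up.sh and the ENABLE_NODE_TERMINATION_HANDLER flag this commit adds to the GCE config scripts:

# Off by default; set before running kube-up (sketch, not part of the README).
ENABLE_NODE_TERMINATION_HANDLER=true ./cluster/kube-up.sh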


@@ -0,0 +1,76 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
k8s-app: node-termination-handler
namespace: kube-system
name: node-termination-handler
spec:
selector:
matchLabels:
k8s-app: node-termination-handler
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
k8s-app: node-termination-handler
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-node-critical
# Necessary to reboot node
hostPID: true
affinity:
nodeAffinity:
# Restrict to GPU nodes or preemptible nodes
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: cloud.google.com/gke-accelerator
operator: Exists
- matchExpressions:
- key: cloud.google.com/gke-preemptible
operator: Exists
volumes:
- name: klet-service-account
hostPath:
path: /var/lib/kubelet
- name: klet-ca-crt
hostPath:
path: /etc/srv/kubernetes
tolerations:
# Run regardless of any existing taints.
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
containers:
- image: k8s.gcr.io/gke-node-termination-handler@sha256:e08ca863a547754fa7b75064bdad04f04cbef86c7b0a181ecc7304e747623181
name: node-termination-handler
command: ["./node-termination-handler"]
args: ["--logtostderr", "--exclude-pods=$(POD_NAME):$(POD_NAMESPACE)", "-v=10", "--kubeconfig=/var/lib/kubelet/kubeconfig", "--annotation=cloud.google.com/impending-node-termination"]
securityContext:
capabilities:
# Necessary to reboot node
add: ["SYS_BOOT"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
resources:
limits:
cpu: 50m
memory: 30Mi
volumeMounts:
- name: klet-service-account
mountPath: /var/lib/kubelet
- name: klet-ca-crt
mountPath: /etc/srv/kubernetes


@@ -19,6 +19,22 @@ metadata:
spec:
privileged: false
allowPrivilegeEscalation: false
# The docker default set of capabilities
allowedCapabilities:
- SETPCAP
- MKNOD
- AUDIT_WRITE
- CHOWN
- NET_RAW
- DAC_OVERRIDE
- FOWNER
- FSETID
- KILL
- SETGID
- SETUID
- NET_BIND_SERVICE
- SYS_CHROOT
- SETFCAP
volumes:
- 'emptyDir'
- 'configMap'


@@ -99,13 +99,13 @@ function get-cluster-ip-range {
}
# Calculate the IP alias range based on the max number of pods.
# Let pow be the smallest integer which is bigger than log2($1 * 2).
# Let pow be the smallest integer which is greater than or equal to log2($1 * 2).
# (32 - pow) will be returned.
#
# $1: The number of max pods limitation.
function get-alias-range-size() {
for pow in {0..31}; do
if (( 1 << $pow > $1 * 2 )); then
if (( 1 << $pow >= $1 * 2 )); then
echo $((32 - pow))
return 0
fi
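A worked example of why the relaxed comparison matters, with an assumed max-pods value: for $1 = 128, $1 * 2 = 256 = 2^8 exactly, so the old strict > skipped pow=8 and produced a /23 (twice the needed space), while the corrected >= picks pow=8.

get-alias-range-size 128   # old code printed 23 (/23 range); with >= it prints 24 (/24)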


@@ -37,6 +37,14 @@ MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)}
NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
NODE_LOCAL_SSDS=${NODE_LOCAL_SSDS:-0}
# Historically fluentd was a manifest pod and then was migrated to a DaemonSet.
# To avoid a situation during a cluster upgrade where two instances of fluentd
# run on the same node, the kubelet needs to mark nodes on which fluentd is not
# running as a manifest pod with an appropriate label.
# TODO(piosz): remove this in 1.8
NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"
# An extension to local SSDs allowing users to specify block/fs and SCSI/NVMe devices
# Format of this variable will be "#,scsi/nvme,block/fs" you can specify multiple
# configurations by separating them by a semi-colon ex. "2,scsi,fs;1,nvme,block"
@@ -51,6 +59,7 @@ PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}
KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-} # default value calculated below
CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false}
MIG_WAIT_UNTIL_STABLE_TIMEOUT=${MIG_WAIT_UNTIL_STABLE_TIMEOUT:-1800}
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
@@ -73,7 +82,6 @@ fi
# you are updating the os image versions, update this variable.
# Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
CVM_VERSION=${CVM_VERSION:-container-vm-v20170627}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-65-10323-64-0}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
@@ -106,7 +114,6 @@ CLUSTER_NAME="${CLUSTER_NAME:-${INSTANCE_PREFIX}}"
MASTER_NAME="${INSTANCE_PREFIX}-master"
AGGREGATOR_MASTER_NAME="${INSTANCE_PREFIX}-aggregator"
INITIAL_ETCD_CLUSTER="${MASTER_NAME}"
ETCD_QUORUM_READ="${ENABLE_ETCD_QUORUM_READ:-false}"
MASTER_TAG="${INSTANCE_PREFIX}-master"
NODE_TAG="${INSTANCE_PREFIX}-minion"
@@ -164,16 +171,15 @@ ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"
# Useful for scheduling heapster in large clusters with nodes of small size.
HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"
# Historically fluentd was a manifest pod and then was migrated to a DaemonSet.
# To avoid a situation during a cluster upgrade where two instances of fluentd
# run on the same node, the kubelet needs to mark nodes on which fluentd is not
# running as a manifest pod with an appropriate label.
# TODO(piosz): remove this in 1.8
NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"
# NON_MASTER_NODE_LABELS are labels that will only be applied to non-master nodes.
NON_MASTER_NODE_LABELS="${KUBE_NON_MASTER_NODE_LABELS:-}"
if [[ "${PREEMPTIBLE_MASTER}" == "true" ]]; then
NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-preemptible=true"
elif [[ "${PREEMPTIBLE_NODE}" == "true" ]]; then
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-preemptible=true"
fi
# To avoid running Calico on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
if [[ ${NETWORK_POLICY_PROVIDER:-} == "calico" ]]; then
@@ -183,6 +189,8 @@ fi
# Optional: Enable netd.
ENABLE_NETD="${KUBE_ENABLE_NETD:-false}"
CUSTOM_NETD_YAML="${KUBE_CUSTOM_NETD_YAML:-}"
CUSTOM_CALICO_NODE_DAEMONSET_YAML="${KUBE_CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}"
CUSTOM_TYPHA_DEPLOYMENT_YAML="${KUBE_CUSTOM_TYPHA_DEPLOYMENT_YAML:-}"
# To avoid running netd on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
@@ -195,6 +203,7 @@ fi
#
# TODO(#8867) Enable by default.
ENABLE_METADATA_CONCEALMENT="${ENABLE_METADATA_CONCEALMENT:-false}" # true, false
METADATA_CONCEALMENT_NO_FIREWALL="${METADATA_CONCEALMENT_NO_FIREWALL:-false}" # true, false
if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then
# Put the necessary label on the node so the daemonset gets scheduled.
NODE_LABELS="${NODE_LABELS},beta.kubernetes.io/metadata-proxy-ready=true"
@@ -216,7 +225,7 @@ resources:
- aesgcm:
keys:
- name: key1
secret: $(dd if=/dev/random bs=32 count=1 status=none | base64 | tr -d '\r\n')
secret: $(dd if=/dev/urandom iflag=fullblock bs=32 count=1 2>/dev/null | base64 | tr -d '\r\n')
EOM
)
fi
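The replacement plausibly fixes two problems at once, assuming GNU coreutils dd: /dev/random can block until the entropy pool fills, and a single read() may legally return fewer than bs bytes, which iflag=fullblock guards against by retrying. A quick length check of the generated key:

# Verify the encryption key is a full 32 bytes (sketch).
dd if=/dev/urandom iflag=fullblock bs=32 count=1 2>/dev/null | base64 | tr -d '\r\n' | base64 -d | wc -c   # 32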
@@ -247,7 +256,7 @@ FEATURE_GATES="${KUBE_FEATURE_GATES:-ExperimentalCriticalPodAnnotation=true}"
if [[ ! -z "${NODE_ACCELERATORS}" ]]; then
FEATURE_GATES="${FEATURE_GATES},DevicePlugins=true"
if [[ "${NODE_ACCELERATORS}" =~ .*type=([a-zA-Z0-9-]+).* ]]; then
NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}"
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}"
fi
fi
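How the capture group feeds the label, with a hypothetical accelerator spec:

NODE_ACCELERATORS="type=nvidia-tesla-k80,count=2"   # hypothetical value
[[ "${NODE_ACCELERATORS}" =~ .*type=([a-zA-Z0-9-]+).* ]]
echo "${BASH_REMATCH[1]}"   # nvidia-tesla-k80
# The label becomes cloud.google.com/gke-accelerator=nvidia-tesla-k80;
# moving it to NON_MASTER_NODE_LABELS keeps it off the master.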
@@ -286,9 +295,6 @@ if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
AUTOSCALER_EXPANDER_CONFIG="${KUBE_AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
fi
# Optional: Enable Rescheduler
ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"
# Optional: Enable allocation of pod IPs using IP aliases.
#
# BETA FEATURE.
@@ -339,7 +345,7 @@ if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
fi
# Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection
ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection
if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
@@ -391,10 +397,6 @@ METADATA_CLOBBERS_CONFIG="${METADATA_CLOBBERS_CONFIG:-false}"
ENABLE_BIG_CLUSTER_SUBNETS="${ENABLE_BIG_CLUSTER_SUBNETS:-false}"
if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]]; then
echo "Warning: Basic audit logging is deprecated and will be removed. Please use advanced auditing instead."
fi
if [[ -n "${LOGROTATE_FILES_MAX_COUNT:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_FILES_MAX_COUNT"
fi
@@ -403,7 +405,9 @@ if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
fi
# Fluentd requirements
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.2-1.5.30-1-k8s}"
# The YAML version exists to trigger a configuration refresh when changes are made.
FLUENTD_GCP_YAML_VERSION="v3.1.0"
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.5-1.5.36-1-k8s}"
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}"
@@ -414,11 +418,14 @@ HEAPSTER_GCP_MEMORY_PER_NODE="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
HEAPSTER_GCP_BASE_CPU="${HEAPSTER_GCP_BASE_CPU:-80m}"
HEAPSTER_GCP_CPU_PER_NODE="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
# Optional: custom system banner for dashboard addon
CUSTOM_KUBE_DASHBOARD_BANNER="${CUSTOM_KUBE_DASHBOARD_BANNER:-}"
# Default Stackdriver resources version exported by Fluentd-gcp addon
LOGGING_STACKDRIVER_RESOURCE_TYPES="${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}"
# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER LOGGING_STACKDRIVER_RESOURCE_TYPES"
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_YAML_VERSION FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER LOGGING_STACKDRIVER_RESOURCE_TYPES"
# Fluentd configuration for node-journal
ENABLE_NODE_JOURNAL="${ENABLE_NODE_JOURNAL:-false}"
@@ -453,3 +460,11 @@ if [[ "${ENABLE_TOKENREQUEST:-}" == "true" ]]; then
SERVICEACCOUNT_ISSUER="https://kubernetes.io/${CLUSTER_NAME}"
SERVICEACCOUNT_API_AUDIENCES="https://kubernetes.default.svc"
fi
# Optional: Enable the Node Termination Handler for preemptible and GPU VMs.
# https://github.com/GoogleCloudPlatform/k8s-node-termination-handler
ENABLE_NODE_TERMINATION_HANDLER="${ENABLE_NODE_TERMINATION_HANDLER:-false}"
# Override the default Node Termination Handler image.
if [[ "${NODE_TERMINATION_HANDLER_IMAGE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} NODE_TERMINATION_HANDLER_IMAGE"
fi


@@ -37,6 +37,14 @@ MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)}
NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
NODE_LOCAL_SSDS=${NODE_LOCAL_SSDS:-0}
# Historically fluentd was a manifest pod and then was migrated to a DaemonSet.
# To avoid a situation during a cluster upgrade where two instances of fluentd
# run on the same node, the kubelet needs to mark nodes on which fluentd is not
# running as a manifest pod with an appropriate label.
# TODO(piosz): remove this in 1.8
NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"
# An extension to local SSDs allowing users to specify block/fs and SCSI/NVMe devices
# Format of this variable will be "#,scsi/nvme,block/fs" you can specify multiple
# configurations by separating them by a semi-colon ex. "2,scsi,fs;1,nvme,block"
@@ -50,6 +58,7 @@ PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}
KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-true}
CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false}
MIG_WAIT_UNTIL_STABLE_TIMEOUT=${MIG_WAIT_UNTIL_STABLE_TIMEOUT:-1800}
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
@@ -75,7 +84,6 @@ ALLOWED_NOTREADY_NODES="${ALLOWED_NOTREADY_NODES:-$((NUM_NODES / 100))}"
# you are updating the os image versions, update this variable.
# Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
CVM_VERSION=${CVM_VERSION:-container-vm-v20170627}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-65-10323-64-0}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
@@ -101,7 +109,6 @@ CLUSTER_NAME="${CLUSTER_NAME:-${INSTANCE_PREFIX}}"
MASTER_NAME="${INSTANCE_PREFIX}-master"
AGGREGATOR_MASTER_NAME="${INSTANCE_PREFIX}-aggregator"
INITIAL_ETCD_CLUSTER="${MASTER_NAME}"
ETCD_QUORUM_READ="${ENABLE_ETCD_QUORUM_READ:-false}"
MASTER_TAG="${INSTANCE_PREFIX}-master"
NODE_TAG="${INSTANCE_PREFIX}-minion"
@@ -166,7 +173,7 @@ ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"
# Useful for scheduling heapster in large clusters with nodes of small size.
HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"
# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.2.18-0) if you need
# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.2.24-1) if you need
# non-default version.
ETCD_IMAGE="${TEST_ETCD_IMAGE:-}"
ETCD_DOCKER_REPOSITORY="${TEST_ETCD_DOCKER_REPOSITORY:-}"
@@ -202,19 +209,20 @@ CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-} ${TEST_CLUSTER_R
SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
KUBEPROXY_TEST_ARGS="${KUBEPROXY_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
# Historically fluentd was a manifest pod and then was migrated to a DaemonSet.
# To avoid a situation during a cluster upgrade where two instances of fluentd
# run on the same node, the kubelet needs to mark nodes on which fluentd is not
# running as a manifest pod with an appropriate label.
# TODO(piosz): remove this in 1.8
NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"
# NON_MASTER_NODE_LABELS are labels that will only be applied to non-master nodes.
NON_MASTER_NODE_LABELS="${KUBE_NON_MASTER_NODE_LABELS:-}"
if [[ "${PREEMPTIBLE_MASTER}" == "true" ]]; then
NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-preemptible=true"
elif [[ "${PREEMPTIBLE_NODE}" == "true" ]]; then
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-preemptible=true"
fi
# Optional: Enable netd.
ENABLE_NETD="${KUBE_ENABLE_NETD:-false}"
CUSTOM_NETD_YAML="${KUBE_CUSTOM_NETD_YAML:-}"
CUSTOM_CALICO_NODE_DAEMONSET_YAML="${KUBE_CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}"
CUSTOM_TYPHA_DEPLOYMENT_YAML="${KUBE_CUSTOM_TYPHA_DEPLOYMENT_YAML:-}"
# To avoid running netd on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
@@ -231,6 +239,7 @@ fi
# Enable metadata concealment by firewalling pod traffic to the metadata server
# and running a proxy daemonset on nodes.
ENABLE_METADATA_CONCEALMENT="${ENABLE_METADATA_CONCEALMENT:-true}" # true, false
METADATA_CONCEALMENT_NO_FIREWALL="${METADATA_CONCEALMENT_NO_FIREWALL:-false}" # true, false
if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then
# Put the necessary label on the node so the daemonset gets scheduled.
NODE_LABELS="${NODE_LABELS},beta.kubernetes.io/metadata-proxy-ready=true"
@@ -254,7 +263,7 @@ fi
if [[ ! -z "${NODE_ACCELERATORS}" ]]; then
FEATURE_GATES="${FEATURE_GATES},DevicePlugins=true"
if [[ "${NODE_ACCELERATORS}" =~ .*type=([a-zA-Z0-9-]+).* ]]; then
NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}"
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}"
fi
fi
@@ -293,9 +302,6 @@ if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
AUTOSCALER_EXPANDER_CONFIG="${KUBE_AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
fi
# Optional: Enable Rescheduler
ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"
# Optional: Enable allocation of pod IPs using IP aliases.
#
# BETA FEATURE.
@@ -346,7 +352,7 @@ if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
fi
if [[ -z "${KUBE_ADMISSION_CONTROL:-}" ]]; then
ADMISSION_CONTROL="Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,PodPreset,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection"
ADMISSION_CONTROL="NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,PodPreset,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection"
if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
fi
@@ -383,10 +389,6 @@ HAIRPIN_MODE="${HAIRPIN_MODE:-hairpin-veth}" # promiscuous-bridge, hairpin-veth,
# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}
# Optional: if set to true, an image puller is deployed. Only for use in e2e clusters.
# TODO: Pipe this through GKE e2e clusters once we know it helps.
PREPULL_E2E_IMAGES="${PREPULL_E2E_IMAGES:-true}"
# Evict pods whenever compute resource availability on the nodes gets below a threshold.
EVICTION_HARD="${EVICTION_HARD:-memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%}"
@@ -405,10 +407,6 @@ ENABLE_LEGACY_ABAC="${ENABLE_LEGACY_ABAC:-false}" # true, false
ENABLE_APISERVER_ADVANCED_AUDIT="${ENABLE_APISERVER_ADVANCED_AUDIT:-true}" # true, false
ADVANCED_AUDIT_LOG_MODE="${ADVANCED_AUDIT_LOG_MODE:-batch}" # batch, blocking
if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]]; then
echo "Warning: Basic audit logging is deprecated and will be removed. Please use advanced auditing instead."
fi
ENABLE_BIG_CLUSTER_SUBNETS="${ENABLE_BIG_CLUSTER_SUBNETS:-false}"
if [[ -n "${LOGROTATE_FILES_MAX_COUNT:-}" ]]; then
@@ -419,7 +417,9 @@ if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
fi
# Fluentd requirements
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.2-1.5.30-1-k8s}"
# The YAML version exists to trigger a configuration refresh when changes are made.
FLUENTD_GCP_YAML_VERSION="v3.1.0"
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.5-1.5.36-1-k8s}"
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}"
@@ -430,11 +430,14 @@ HEAPSTER_GCP_MEMORY_PER_NODE="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
HEAPSTER_GCP_BASE_CPU="${HEAPSTER_GCP_BASE_CPU:-80m}"
HEAPSTER_GCP_CPU_PER_NODE="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
# Optional: custom system banner for dashboard addon
CUSTOM_KUBE_DASHBOARD_BANNER="${CUSTOM_KUBE_DASHBOARD_BANNER:-}"
# Default Stackdriver resources version exported by Fluentd-gcp addon
LOGGING_STACKDRIVER_RESOURCE_TYPES="${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}"
# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER LOGGING_STACKDRIVER_RESOURCE_TYPES"
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_YAML_VERSION FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER LOGGING_STACKDRIVER_RESOURCE_TYPES"
# Fluentd configuration for node-journal
ENABLE_NODE_JOURNAL="${ENABLE_NODE_JOURNAL:-false}"
@@ -472,3 +475,11 @@ if [[ "${ENABLE_TOKENREQUEST:-}" == "true" ]]; then
SERVICEACCOUNT_ISSUER="https://kubernetes.io/${CLUSTER_NAME}"
SERVICEACCOUNT_API_AUDIENCES="https://kubernetes.default.svc"
fi
# Optional: Enable the Node Termination Handler for preemptible and GPU VMs.
# https://github.com/GoogleCloudPlatform/k8s-node-termination-handler
ENABLE_NODE_TERMINATION_HANDLER="${ENABLE_NODE_TERMINATION_HANDLER:-false}"
# Override the default Node Termination Handler image.
if [[ "${NODE_TERMINATION_HANDLER_IMAGE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} NODE_TERMINATION_HANDLER_IMAGE"
fi


@@ -14,8 +14,8 @@ go_test(
],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
],
)
@@ -35,6 +35,7 @@ release_filegroup(
pkg_tar(
name = "gci-trusty-manifests",
srcs = glob(["gke-internal-configure-helper.sh"]),
files = {
"//cluster/gce/gci/mounter": "gci-mounter",
"configure-helper.sh": "gci-configure-helper.sh",

vendor/k8s.io/kubernetes/cluster/gce/gci/OWNERS (generated, vendored, new file, 4 additions)

@@ -0,0 +1,4 @@
approvers:
- dchen1107
- filbranden
- yguo0905


@@ -51,7 +51,7 @@ readonly DOCKER_REGISTRY="k8s.gcr.io"
readonly ENABLE_LEGACY_ABAC=false
readonly ETC_MANIFESTS=${KUBE_HOME}/etc/kubernetes/manifests
readonly KUBE_API_SERVER_DOCKER_TAG=v1.11.0-alpha.0.1808_3c7452dc11645d-dirty
readonly LOG_OWNER_USER=$(whoami)
readonly LOG_OWNER_USER=$(id -un)
readonly LOG_OWNER_GROUP=$(id -gn)
ENCRYPTION_PROVIDER_CONFIG={{.EncryptionProviderConfig}}
ENCRYPTION_PROVIDER_CONFIG_PATH={{.EncryptionProviderConfigPath}}


@@ -30,6 +30,10 @@ readonly UUID_BLOCK_PREFIX="/dev/disk/by-uuid/google-local-ssds"
readonly COREDNS_AUTOSCALER="Deployment/coredns"
readonly KUBEDNS_AUTOSCALER="Deployment/kube-dns"
# Resource requests of master components.
KUBE_CONTROLLER_MANAGER_CPU_REQUEST="${KUBE_CONTROLLER_MANAGER_CPU_REQUEST:-200m}"
KUBE_SCHEDULER_CPU_REQUEST="${KUBE_SCHEDULER_CPU_REQUEST:-75m}"
# Use --retry-connrefused opt only if it's supported by curl.
CURL_RETRY_CONNREFUSED=""
if curl --help | grep -q -- '--retry-connrefused'; then
@@ -43,6 +47,40 @@ function setup-os-params {
echo "core.%e.%p.%t" > /proc/sys/kernel/core_pattern
}
# secure_random generates a secure random string of bytes. This function accepts
# a number of secure bytes desired and returns a base64 encoded string with at
# least the requested entropy. Rather than directly reading from /dev/urandom,
# we use uuidgen which calls getrandom(2). getrandom(2) verifies that the
# entropy pool has been initialized sufficiently for the desired operation
# before reading from /dev/urandom.
#
# ARGS:
# $1: number of secure bytes to generate. We round up to the nearest multiple of 32.
function secure_random {
local infobytes="${1}"
if ((infobytes <= 0)); then
echo "Invalid argument to secure_random: infobytes='${infobytes}'" 1>&2
return 1
fi
local out=""
for (( i = 0; i < "${infobytes}"; i += 32 )); do
# uuids have 122 random bits, sha256 sums have 256 bits, so concatenate
# three uuids and take their sum. The sum is encoded in ASCII hex, hence the
# 64 character cut.
out+="$(
(
uuidgen --random;
uuidgen --random;
uuidgen --random;
) | sha256sum \
| head -c 64
)";
done
# Finally, convert the ASCII hex to base64 to increase the density.
echo -n "${out}" | xxd -r -p | base64 -w 0
}
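A usage sketch matching the call sites later in this script; for an argument of 32 the loop runs once, yielding 64 hex chars, 32 raw bytes, and a 44-character base64 token:

KUBE_SCHEDULER_TOKEN="$(secure_random 32)"
echo "${#KUBE_SCHEDULER_TOKEN}"   # 44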
function config-ip-firewall {
echo "Configuring IP firewall rules"
@@ -51,18 +89,20 @@ function config-ip-firewall {
sysctl -w net.ipv4.conf.all.route_localnet=1
# The GCI image has host firewall which drop most inbound/forwarded packets.
# We need to add rules to accept all TCP/UDP/ICMP packets.
# We need to add rules to accept all TCP/UDP/ICMP/SCTP packets.
if iptables -w -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then
echo "Add rules to accept all inbound TCP/UDP/ICMP packets"
iptables -A INPUT -w -p TCP -j ACCEPT
iptables -A INPUT -w -p UDP -j ACCEPT
iptables -A INPUT -w -p ICMP -j ACCEPT
iptables -A INPUT -w -p SCTP -j ACCEPT
fi
if iptables -w -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then
echo "Add rules to accept all forwarded TCP/UDP/ICMP packets"
echo "Add rules to accept all forwarded TCP/UDP/ICMP/SCTP packets"
iptables -A FORWARD -w -p TCP -j ACCEPT
iptables -A FORWARD -w -p UDP -j ACCEPT
iptables -A FORWARD -w -p ICMP -j ACCEPT
iptables -A FORWARD -w -p SCTP -j ACCEPT
fi
# Flush iptables nat table
@@ -568,6 +608,12 @@ EOF
cat <<EOF >>/etc/gce.conf
token-url = ${TOKEN_URL}
token-body = ${TOKEN_BODY}
EOF
fi
if [[ -n "${CONTAINER_API_ENDPOINT:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
container-api-endpoint = ${CONTAINER_API_ENDPOINT}
EOF
fi
if [[ -n "${PROJECT_ID:-}" ]]; then
@@ -615,6 +661,15 @@ EOF
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
multizone = ${MULTIZONE}
EOF
fi
# Multimaster indicates that the cluster is HA.
# Currently the only HA clusters are regional.
# If we introduce zonal multimaster this will need to be revisited.
if [[ -n "${MULTIMASTER:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
regional = ${MULTIMASTER}
EOF
fi
if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then
@@ -1152,6 +1207,7 @@ function start-kubelet {
local -r kubelet_env_file="/etc/default/kubelet"
local kubelet_opts="${KUBELET_ARGS} ${KUBELET_CONFIG_FILE_ARG:-}"
echo "KUBELET_OPTS=\"${kubelet_opts}\"" > "${kubelet_env_file}"
echo "KUBE_COVERAGE_FILE=\"/var/log/kubelet.cov\"" >> "${kubelet_env_file}"
# Write the systemd service file for kubelet.
cat <<EOF >/etc/systemd/system/kubelet.service
@@ -1170,6 +1226,7 @@ ExecStart=${kubelet_bin} \$KUBELET_OPTS
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl start kubelet.service
}
@@ -1181,13 +1238,18 @@ function start-node-problem-detector {
local -r km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor.json"
# TODO(random-liu): Handle this for alternative container runtime.
local -r dm_config="${KUBE_HOME}/node-problem-detector/config/docker-monitor.json"
local -r custom_km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor-counter.json"
echo "Using node problem detector binary at ${npd_bin}"
local flags="${NPD_TEST_LOG_LEVEL:-"--v=2"} ${NPD_TEST_ARGS:-}"
flags+=" --logtostderr"
flags+=" --system-log-monitors=${km_config},${dm_config}"
flags+=" --custom-plugin-monitors=${custom_km_config}"
flags+=" --apiserver-override=https://${KUBERNETES_MASTER_NAME}?inClusterConfig=false&auth=/var/lib/node-problem-detector/kubeconfig"
local -r npd_port=${NODE_PROBLEM_DETECTOR_PORT:-20256}
flags+=" --port=${npd_port}"
if [[ -n "${EXTRA_NPD_ARGS:-}" ]]; then
flags+=" ${EXTRA_NPD_ARGS}"
fi
# Write the systemd service file for node problem detector.
cat <<EOF >/etc/systemd/system/node-problem-detector.service
@@ -1256,10 +1318,6 @@ function prepare-kube-proxy-manifest-variables {
kube_cache_mutation_detector_env_name="- name: KUBE_CACHE_MUTATION_DETECTOR"
kube_cache_mutation_detector_env_value="value: \"${ENABLE_CACHE_MUTATION_DETECTOR}\""
fi
local pod_priority=""
if [[ "${ENABLE_POD_PRIORITY:-}" == "true" ]]; then
pod_priority="priorityClassName: system-node-critical"
fi
sed -i -e "s@{{kubeconfig}}@${kubeconfig}@g" ${src_file}
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${kube_docker_registry}@g" ${src_file}
sed -i -e "s@{{pillar\['kube-proxy_docker_tag'\]}}@${kube_proxy_docker_tag}@g" ${src_file}
@@ -1267,7 +1325,6 @@ function prepare-kube-proxy-manifest-variables {
sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
sed -i -e "s@{{kube_cache_mutation_detector_env_name}}@${kube_cache_mutation_detector_env_name}@g" ${src_file}
sed -i -e "s@{{kube_cache_mutation_detector_env_value}}@${kube_cache_mutation_detector_env_value}@g" ${src_file}
sed -i -e "s@{{pod_priority}}@${pod_priority}@g" ${src_file}
sed -i -e "s@{{ cpurequest }}@100m@g" ${src_file}
sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" ${src_file}
sed -i -e "s@{{kubernetes_service_host_env_value}}@${KUBERNETES_MASTER_NAME}@g" ${src_file}
@@ -1326,7 +1383,6 @@ function prepare-etcd-manifest {
sed -i -e "s@{{ *cpulimit *}}@\"$4\"@g" "${temp_file}"
sed -i -e "s@{{ *hostname *}}@$host_name@g" "${temp_file}"
sed -i -e "s@{{ *host_ip *}}@$host_ip@g" "${temp_file}"
sed -i -e "s@{{ *srv_kube_path *}}@/etc/srv/kubernetes@g" "${temp_file}"
sed -i -e "s@{{ *etcd_cluster *}}@$etcd_cluster@g" "${temp_file}"
sed -i -e "s@{{ *liveness_probe_initial_delay *}}@${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${temp_file}"
# Get default storage backend from manifest file.
@@ -1459,8 +1515,12 @@ function start-kube-apiserver {
params+=" --allow-privileged=true"
params+=" --cloud-provider=gce"
params+=" --client-ca-file=${CA_CERT_BUNDLE_PATH}"
params+=" --etcd-servers=http://127.0.0.1:2379"
params+=" --etcd-servers-overrides=/events#http://127.0.0.1:4002"
params+=" --etcd-servers=${ETCD_SERVERS:-http://127.0.0.1:2379}"
if [[ -z "${ETCD_SERVERS:-}" ]]; then
params+=" --etcd-servers-overrides=${ETCD_SERVERS_OVERRIDES:-/events#http://127.0.0.1:4002}"
elif [[ -n "${ETCD_SERVERS_OVERRIDES:-}" ]]; then
params+=" --etcd-servers-overrides=${ETCD_SERVERS_OVERRIDES:-}"
fi
params+=" --secure-port=443"
params+=" --tls-cert-file=${APISERVER_SERVER_CERT_PATH}"
params+=" --tls-private-key-file=${APISERVER_SERVER_KEY_PATH}"
@@ -1516,9 +1576,6 @@ function start-kube-apiserver {
if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then
params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
fi
if [[ -n "${ETCD_QUORUM_READ:-}" ]]; then
params+=" --etcd-quorum-read=${ETCD_QUORUM_READ}"
fi
if [[ -n "${SERVICEACCOUNT_ISSUER:-}" ]]; then
params+=" --service-account-issuer=${SERVICEACCOUNT_ISSUER}"
params+=" --service-account-signing-key-file=${SERVICEACCOUNT_KEY_PATH}"
@@ -1529,26 +1586,7 @@ function start-kube-apiserver {
local audit_policy_config_volume=""
local audit_webhook_config_mount=""
local audit_webhook_config_volume=""
if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]]; then
# We currently only support enabling with a fixed path and with built-in log
# rotation "disabled" (large value) so it behaves like kube-apiserver.log.
# External log rotation should be set up the same as for kube-apiserver.log.
params+=" --audit-log-path=/var/log/kube-apiserver-audit.log"
params+=" --audit-log-maxage=0"
params+=" --audit-log-maxbackup=0"
# Lumberjack doesn't offer any way to disable size-based rotation. It also
# has an in-memory counter that doesn't notice if you truncate the file.
# 2000000000 (in MiB) is a large number that fits in 31 bits. If the log
# grows at 10MiB/s (~30K QPS), it will rotate after ~6 years if apiserver
# never restarts. Please manually restart apiserver before this time.
params+=" --audit-log-maxsize=2000000000"
# Disable AdvancedAuditing enabled by default
if [[ -z "${FEATURE_GATES:-}" ]]; then
FEATURE_GATES="AdvancedAuditing=false"
else
FEATURE_GATES="${FEATURE_GATES},AdvancedAuditing=false"
fi
elif [[ "${ENABLE_APISERVER_ADVANCED_AUDIT:-}" == "true" ]]; then
if [[ "${ENABLE_APISERVER_ADVANCED_AUDIT:-}" == "true" ]]; then
local -r audit_policy_file="/etc/audit_policy.config"
params+=" --audit-policy-file=${audit_policy_file}"
# Create the audit policy file, and mount it into the apiserver pod.
@@ -1596,8 +1634,6 @@ function start-kube-apiserver {
fi
fi
if [[ "${ADVANCED_AUDIT_BACKEND:-}" == *"webhook"* ]]; then
params+=" --audit-webhook-mode=batch"
# Create the audit webhook config file, and mount it into the apiserver pod.
local -r audit_webhook_config_file="/etc/audit_webhook.config"
params+=" --audit-webhook-config-file=${audit_webhook_config_file}"
@@ -1608,6 +1644,8 @@ function start-kube-apiserver {
# Batching parameters
if [[ -n "${ADVANCED_AUDIT_WEBHOOK_MODE:-}" ]]; then
params+=" --audit-webhook-mode=${ADVANCED_AUDIT_WEBHOOK_MODE}"
else
params+=" --audit-webhook-mode=batch"
fi
if [[ -n "${ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE:-}" ]]; then
params+=" --audit-webhook-batch-buffer-size=${ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE}"
@@ -1773,7 +1811,6 @@ EOM
local -r kube_apiserver_docker_tag="${KUBE_API_SERVER_DOCKER_TAG:-$(cat /home/kubernetes/kube-docker-files/kube-apiserver.docker_tag)}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
sed -i -e "s@{{srv_sshproxy_path}}@/etc/srv/sshproxy@g" "${src_file}"
sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
@@ -1943,7 +1980,6 @@ function start-kube-controller-manager {
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-controller-manager.manifest"
# Evaluate variables.
sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-controller-manager_docker_tag'\]}}@${kube_rc_docker_tag}@g" "${src_file}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
@@ -1956,6 +1992,7 @@ function start-kube-controller-manager {
sed -i -e "s@{{pv_recycler_volume}}@${PV_RECYCLER_VOLUME}@g" "${src_file}"
sed -i -e "s@{{flexvolume_hostpath_mount}}@${FLEXVOLUME_HOSTPATH_MOUNT}@g" "${src_file}"
sed -i -e "s@{{flexvolume_hostpath}}@${FLEXVOLUME_HOSTPATH_VOLUME}@g" "${src_file}"
sed -i -e "s@{{cpurequest}}@${KUBE_CONTROLLER_MANAGER_CPU_REQUEST}@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
@@ -1990,10 +2027,10 @@ function start-kube-scheduler {
# Remove salt comments and replace variables with values.
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-scheduler.manifest"
sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-scheduler_docker_tag'\]}}@${kube_scheduler_docker_tag}@g" "${src_file}"
sed -i -e "s@{{cpurequest}}@${KUBE_SCHEDULER_CPU_REQUEST}@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
@@ -2042,6 +2079,12 @@ function setup-addon-manifests {
copy-manifests "${psp_dir}" "${dst_dir}"
fi
fi
if [[ "${ENABLE_NODE_TERMINATION_HANDLER:-}" == "true" ]]; then
local -r nth_dir="${src_dir}/${3:-$2}/node-termination-handler"
if [[ -d "${nth_dir}" ]]; then
copy-manifests "${nth_dir}" "${dst_dir}"
fi
fi
}
# A function that downloads extra addons from a URL and puts them in the GCI
@@ -2199,6 +2242,17 @@ function update-prometheus-to-sd-parameters {
fi
}
# Updates parameters in the yaml file for the prometheus-to-sd configuration in
# daemon sets, or removes the component if it is disabled.
function update-daemon-set-prometheus-to-sd-parameters {
if [[ "${DISABLE_PROMETHEUS_TO_SD_IN_DS:-}" == "true" ]]; then
# Removes all lines between two patterns (throws away prometheus-to-sd)
sed -i -e "/# BEGIN_PROMETHEUS_TO_SD/,/# END_PROMETHEUS_TO_SD/d" "$1"
else
update-prometheus-to-sd-parameters $1
fi
}
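This is sed's two-address delete, /begin/,/end/d, which removes the marker lines and everything between them. A sketch of the guarded region (layout assumed from the marker names; only the marker strings appear in this diff):

# fluentd-gcp-ds.yaml (assumed layout):
#   # BEGIN_PROMETHEUS_TO_SD
#   - name: prometheus-to-sd-exporter
#     ...
#   # END_PROMETHEUS_TO_SD
# The range delete drops the whole sidecar when DISABLE_PROMETHEUS_TO_SD_IN_DS=true.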
# Updates parameters in yaml file for event-exporter configuration
function update-event-exporter {
local -r stackdriver_resource_model="${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}"
@@ -2231,6 +2285,7 @@ function setup-coredns-manifest {
function setup-fluentd {
local -r dst_dir="$1"
local -r fluentd_gcp_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-ds.yaml"
local -r fluentd_gcp_scaler_yaml="${dst_dir}/fluentd-gcp/scaler-deployment.yaml"
# Ingest logs against new resources like "k8s_container" and "k8s_node" if
# LOGGING_STACKDRIVER_RESOURCE_TYPES is "new".
# Ingest logs against old resources like "gke_container" and "gce_instance" if
@@ -2243,9 +2298,12 @@ function setup-fluentd {
fluentd_gcp_configmap_name="fluentd-gcp-config-old"
fi
sed -i -e "s@{{ fluentd_gcp_configmap_name }}@${fluentd_gcp_configmap_name}@g" "${fluentd_gcp_yaml}"
fluentd_gcp_version="${FLUENTD_GCP_VERSION:-0.2-1.5.30-1-k8s}"
fluentd_gcp_yaml_version="${FLUENTD_GCP_YAML_VERSION:-v3.1.0}"
sed -i -e "s@{{ fluentd_gcp_yaml_version }}@${fluentd_gcp_yaml_version}@g" "${fluentd_gcp_yaml}"
sed -i -e "s@{{ fluentd_gcp_yaml_version }}@${fluentd_gcp_yaml_version}@g" "${fluentd_gcp_scaler_yaml}"
fluentd_gcp_version="${FLUENTD_GCP_VERSION:-0.5-1.5.36-1-k8s}"
sed -i -e "s@{{ fluentd_gcp_version }}@${fluentd_gcp_version}@g" "${fluentd_gcp_yaml}"
update-prometheus-to-sd-parameters ${fluentd_gcp_yaml}
update-daemon-set-prometheus-to-sd-parameters ${fluentd_gcp_yaml}
start-fluentd-resource-update ${fluentd_gcp_yaml}
update-container-runtime ${fluentd_gcp_configmap_yaml}
update-node-journal ${fluentd_gcp_configmap_yaml}
@@ -2258,7 +2316,7 @@ function setup-kube-dns-manifest {
if [ -n "${CUSTOM_KUBE_DNS_YAML:-}" ]; then
# Replace with custom GKE kube-dns deployment.
cat > "${kubedns_file}" <<EOF
$(echo "$CUSTOM_KUBE_DNS_YAML")
$CUSTOM_KUBE_DNS_YAML
EOF
update-prometheus-to-sd-parameters ${kubedns_file}
fi
@@ -2281,7 +2339,24 @@ function setup-netd-manifest {
if [ -n "${CUSTOM_NETD_YAML:-}" ]; then
# Replace with custom GCP netd deployment.
cat > "${netd_file}" <<EOF
$(echo "$CUSTOM_NETD_YAML")
$CUSTOM_NETD_YAML
EOF
fi
}
# A helper function to set up a custom yaml for a k8s addon.
#
# $1: addon category under /etc/kubernetes
# $2: manifest source dir
# $3: manifest file
# $4: custom yaml
function setup-addon-custom-yaml {
local -r manifest_path="/etc/kubernetes/$1/$2/$3"
local -r custom_yaml="$4"
if [ -n "${custom_yaml:-}" ]; then
# Replace with custom manifest.
cat > "${manifest_path}" <<EOF
$custom_yaml
EOF
fi
}
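The argument order lines up with the call sites added further down for the Calico overrides:

setup-addon-custom-yaml "addons" "calico-policy-controller" \
  "calico-node-daemonset.yaml" "${CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}"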
@@ -2312,9 +2387,9 @@ function start-kube-addons {
if [ -n "${CUSTOM_KUBE_PROXY_YAML:-}" ]; then
# Replace with custom GKE kube proxy.
cat > "$src_dir/kube-proxy/kube-proxy-ds.yaml" <<EOF
$(echo "$CUSTOM_KUBE_PROXY_YAML")
$CUSTOM_KUBE_PROXY_YAML
EOF
update-prometheus-to-sd-parameters "$src_dir/kube-proxy/kube-proxy-ds.yaml"
update-daemon-set-prometheus-to-sd-parameters "$src_dir/kube-proxy/kube-proxy-ds.yaml"
fi
prepare-kube-proxy-manifest-variables "$src_dir/kube-proxy/kube-proxy-ds.yaml"
setup-addon-manifests "addons" "kube-proxy"
@@ -2337,10 +2412,17 @@ EOF
base_eventer_memory="190Mi"
base_metrics_cpu="${HEAPSTER_GCP_BASE_CPU:-80m}"
nanny_memory="90Mi"
local -r metrics_memory_per_node="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
local heapster_min_cluster_size="16"
local metrics_memory_per_node="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
local -r metrics_cpu_per_node="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
local -r eventer_memory_per_node="500"
local -r nanny_memory_per_node="200"
if [[ "${ENABLE_SYSTEM_ADDON_RESOURCE_OPTIMIZATIONS:-}" == "true" ]]; then
base_metrics_memory="${HEAPSTER_GCP_BASE_MEMORY:-100Mi}"
base_metrics_cpu="${HEAPSTER_GCP_BASE_CPU:-10m}"
metrics_memory_per_node="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
heapster_min_cluster_size="5"
fi
if [[ -n "${NUM_NODES:-}" && "${NUM_NODES}" -ge 1 ]]; then
num_kube_nodes="$((${NUM_NODES}+1))"
nanny_memory="$((${num_kube_nodes} * ${nanny_memory_per_node} + 90 * 1024))Ki"
@@ -2361,6 +2443,7 @@ EOF
sed -i -e "s@{{ *eventer_memory_per_node *}}@${eventer_memory_per_node}@g" "${controller_yaml}"
sed -i -e "s@{{ *nanny_memory *}}@${nanny_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *metrics_cpu_per_node *}}@${metrics_cpu_per_node}@g" "${controller_yaml}"
sed -i -e "s@{{ *heapster_min_cluster_size *}}@${heapster_min_cluster_size}@g" "${controller_yaml}"
update-prometheus-to-sd-parameters ${controller_yaml}
if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "stackdriver" ]]; then
@@ -2388,10 +2471,29 @@ EOF
fi
if [[ "${ENABLE_METRICS_SERVER:-}" == "true" ]]; then
setup-addon-manifests "addons" "metrics-server"
base_metrics_server_cpu="40m"
base_metrics_server_memory="40Mi"
metrics_server_memory_per_node="4"
metrics_server_min_cluster_size="16"
if [[ "${ENABLE_SYSTEM_ADDON_RESOURCE_OPTIMIZATIONS:-}" == "true" ]]; then
base_metrics_server_cpu="40m"
base_metrics_server_memory="35Mi"
metrics_server_memory_per_node="4"
metrics_server_min_cluster_size="5"
fi
local -r metrics_server_yaml="${dst_dir}/metrics-server/metrics-server-deployment.yaml"
sed -i -e "s@{{ base_metrics_server_cpu }}@${base_metrics_server_cpu}@g" "${metrics_server_yaml}"
sed -i -e "s@{{ base_metrics_server_memory }}@${base_metrics_server_memory}@g" "${metrics_server_yaml}"
sed -i -e "s@{{ metrics_server_memory_per_node }}@${metrics_server_memory_per_node}@g" "${metrics_server_yaml}"
sed -i -e "s@{{ metrics_server_min_cluster_size }}@${metrics_server_min_cluster_size}@g" "${metrics_server_yaml}"
fi
if [[ "${ENABLE_NVIDIA_GPU_DEVICE_PLUGIN:-}" == "true" ]]; then
setup-addon-manifests "addons" "device-plugins/nvidia-gpu"
fi
if [[ "${ENABLE_NODE_TERMINATION_HANDLER:-}" == "true" ]]; then
setup-addon-manifests "addons" "node-termination-handler"
setup-node-termination-handler-manifest
fi
if [[ "${ENABLE_CLUSTER_DNS:-}" == "true" ]]; then
if [[ "${CLUSTER_DNS_CORE_DNS:-}" == "true" ]]; then
setup-addon-manifests "addons" "dns/coredns"
@@ -2437,6 +2539,9 @@ EOF
if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
setup-addon-manifests "addons" "calico-policy-controller"
setup-addon-custom-yaml "addons" "calico-policy-controller" "calico-node-daemonset.yaml" "${CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}"
setup-addon-custom-yaml "addons" "calico-policy-controller" "typha-deployment.yaml" "${CUSTOM_TYPHA_DEPLOYMENT_YAML:-}"
# Configure Calico CNI directory.
local -r ds_file="${dst_dir}/calico-policy-controller/calico-node-daemonset.yaml"
sed -i -e "s@__CALICO_CNI_DIR__@/home/kubernetes/bin@g" "${ds_file}"
@@ -2450,7 +2555,7 @@ EOF
if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]]; then
setup-addon-manifests "addons" "metadata-proxy/gce"
local -r metadata_proxy_yaml="${dst_dir}/metadata-proxy/gce/metadata-proxy.yaml"
update-prometheus-to-sd-parameters ${metadata_proxy_yaml}
update-daemon-set-prometheus-to-sd-parameters ${metadata_proxy_yaml}
fi
if [[ "${ENABLE_ISTIO:-}" == "true" ]]; then
if [[ "${ISTIO_AUTH_TYPE:-}" == "MUTUAL_TLS" ]]; then
@@ -2459,21 +2564,26 @@ EOF
setup-addon-manifests "addons" "istio/noauth"
fi
fi
if [[ "${FEATURE_GATES:-}" =~ "RuntimeClass=true" ]]; then
setup-addon-manifests "addons" "runtimeclass"
fi
if [[ -n "${EXTRA_ADDONS_URL:-}" ]]; then
download-extra-addons
setup-addon-manifests "addons" "gce-extras"
fi
# Place addon manager pod manifest.
cp "${src_dir}/kube-addon-manager.yaml" /etc/kubernetes/manifests
src_file="${src_dir}/kube-addon-manager.yaml"
sed -i -e "s@{{kubectl_extra_prune_whitelist}}@${ADDON_MANAGER_PRUNE_WHITELIST:-}@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
# Starts an image-puller - used in test clusters.
function start-image-puller {
echo "Start image-puller"
local -r e2e_image_puller_manifest="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/e2e-image-puller.manifest"
update-container-runtime "${e2e_image_puller_manifest}"
cp "${e2e_image_puller_manifest}" /etc/kubernetes/manifests/
function setup-node-termination-handler-manifest {
local -r nth_manifest="/etc/kubernetes/$1/$2/daemonset.yaml"
if [[ -n "${NODE_TERMINATION_HANDLER_IMAGE}" ]]; then
sed -i "s|image:.*|image: ${NODE_TERMINATION_HANDLER_IMAGE}|" "${nth_manifest}"
fi
}
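So a hypothetical override set in the environment lands directly in the DaemonSet image field:

NODE_TERMINATION_HANDLER_IMAGE="gcr.io/my-project/node-termination-handler:canary"   # hypothetical
# sed then rewrites the image line in node-termination-handler/daemonset.yaml,
# replacing the pinned k8s.gcr.io/gke-node-termination-handler@sha256:... digest.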
# Sets up manifests for the ingress controller and GCE-specific policies for the service controller.
@@ -2502,16 +2612,6 @@ function start-lb-controller {
fi
}
# Starts rescheduler.
function start-rescheduler {
if [[ "${ENABLE_RESCHEDULER:-}" == "true" ]]; then
echo "Start Rescheduler"
prepare-log-file /var/log/rescheduler.log
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/rescheduler.manifest" \
/etc/kubernetes/manifests/
fi
}
# Setup working directory for kubelet.
function setup-kubelet-dir {
echo "Making /var/lib/kubelet executable for kubelet"
@@ -2519,6 +2619,15 @@ function setup-kubelet-dir {
mount -B -o remount,exec,suid,dev /var/lib/kubelet
}
# Override for GKE custom master setup scripts (no-op outside of GKE).
function gke-master-start {
if [[ -e "${KUBE_HOME}/bin/gke-internal-configure-helper.sh" ]]; then
echo "Running GKE internal configuration script"
. "${KUBE_HOME}/bin/gke-internal-configure-helper.sh"
gke-internal-master-start
fi
}
function reset-motd {
# kubelet is installed both on the master and nodes, and the version is easy to parse (unlike kubectl)
local -r version="$("${KUBE_HOME}"/bin/kubelet --version=true | cut -f2 -d " ")"
@@ -2637,9 +2746,9 @@ function main() {
fi
# generate the controller manager, scheduler and cluster autoscaler tokens here since they are only used on the master.
KUBE_CONTROLLER_MANAGER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_SCHEDULER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_CLUSTER_AUTOSCALER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_CONTROLLER_MANAGER_TOKEN="$(secure_random 32)"
KUBE_SCHEDULER_TOKEN="$(secure_random 32)"
KUBE_CLUSTER_AUTOSCALER_TOKEN="$(secure_random 32)"
setup-os-params
config-ip-firewall
@@ -2655,6 +2764,7 @@ function main() {
create-master-kubelet-auth
create-master-etcd-auth
override-pv-recycler
gke-master-start
else
create-node-pki
create-kubelet-kubeconfig ${KUBERNETES_MASTER_NAME}
@@ -2683,14 +2793,10 @@ function main() {
start-kube-addons
start-cluster-autoscaler
start-lb-controller
start-rescheduler
else
if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
start-kube-proxy
fi
if [[ "${PREPULL_E2E_IMAGES:-}" == "true" ]]; then
start-image-puller
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
start-node-problem-detector
fi


@@ -26,10 +26,10 @@ set -o pipefail
### Hardcoded constants
DEFAULT_CNI_VERSION="v0.6.0"
DEFAULT_CNI_SHA1="d595d3ded6499a64e8dac02466e2f5f2ce257c9f"
DEFAULT_NPD_VERSION="v0.4.1"
DEFAULT_NPD_SHA1="a57a3fe64cab8a18ec654f5cef0aec59dae62568"
DEFAULT_CRICTL_VERSION="v1.11.0"
DEFAULT_CRICTL_SHA1="8f5142b985d314cdebb51afd55054d5ec00c442a"
DEFAULT_NPD_VERSION="v0.5.0"
DEFAULT_NPD_SHA1="650ecfb2ae495175ee43706d0bd862a1ea7f1395"
DEFAULT_CRICTL_VERSION="v1.12.0"
DEFAULT_CRICTL_SHA1="82ef8b44849f9da0589c87e9865d4716573eec7f"
DEFAULT_MOUNTER_TAR_SHA="8003b798cf33c7f91320cd6ee5cec4fa22244571"
###
@@ -247,6 +247,11 @@ function install-crictl {
fi
local -r crictl="crictl-${crictl_version}-linux-amd64"
# Create crictl config file.
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: ${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock}
EOF
if is-preloaded "${crictl}" "${crictl_sha1}"; then
echo "crictl is preloaded"
return
@@ -257,11 +262,6 @@ function install-crictl {
download-or-bust "${crictl_sha1}" "${crictl_path}/${crictl}"
mv "${KUBE_HOME}/${crictl}" "${KUBE_BIN}/crictl"
chmod a+x "${KUBE_BIN}/crictl"
# Create crictl config file.
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: ${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock}
EOF
}
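Writing the config before the preload check means a preloaded crictl works immediately; crictl reads /etc/crictl.yaml by default, so no endpoint flag is needed (sketch):

crictl pull k8s.gcr.io/pause   # uses runtime-endpoint from /etc/crictl.yaml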
function install-exec-auth-plugin {
@@ -273,7 +273,8 @@ function install-exec-auth-plugin {
echo "Downloading gke-exec-auth-plugin binary"
download-or-bust "${plugin_sha1}" "${plugin_url}"
mv "${KUBE_HOME}/gke-exec-auth-plugin" "${KUBE_BIN}"
mv "${KUBE_HOME}/gke-exec-auth-plugin" "${KUBE_BIN}/gke-exec-auth-plugin"
chmod a+x "${KUBE_BIN}/gke-exec-auth-plugin"
}
function install-kube-manifests {
@@ -306,6 +307,10 @@ function install-kube-manifests {
xargs sed -ri "s@(image\":\s+\")k8s.gcr.io@\1${kube_addon_registry}@"
fi
cp "${dst_dir}/kubernetes/gci-trusty/gci-configure-helper.sh" "${KUBE_BIN}/configure-helper.sh"
if [[ -e "${dst_dir}/kubernetes/gci-trusty/gke-internal-configure-helper.sh" ]]; then
cp "${dst_dir}/kubernetes/gci-trusty/gke-internal-configure-helper.sh" "${KUBE_BIN}/"
fi
cp "${dst_dir}/kubernetes/gci-trusty/health-monitor.sh" "${KUBE_BIN}/health-monitor.sh"
rm -f "${KUBE_HOME}/${manifests_tar}"


@@ -15,7 +15,6 @@ filegroup(
srcs = [
"abac-authz-policy.jsonl",
"cluster-autoscaler.manifest",
"e2e-image-puller.manifest",
"etcd.manifest",
"etcd-empty-dir-cleanup.yaml",
"glbc.manifest",
@@ -25,8 +24,7 @@ filegroup(
"kube-controller-manager.manifest",
"kube-proxy.manifest",
"kube-scheduler.manifest",
"rescheduler.manifest",
],
] + glob(["internal-*"]),
)
filegroup(


@@ -17,7 +17,7 @@
"containers": [
{
"name": "cluster-autoscaler",
"image": "k8s.gcr.io/cluster-autoscaler:v1.3.0",
"image": "k8s.gcr.io/cluster-autoscaler:v1.12.0",
"livenessProbe": {
"httpGet": {
"path": "/health-check",
@@ -33,6 +33,7 @@
"--logtostderr=true",
"--write-status-configmap=true",
"--balance-similar-node-groups=true",
"--expendable-pods-priority-cutoff=-10",
"{{params}}"
],
"env": [


@@ -1,117 +0,0 @@
# e2e-image-puller seeds nodes in an e2e cluster with test images.
apiVersion: v1
kind: Pod
metadata:
name: e2e-image-puller
namespace: kube-system
labels:
name: e2e-image-puller
spec:
containers:
- name: image-puller
resources:
requests:
cpu: 100m
limits:
cpu: 100m
image: k8s.gcr.io/busybox:1.24
# TODO: Replace this with a go script that pulls in parallel?
# Currently it takes ~5m to pull all e2e images, so this is OK, and
# fewer moving parts is always better.
# TODO: Replace the hardcoded image list with an autogen list; the list is
# currently hard-coded for static verification. It was generated via:
# grep -Iiroh "gcr.io/.*" "${KUBE_ROOT}/test/e2e" | \
# sed -e "s/[,\")}]//g" | awk '{print $1}' | sort | uniq | tr '\n' ' '
# We always want the subshell to exit 0 so this pod doesn't end up
# blocking tests in an Error state.
command:
- /bin/sh
- -c
- >
for i in
k8s.gcr.io/alpine-with-bash:1.0
k8s.gcr.io/apparmor-loader:0.1
k8s.gcr.io/busybox:1.24
k8s.gcr.io/dnsutils:e2e
k8s.gcr.io/e2e-net-amd64:1.0
k8s.gcr.io/echoserver:1.10
k8s.gcr.io/eptest:0.1
k8s.gcr.io/fakegitserver:0.1
k8s.gcr.io/galera-install:0.1
k8s.gcr.io/invalid-image:invalid-tag
k8s.gcr.io/iperf:e2e
k8s.gcr.io/jessie-dnsutils:e2e
k8s.gcr.io/k8s-dns-dnsmasq-amd64:1.14.5
k8s.gcr.io/liveness:e2e
k8s.gcr.io/logs-generator:v0.1.0
k8s.gcr.io/mounttest:0.8
k8s.gcr.io/mounttest-user:0.5
k8s.gcr.io/mysql-galera:e2e
k8s.gcr.io/mysql-healthz:1.0
k8s.gcr.io/netexec:1.4
k8s.gcr.io/netexec:1.5
k8s.gcr.io/netexec:1.7
k8s.gcr.io/nettest:1.7
k8s.gcr.io/nginx:1.7.9
k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.1
k8s.gcr.io/nginx-slim:0.7
k8s.gcr.io/nginx-slim:0.8
k8s.gcr.io/node-problem-detector:v0.3.0
k8s.gcr.io/pause
k8s.gcr.io/porter:4524579c0eb935c056c8e75563b4e1eda31587e0
k8s.gcr.io/portforwardtester:1.2
k8s.gcr.io/redis-install-3.2.0:e2e
k8s.gcr.io/resource_consumer:beta4
k8s.gcr.io/resource_consumer/controller:beta4
gcr.io/kubernetes-e2e-test-images/serve-hostname-amd64:1.1
gcr.io/kubernetes-e2e-test-images/hostexec-amd64:1.1
k8s.gcr.io/servicelb:0.1
k8s.gcr.io/test-webserver:e2e
k8s.gcr.io/update-demo:kitten
k8s.gcr.io/update-demo:nautilus
gcr.io/kubernetes-e2e-test-images/volume-ceph:0.1
gcr.io/kubernetes-e2e-test-images/volume-gluster:0.2
gcr.io/kubernetes-e2e-test-images/volume-iscsi:0.1
gcr.io/kubernetes-e2e-test-images/volume-nfs:0.8
gcr.io/kubernetes-e2e-test-images/volume-rbd:0.1
k8s.gcr.io/zookeeper-install-3.5.0-alpha:e2e
gcr.io/google_samples/gb-redisslave:nonexistent
; do echo $(date '+%X') pulling $i; crictl pull $i 1>/dev/null; done; exit 0;
securityContext:
privileged: true
volumeMounts:
- mountPath: {{ container_runtime_endpoint }}
name: socket
- mountPath: /usr/bin/crictl
name: crictl
- mountPath: /etc/crictl.yaml
name: config
# Add a container that runs a health-check
- name: nethealth-check
resources:
requests:
cpu: 100m
limits:
cpu: 100m
image: k8s.gcr.io/kube-nethealth-amd64:1.0
command:
- /bin/sh
- -c
- "/usr/bin/nethealth || true"
volumes:
- hostPath:
path: {{ container_runtime_endpoint }}
type: Socket
name: socket
- hostPath:
path: /home/kubernetes/bin/crictl
type: File
name: crictl
- hostPath:
path: /etc/crictl.yaml
type: File
name: config
# This pod is really fire-and-forget.
restartPolicy: OnFailure
# This pod needs hostNetworking for true VM perf measurement as well as avoiding cbr0 issues
hostNetwork: true


@@ -14,4 +14,4 @@ spec:
dnsPolicy: Default
containers:
- name: etcd-empty-dir-cleanup
image: k8s.gcr.io/etcd-empty-dir-cleanup:3.2.18.0
image: k8s.gcr.io/etcd-empty-dir-cleanup:3.2.24.0


@@ -14,7 +14,7 @@
"containers":[
{
"name": "etcd-container",
"image": "{{ pillar.get('etcd_docker_repository', 'k8s.gcr.io/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.2.18-0') }}",
"image": "{{ pillar.get('etcd_docker_repository', 'k8s.gcr.io/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.2.24-1') }}",
"resources": {
"requests": {
"cpu": {{ cpulimit }}
@@ -30,7 +30,7 @@
"value": "{{ pillar.get('storage_backend', 'etcd3') }}"
},
{ "name": "TARGET_VERSION",
"value": "{{ pillar.get('etcd_version', '3.2.18') }}"
"value": "{{ pillar.get('etcd_version', '3.2.24') }}"
},
{ "name": "DATA_DIRECTORY",
"value": "/var/etcd/data{{ suffix }}"
@@ -80,7 +80,7 @@
"readOnly": false
},
{ "name": "etc",
"mountPath": "{{ srv_kube_path }}",
"mountPath": "/etc/srv/kubernetes",
"readOnly": false
}
]
@@ -98,7 +98,7 @@
},
{ "name": "etc",
"hostPath": {
"path": "{{ srv_kube_path }}"}
"path": "/etc/srv/kubernetes"}
}
]
}}


@@ -1,20 +1,20 @@
apiVersion: v1
kind: Pod
metadata:
name: l7-lb-controller-v1.1.1
name: l7-lb-controller-v1.2.3
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
labels:
k8s-app: gcp-lb-controller
version: v1.1.1
version: v1.2.3
kubernetes.io/name: "GLBC"
spec:
terminationGracePeriodSeconds: 600
hostNetwork: true
containers:
- image: k8s.gcr.io/ingress-gce-glbc-amd64:v1.1.1
- image: k8s.gcr.io/ingress-gce-glbc-amd64:v1.2.3
livenessProbe:
httpGet:
path: /healthz
@@ -45,7 +45,7 @@ spec:
# TODO: split this out into args when we no longer need to pipe stdout to a file #6428
- sh
- -c
- 'exec /glbc --gce-ratelimit=ga.Operations.Get,qps,10,100 --gce-ratelimit=alpha.Operations.Get,qps,10,100 --gce-ratelimit=ga.BackendServices.Get,qps,1.8,1 --gce-ratelimit=ga.HealthChecks.Get,qps,1.8,1 --gce-ratelimit=alpha.HealthChecks.Get,qps,1.8,1 --verbose --apiserver-host=http://localhost:8080 --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1'
- 'exec /glbc --gce-ratelimit=ga.Operations.Get,qps,10,100 --gce-ratelimit=alpha.Operations.Get,qps,10,100 --gce-ratelimit=ga.BackendServices.Get,qps,1.8,1 --gce-ratelimit=ga.HealthChecks.Get,qps,1.8,1 --gce-ratelimit=alpha.HealthChecks.Get,qps,1.8,1 --gce-ratelimit=beta.NetworkEndpointGroups.Get,qps,1.8,1 --gce-ratelimit=beta.NetworkEndpointGroups.AttachNetworkEndpoints,qps,1.8,1 --gce-ratelimit=beta.NetworkEndpointGroups.DetachNetworkEndpoints,qps,1.8,1 --gce-ratelimit=beta.NetworkEndpointGroups.ListNetworkEndpoints,qps,1.8,1 --verbose --apiserver-host=http://localhost:8080 --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1'
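# Each --gce-ratelimit value appears to follow <version>.<Resource>.<Method>,qps,<qps>,<burst>;
# the beta.NetworkEndpointGroups entries throttle the NEG calls that v1.2.3 introduces.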
volumes:
- hostPath:
path: /etc/gce.conf

View File

@@ -14,7 +14,7 @@ spec:
- name: kube-addon-manager
# When updating version also bump it in:
# - test/kubemark/resources/manifests/kube-addon-manager.yaml
image: k8s.gcr.io/kube-addon-manager:v8.6
image: k8s.gcr.io/kube-addon-manager:v8.7
command:
- /bin/bash
- -c
@@ -30,6 +30,9 @@ spec:
- mountPath: /var/log
name: varlog
readOnly: false
env:
- name: KUBECTL_EXTRA_PRUNE_WHITELIST
value: {{kubectl_extra_prune_whitelist}}
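# {{kubectl_extra_prune_whitelist}} is substituted at cluster bring-up; addon-manager
# appears to append it to the --prune-whitelist of its periodic "kubectl apply --prune" runs.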
volumes:
- hostPath:
path: /etc/kubernetes/

View File

@@ -60,7 +60,7 @@
{{admission_controller_config_mount}}
{{image_policy_webhook_config_mount}}
{ "name": "srvkube",
"mountPath": "{{srv_kube_path}}",
"mountPath": "/etc/srv/kubernetes",
"readOnly": true},
{ "name": "logfile",
"mountPath": "/var/log/kube-apiserver.log",
@@ -102,7 +102,7 @@
{{image_policy_webhook_config_volume}}
{ "name": "srvkube",
"hostPath": {
"path": "{{srv_kube_path}}"}
"path": "/etc/srv/kubernetes"}
},
{ "name": "logfile",
"hostPath": {

View File

@@ -21,7 +21,7 @@
"image": "{{pillar['kube_docker_registry']}}/kube-controller-manager:{{pillar['kube-controller-manager_docker_tag']}}",
"resources": {
"requests": {
"cpu": "200m"
"cpu": "{{cpurequest}}"
}
},
"command": [
@@ -44,7 +44,7 @@
{{additional_cloud_config_mount}}
{{pv_recycler_mount}}
{ "name": "srvkube",
"mountPath": "{{srv_kube_path}}",
"mountPath": "/etc/srv/kubernetes",
"readOnly": true},
{{flexvolume_hostpath_mount}}
{ "name": "logfile",
@@ -74,7 +74,7 @@
{{pv_recycler_volume}}
{ "name": "srvkube",
"hostPath": {
"path": "{{srv_kube_path}}"}
"path": "/etc/srv/kubernetes"}
},
{{flexvolume_hostpath}}
{ "name": "logfile",

View File

@@ -6,15 +6,14 @@ metadata:
# This annotation ensures that kube-proxy does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that kube-proxy runs as a static pod so this annotation does NOT have
# any effect on rescheduler (default scheduler and rescheduler are not
# involved in scheduling kube-proxy).
# any effect on the default scheduler, which is not involved in scheduling kube-proxy.
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
tier: node
component: kube-proxy
spec:
{{pod_priority}}
priorityClassName: system-node-critical
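# system-node-critical replaces the templated {{pod_priority}} block and keeps the
# static kube-proxy pod from being preempted or evicted.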
hostNetwork: true
tolerations:
- operator: "Exists"

View File

@@ -21,7 +21,7 @@
"image": "{{pillar['kube_docker_registry']}}/kube-scheduler:{{pillar['kube-scheduler_docker_tag']}}",
"resources": {
"requests": {
"cpu": "75m"
"cpu": "{{cpurequest}}"
}
},
"command": [
@@ -46,7 +46,7 @@
},
{
"name": "srvkube",
"mountPath": "{{srv_kube_path}}",
"mountPath": "/etc/srv/kubernetes",
"readOnly": true
}
]
@@ -55,7 +55,7 @@
"volumes":[
{
"name": "srvkube",
"hostPath": {"path": "{{srv_kube_path}}"}
"hostPath": {"path": "/etc/srv/kubernetes"}
},
{
"name": "logfile",

View File

@@ -1,36 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: rescheduler-v0.4.0
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
k8s-app: rescheduler
version: v0.4.0
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Rescheduler"
spec:
hostNetwork: true
containers:
- image: k8s.gcr.io/rescheduler:v0.4.0
name: rescheduler
volumeMounts:
- mountPath: /var/log/rescheduler.log
name: logfile
readOnly: false
# TODO: Make resource requirements depend on the size of the cluster
resources:
requests:
cpu: 10m
memory: 100Mi
command:
# TODO: split this out into args when we no longer need to pipe stdout to a file #6428
- sh
- -c
- 'exec /rescheduler --running-in-cluster=false 1>>/var/log/rescheduler.log 2>&1'
volumes:
- hostPath:
path: /var/log/rescheduler.log
type: FileOrCreate
name: logfile

View File

@@ -161,8 +161,8 @@ export KUBE_GCE_ENABLE_IP_ALIASES=true
export SECONDARY_RANGE_NAME="pods-default"
export STORAGE_BACKEND="etcd3"
export STORAGE_MEDIA_TYPE="application/vnd.kubernetes.protobuf"
export ETCD_IMAGE=3.2.18-0
export ETCD_VERSION=3.2.18
export ETCD_IMAGE=3.2.24-1
export ETCD_VERSION=3.2.24
# Upgrade master with updated kube envs
${KUBE_ROOT}/cluster/gce/upgrade.sh -M -l
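# -M upgrades only the master; -l uses locally built binaries instead of a published release.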

View File

@@ -109,6 +109,20 @@ function split_csv() {
# Verify prereqs
function verify-prereqs() {
local cmd
# we use openssl to generate certs
kube::util::test_openssl_installed
# ensure a version supported by easyrsa is installed
if [ "$(openssl version | cut -d\ -f1)" == "LibreSSL" ]; then
echo "LibreSSL is not supported. Please ensure openssl points to an OpenSSL binary"
if [ "$(uname -s)" == "Darwin" ]; then
echo 'On macOS we recommend using homebrew and adding "$(brew --prefix openssl)/bin" to your PATH'
fi
exit 1
fi
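# e.g. "OpenSSL 1.0.2g" passes, while any "LibreSSL x.y" build (the macOS default) trips the guard above.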
# we use gcloud to create the cluster, gsutil to stage binaries and data
for cmd in gcloud gsutil; do
if ! which "${cmd}" >/dev/null; then
local resp="n"
@@ -830,7 +844,6 @@ ENABLE_NODE_PROBLEM_DETECTOR: $(yaml-quote ${ENABLE_NODE_PROBLEM_DETECTOR:-none}
NODE_PROBLEM_DETECTOR_VERSION: $(yaml-quote ${NODE_PROBLEM_DETECTOR_VERSION:-})
NODE_PROBLEM_DETECTOR_TAR_HASH: $(yaml-quote ${NODE_PROBLEM_DETECTOR_TAR_HASH:-})
ENABLE_NODE_LOGGING: $(yaml-quote ${ENABLE_NODE_LOGGING:-false})
ENABLE_RESCHEDULER: $(yaml-quote ${ENABLE_RESCHEDULER:-false})
LOGGING_DESTINATION: $(yaml-quote ${LOGGING_DESTINATION:-})
ELASTICSEARCH_LOGGING_REPLICAS: $(yaml-quote ${ELASTICSEARCH_LOGGING_REPLICAS:-})
ENABLE_CLUSTER_DNS: $(yaml-quote ${ENABLE_CLUSTER_DNS:-false})
@@ -851,7 +864,6 @@ KUBELET_CERT: $(yaml-quote ${KUBELET_CERT_BASE64:-})
KUBELET_KEY: $(yaml-quote ${KUBELET_KEY_BASE64:-})
NETWORK_PROVIDER: $(yaml-quote ${NETWORK_PROVIDER:-})
NETWORK_POLICY_PROVIDER: $(yaml-quote ${NETWORK_POLICY_PROVIDER:-})
PREPULL_E2E_IMAGES: $(yaml-quote ${PREPULL_E2E_IMAGES:-})
HAIRPIN_MODE: $(yaml-quote ${HAIRPIN_MODE:-})
E2E_STORAGE_TEST_ENVIRONMENT: $(yaml-quote ${E2E_STORAGE_TEST_ENVIRONMENT:-})
KUBE_DOCKER_REGISTRY: $(yaml-quote ${KUBE_DOCKER_REGISTRY:-})
@@ -859,13 +871,12 @@ KUBE_ADDON_REGISTRY: $(yaml-quote ${KUBE_ADDON_REGISTRY:-})
MULTIZONE: $(yaml-quote ${MULTIZONE:-})
NON_MASQUERADE_CIDR: $(yaml-quote ${NON_MASQUERADE_CIDR:-})
ENABLE_DEFAULT_STORAGE_CLASS: $(yaml-quote ${ENABLE_DEFAULT_STORAGE_CLASS:-})
ENABLE_APISERVER_BASIC_AUDIT: $(yaml-quote ${ENABLE_APISERVER_BASIC_AUDIT:-})
ENABLE_APISERVER_ADVANCED_AUDIT: $(yaml-quote ${ENABLE_APISERVER_ADVANCED_AUDIT:-})
ENABLE_CACHE_MUTATION_DETECTOR: $(yaml-quote ${ENABLE_CACHE_MUTATION_DETECTOR:-false})
ENABLE_PATCH_CONVERSION_DETECTOR: $(yaml-quote ${ENABLE_PATCH_CONVERSION_DETECTOR:-false})
ADVANCED_AUDIT_POLICY: $(yaml-quote ${ADVANCED_AUDIT_POLICY:-})
ADVANCED_AUDIT_BACKEND: $(yaml-quote ${ADVANCED_AUDIT_BACKEND:-log})
ADVANCED_AUDIT_TRUNCATING_BACKEND: $(yaml-quote ${ADVANCED_AUDIT_TRUNCATING_BACKEND:-})
ADVANCED_AUDIT_TRUNCATING_BACKEND: $(yaml-quote ${ADVANCED_AUDIT_TRUNCATING_BACKEND:-true})
ADVANCED_AUDIT_LOG_MODE: $(yaml-quote ${ADVANCED_AUDIT_LOG_MODE:-})
ADVANCED_AUDIT_LOG_BUFFER_SIZE: $(yaml-quote ${ADVANCED_AUDIT_LOG_BUFFER_SIZE:-})
ADVANCED_AUDIT_LOG_MAX_BATCH_SIZE: $(yaml-quote ${ADVANCED_AUDIT_LOG_MAX_BATCH_SIZE:-})
@@ -886,6 +897,7 @@ ENABLE_NODE_JOURNAL: $(yaml-quote ${ENABLE_NODE_JOURNAL:-false})
PROMETHEUS_TO_SD_ENDPOINT: $(yaml-quote ${PROMETHEUS_TO_SD_ENDPOINT:-})
PROMETHEUS_TO_SD_PREFIX: $(yaml-quote ${PROMETHEUS_TO_SD_PREFIX:-})
ENABLE_PROMETHEUS_TO_SD: $(yaml-quote ${ENABLE_PROMETHEUS_TO_SD:-false})
DISABLE_PROMETHEUS_TO_SD_IN_DS: $(yaml-quote ${DISABLE_PROMETHEUS_TO_SD_IN_DS:-false})
ENABLE_POD_PRIORITY: $(yaml-quote ${ENABLE_POD_PRIORITY:-})
CONTAINER_RUNTIME: $(yaml-quote ${CONTAINER_RUNTIME:-})
CONTAINER_RUNTIME_ENDPOINT: $(yaml-quote ${CONTAINER_RUNTIME_ENDPOINT:-})
@@ -898,8 +910,13 @@ VOLUME_PLUGIN_DIR: $(yaml-quote ${VOLUME_PLUGIN_DIR})
KUBELET_ARGS: $(yaml-quote ${KUBELET_ARGS})
REQUIRE_METADATA_KUBELET_CONFIG_FILE: $(yaml-quote true)
ENABLE_NETD: $(yaml-quote ${ENABLE_NETD:-false})
ENABLE_NODE_TERMINATION_HANDLER: $(yaml-quote ${ENABLE_NODE_TERMINATION_HANDLER:-false})
CUSTOM_NETD_YAML: |
$(echo "${CUSTOM_NETD_YAML:-}" | sed -e "s/'/''/g")
CUSTOM_CALICO_NODE_DAEMONSET_YAML: |
$(echo "${CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}" | sed -e "s/'/''/g")
CUSTOM_TYPHA_DEPLOYMENT_YAML: |
$(echo "${CUSTOM_TYPHA_DEPLOYMENT_YAML:-}" | sed -e "s/'/''/g")
EOF
if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "gci" ]] || \
[[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "gci" ]] || \
@@ -1053,6 +1070,16 @@ EOF
if [ -n "${ETCD_EXTRA_ARGS:-}" ]; then
cat >>$file <<EOF
ETCD_EXTRA_ARGS: $(yaml-quote ${ETCD_EXTRA_ARGS})
EOF
fi
if [ -n "${ETCD_SERVERS:-}" ]; then
cat >>$file <<EOF
ETCD_SERVERS: $(yaml-quote ${ETCD_SERVERS})
EOF
fi
if [ -n "${ETCD_SERVERS_OVERRIDES:-}" ]; then
cat >>$file <<EOF
ETCD_SERVERS_OVERRIDES: $(yaml-quote ${ETCD_SERVERS_OVERRIDES})
EOF
fi
if [ -n "${APISERVER_TEST_ARGS:-}" ]; then
@@ -1088,11 +1115,6 @@ EOF
if [ -n "${INITIAL_ETCD_CLUSTER_STATE:-}" ]; then
cat >>$file <<EOF
INITIAL_ETCD_CLUSTER_STATE: $(yaml-quote ${INITIAL_ETCD_CLUSTER_STATE})
EOF
fi
if [ -n "${ETCD_QUORUM_READ:-}" ]; then
cat >>$file <<EOF
ETCD_QUORUM_READ: $(yaml-quote ${ETCD_QUORUM_READ})
EOF
fi
if [ -n "${CLUSTER_SIGNING_DURATION:-}" ]; then
@@ -1594,7 +1616,7 @@ function validate-node-local-ssds-ext(){
ssdopts="${1}"
if [[ -z "${ssdopts[0]}" || -z "${ssdopts[1]}" || -z "${ssdopts[2]}" ]]; then
echo -e "${color_red}Local SSD: NODE_LOCAL_SSDS_EXT is malformed, found ${ssdopts[0]-_},${ssdopts[1]-_},${ssdopts[2]-_} ${color_norm}" >&2
echo -e "${color_red}Local SSD: NODE_LOCAL_SSDS_EXT is malformed, found ${ssdopts[0]-_},${ssdopts[1]-_},${ssdopts[2]-_} ${color_norm}" >&2
exit 2
fi
if [[ "${ssdopts[1]}" != "scsi" && "${ssdopts[1]}" != "nvme" ]]; then
@@ -2317,6 +2339,8 @@ function create-nodes() {
if [[ -z "${HEAPSTER_MACHINE_TYPE:-}" ]]; then
local -r nodes="${NUM_NODES}"
else
echo "Creating a special node for heapster with machine-type ${HEAPSTER_MACHINE_TYPE}"
create-heapster-node
local -r nodes=$(( NUM_NODES - 1 ))
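# One of the NUM_NODES slots is taken by the dedicated heapster machine created above.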
fi
@@ -2344,13 +2368,9 @@ function create-nodes() {
gcloud compute instance-groups managed wait-until-stable \
"${group_name}" \
--zone "${ZONE}" \
--project "${PROJECT}" || true;
--project "${PROJECT}" \
--timeout "${MIG_WAIT_UNTIL_STABLE_TIMEOUT}" || true;
done
if [[ -n "${HEAPSTER_MACHINE_TYPE:-}" ]]; then
echo "Creating a special node for heapster with machine-type ${HEAPSTER_MACHINE_TYPE}"
create-heapster-node
fi
}
# Assumes:
@@ -2943,75 +2963,6 @@ function check-resources() {
return 0
}
# Prepare to push new binaries to a kubernetes cluster
# $1 - whether to prepare the push for nodes
function prepare-push() {
local node="${1-}"
#TODO(dawnchen): figure out how to upgrade a Container Linux node
if [[ "${node}" == "true" && "${NODE_OS_DISTRIBUTION}" != "debian" ]]; then
echo "Updating nodes in a kubernetes cluster with ${NODE_OS_DISTRIBUTION} is not supported yet." >&2
exit 1
fi
if [[ "${node}" != "true" && "${MASTER_OS_DISTRIBUTION}" != "debian" ]]; then
echo "Updating the master in a kubernetes cluster with ${MASTER_OS_DISTRIBUTION} is not supported yet." >&2
exit 1
fi
OUTPUT=${KUBE_ROOT}/_output/logs
mkdir -p ${OUTPUT}
kube::util::ensure-temp-dir
detect-project
detect-master
detect-node-names
get-kubeconfig-basicauth
get-kubeconfig-bearertoken
# Make sure we have the tar files staged on Google Storage
tars_from_version
# Prepare node env vars and update MIG template
if [[ "${node}" == "true" ]]; then
write-node-env
local scope_flags=$(get-scope-flags)
# Ugly hack: Since it is not possible to delete an instance template that is
# currently in use, create a temp one, then delete the old one and recreate it once again.
local tmp_template_name="${NODE_INSTANCE_PREFIX}-template-tmp"
create-node-instance-template $tmp_template_name
local template_name="${NODE_INSTANCE_PREFIX}-template"
for group in ${INSTANCE_GROUPS[@]:-}; do
gcloud compute instance-groups managed \
set-instance-template "${group}" \
--template "$tmp_template_name" \
--zone "${ZONE}" \
--project "${PROJECT}" || true;
done
gcloud compute instance-templates delete \
--project "${PROJECT}" \
--quiet \
"$template_name" || true
create-node-instance-template "$template_name"
for group in ${INSTANCE_GROUPS[@]:-}; do
gcloud compute instance-groups managed \
set-instance-template "${group}" \
--template "$template_name" \
--zone "${ZONE}" \
--project "${PROJECT}" || true;
done
gcloud compute instance-templates delete \
--project "${PROJECT}" \
--quiet \
"$tmp_template_name" || true
fi
}
# -----------------------------------------------------------------------------
# Cluster specific test helpers used from hack/e2e.go
@@ -3120,3 +3071,8 @@ function ssh-to-node() {
function prepare-e2e() {
detect-project
}
# Delete the image given by $1.
function delete-image() {
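# --quiet suppresses gcloud's interactive confirmation prompt so the deletion can run unattended.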
gcloud container images delete --quiet "$1"
}