add prune and remove unused packages
vendor/k8s.io/kubernetes/cluster/BUILD (generated, vendored): 61 lines deleted
@@ -1,61 +0,0 @@
package(default_visibility = ["//visibility:public"])

load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//cluster/addons:all-srcs",
        "//cluster/gce:all-srcs",
        "//cluster/images/etcd-version-monitor:all-srcs",
        "//cluster/images/etcd/migrate:all-srcs",
        "//cluster/images/hyperkube:all-srcs",
        "//cluster/images/kubemark:all-srcs",
    ],
    tags = ["automanaged"],
)

pkg_tar(
    name = "manifests",
    mode = "0644",
    package_dir = "kubernetes/gci-trusty",
    deps = [
        "//cluster/addons",
        "//cluster/gce/addons",
        "//cluster/gce/gci:gci-trusty-manifests",
        "//cluster/gce/manifests:gce-master-manifests",
    ],
)

# These tests just verify that bash can interpret the file.

sh_test(
    name = "common_test",
    srcs = ["common.sh"],
    deps = [
        "//hack/lib",
    ],
)

sh_test(
    name = "clientbin_test",
    srcs = ["clientbin.sh"],
    deps = [
        "//hack/lib",
    ],
)

sh_test(
    name = "kube-util_test",
    srcs = ["kube-util.sh"],
    deps = [
        "//hack/lib",
    ],
)
vendor/k8s.io/kubernetes/cluster/OWNERS (generated, vendored): 15 lines deleted
@@ -1,15 +0,0 @@
reviewers:
- eparis
- jbeda
- mikedanese
- roberthbailey
- spiffxp
- zmerlynn
approvers:
- eparis
- jbeda
- mikedanese
- roberthbailey
- zmerlynn
labels:
- sig/cluster-lifecycle
vendor/k8s.io/kubernetes/cluster/README.md (generated, vendored): 12 lines deleted
@@ -1,12 +0,0 @@
# Cluster Configuration

##### Deprecation Notice: This directory has entered maintenance mode and will not be accepting new providers. Deployments in this directory will continue to be maintained and supported at their current level of support.

The scripts and data in this directory automate creation and configuration of a Kubernetes cluster, including networking, DNS, nodes, and control plane components.

See the [getting-started guides](https://kubernetes.io/docs/getting-started-guides) for examples of how to use the scripts.

*cloudprovider*/`config-default.sh` contains a set of tweakable definitions/parameters for the cluster.


[]()
vendor/k8s.io/kubernetes/cluster/addons/BUILD (generated, vendored): 41 lines deleted
@@ -1,41 +0,0 @@
package(default_visibility = ["//visibility:public"])

load("@io_kubernetes_build//defs:pkg.bzl", "pkg_tar")

filegroup(
    name = "addon-srcs",
    srcs = glob(
        [
            "**/*.json",
            "**/*.yaml",
            "**/*.yaml.in",
        ],
        exclude = ["**/*demo*/**"],
    ),
)

pkg_tar(
    name = "addons",
    srcs = [
        ":addon-srcs",
    ],
    extension = "tar.gz",
    mode = "0644",
    strip_prefix = ".",
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//cluster/addons/fluentd-elasticsearch/es-image:all-srcs",
    ],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/cluster/addons/README.md (generated, vendored): 36 lines deleted
@@ -1,36 +0,0 @@
# Legacy Cluster add-ons

For more information on add-ons see [the documentation](https://kubernetes.io/docs/concepts/cluster-administration/addons/).

## Overview

Cluster add-ons are resources like Services and Deployments (with pods) that are
shipped with the Kubernetes binaries and are considered an inherent part of the
Kubernetes clusters.

There are currently two classes of add-ons:
- Add-ons that will be reconciled.
- Add-ons that will be created if they don't exist.

More details can be found in [addon-manager/README.md](addon-manager/README.md).

## Cooperating Horizontal / Vertical Auto-Scaling with "reconcile class addons"

"Reconcile" class add-ons are periodically reconciled to the original state given
by the initial config. In order to make Horizontal / Vertical Auto-scaling functional,
the related fields in the config should be left unset. More specifically, leave `replicas`
in `ReplicationController` / `Deployment` / `ReplicaSet` unset for Horizontal Scaling, and
leave `resources` for containers unset for Vertical Scaling. The periodic reconcile
won't clobber these fields, so they can be managed by the Horizontal / Vertical
Auto-scalers.

## Add-on naming

The suggested naming for most of the resources is `<basename>` (with no version number).
Resources like `Pod`, `ReplicationController` and `DaemonSet` are exceptions, though.
It would be hard to update `Pod` because many fields in `Pod` are immutable. For
`ReplicationController` and `DaemonSet`, an in-place update may not trigger the underlying
pods to be re-created. You probably need to change their names during an update to trigger
a complete deletion and creation.

[]()
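The auto-scaling note in the README above is easiest to see with a concrete manifest. A minimal sketch, assuming a hypothetical `example-addon` Deployment: `replicas` is left unset so the addon manager's periodic `kubectl apply` never fights a Horizontal Pod Autoscaler over that field.

```sh
# Hypothetical Reconcile-class addon; `replicas` is intentionally unset so a
# Horizontal Pod Autoscaler can own that field across reconcile cycles.
cat <<'EOF' | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-addon            # illustrative name, not a real addon
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: example-addon
  template:
    metadata:
      labels:
        k8s-app: example-addon
    spec:
      containers:
      - name: app
        image: k8s.gcr.io/pause:3.1   # placeholder image
EOF
```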
vendor/k8s.io/kubernetes/cluster/addons/addon-manager/CHANGELOG.md (generated, vendored): 65 lines deleted
@@ -1,65 +0,0 @@
### Version 8.7 (Tue September 4 2018 Zihong Zheng <zihongz@google.com>)
- Support extra `--prune-whitelist` resources in kube-addon-manager.
- Update kubectl to v1.10.7.

### Version 8.6 (Tue February 20 2018 Zihong Zheng <zihongz@google.com>)
- Allow reconcile/ensure loop to work with resource under non-kube-system namespace.
- Update kubectl to v1.9.3.

### Version 8.4 (Thu November 30 2017 zou nengren @zouyee)
- Update kubectl to v1.8.4.

### Version 6.5 (Wed October 15 2017 Daniel Kłobuszewski <danielmk@google.com>)
- Support for HA masters.

### Version 6.4-beta.2 (Mon June 12 2017 Jeff Grafton <jgrafton@google.com>)
- Update kubectl to v1.6.4.
- Refresh base images.

### Version 6.4-beta.1 (Wed March 8 2017 Zihong Zheng <zihongz@google.com>)
- Create EnsureExists class addons before Reconcile class addons.

### Version 6.4-alpha.3 (Fri February 24 2017 Zihong Zheng <zihongz@google.com>)
- Support 'ensure exist' class addon and use addon-manager specific label.

### Version 6.4-alpha.2 (Wed February 16 2017 Zihong Zheng <zihongz@google.com>)
- Update kubectl to v1.6.0-alpha.2 to use HPA in autoscaling/v1 instead of extensions/v1beta1.

### Version 6.4-alpha.1 (Wed February 1 2017 Zihong Zheng <zihongz@google.com>)
- Update kubectl to v1.6.0-alpha.1 for supporting optional ConfigMap.

### Version 6.3 (Fri January 27 2017 Lucas Käldström <lucas.kaldstrom@hotmail.co.uk>)
- Updated the arm base image to `armhf/busybox` and now using qemu v2.7 for emulation.

### Version 6.2 (Thu January 12 2017 Zihong Zheng <zihongz@google.com>)
- Update kubectl to the stable version.

### Version 6.1 (Tue November 29 2016 Zihong Zheng <zihongz@google.com>)
- Support pruning old Deployments.

### Version 6.0 (Fri November 18 2016 Zihong Zheng <zihongz@google.com>)
- Upgrade Addon Manager to use `kubectl apply`.

### Version 5.2 (Wed October 26 2016 Zihong Zheng <zihongz@google.com>)
- Added support for ConfigMap and upgraded kubectl version to v1.4.4 (pr #35255)

### Version 5.1 (Mon Jul 4 2016 Marek Grabowski <gmarek@google.com>)
- Fixed the way addon-manager handles non-namespaced objects

### Version 5 (Fri Jun 24 2016 Jerzy Szczepkowski @jszczepkowski)
- Added PetSet support to addon manager

### Version 4 (Tue Jun 21 2016 Mike Danese @mikedanese)
- Increased addon check interval

### Version 3 (Sun Jun 19 2016 Lucas Käldström @luxas)
- Bumped up addon-manager to v3

### Version 2 (Fri May 20 2016 Lucas Käldström @luxas)
- Removed deprecated kubectl command, added support for DaemonSets

### Version 1 (Thu May 5 2016 Mike Danese @mikedanese)
- Run kube-addon-manager in a pod


[]()
vendor/k8s.io/kubernetes/cluster/addons/addon-manager/Dockerfile (generated, vendored): 21 lines deleted
@@ -1,21 +0,0 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM BASEIMAGE

ADD kube-addons.sh /opt/
ADD namespace.yaml /opt/
ADD kubectl /usr/local/bin/

CMD ["/opt/kube-addons.sh"]
vendor/k8s.io/kubernetes/cluster/addons/addon-manager/Makefile (generated, vendored): 58 lines deleted
@@ -1,58 +0,0 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

IMAGE=staging-k8s.gcr.io/kube-addon-manager
ARCH?=amd64
TEMP_DIR:=$(shell mktemp -d)
VERSION=v8.7
KUBECTL_VERSION?=v1.10.7

ifeq ($(ARCH),amd64)
BASEIMAGE?=bashell/alpine-bash
endif
ifeq ($(ARCH),arm)
BASEIMAGE?=arm32v7/debian
endif
ifeq ($(ARCH),arm64)
BASEIMAGE?=arm64v8/debian
endif
ifeq ($(ARCH),ppc64le)
BASEIMAGE?=ppc64le/debian
endif
ifeq ($(ARCH),s390x)
BASEIMAGE?=s390x/debian
endif

.PHONY: build push

all: build

build:
	cp ./* $(TEMP_DIR)
	curl -sSL --retry 5 https://dl.k8s.io/release/$(KUBECTL_VERSION)/bin/linux/$(ARCH)/kubectl > $(TEMP_DIR)/kubectl
	chmod +x $(TEMP_DIR)/kubectl
	cd $(TEMP_DIR) && sed -i.back "s|BASEIMAGE|$(BASEIMAGE)|g" Dockerfile
	docker build --pull -t $(IMAGE)-$(ARCH):$(VERSION) $(TEMP_DIR)

push: build
	docker push $(IMAGE)-$(ARCH):$(VERSION)
ifeq ($(ARCH),amd64)
	# Backward compatibility. TODO: deprecate this image tag
	docker rmi $(IMAGE):$(VERSION) 2>/dev/null || true
	docker tag $(IMAGE)-$(ARCH):$(VERSION) $(IMAGE):$(VERSION)
	docker push $(IMAGE):$(VERSION)
endif

clean:
	docker rmi -f $(IMAGE)-$(ARCH):$(VERSION)
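Since `ARCH`, `BASEIMAGE`, and `KUBECTL_VERSION` are declared with `?=` in the Makefile above, they can all be overridden on the command line. A usage sketch:

```sh
# Build locally without pushing, for a non-default architecture; both
# variables are ?= defaults in the Makefile above, so this override works.
make build ARCH=arm64 KUBECTL_VERSION=v1.10.7
```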
vendor/k8s.io/kubernetes/cluster/addons/addon-manager/OWNERS (generated, vendored): 2 lines deleted
@@ -1,2 +0,0 @@
reviewers:
- mrhohn
vendor/k8s.io/kubernetes/cluster/addons/addon-manager/README.md (generated, vendored): 61 lines deleted
@@ -1,61 +0,0 @@
### Addon-manager

addon-manager manages two classes of addons with given template files in
`$ADDON_PATH` (default `/etc/kubernetes/addons/`).
- Addons with label `addonmanager.kubernetes.io/mode=Reconcile` will be periodically
reconciled. Direct manipulation of these addons through the apiserver is discouraged because
addon-manager will bring them back to the original state. In particular:
  - Addon will be re-created if it is deleted.
  - Addon will be reconfigured to the state given by the supplied fields in the template
  file periodically.
  - Addon will be deleted when its manifest file is deleted from the `$ADDON_PATH`.
- Addons with label `addonmanager.kubernetes.io/mode=EnsureExists` will be checked for
existence only. Users can edit these addons as they want. In particular:
  - Addon will only be created/re-created with the given template file when there is no
  instance of the resource with that name.
  - Addon will not be deleted when the manifest file is deleted from the `$ADDON_PATH`.

Notes:
- Label `kubernetes.io/cluster-service=true` is deprecated (only for Addon Manager).
In a future release (after one year), Addon Manager may not respect it anymore. Addons
that have this label but lack `addonmanager.kubernetes.io/mode=EnsureExists` will be
treated as "reconcile class addons" for now.
- Resources under `$ADDON_PATH` need to have one of these two labels;
otherwise they will be ignored.
- The above label and namespace rules do not apply to `/opt/namespace.yaml` and
resources under `/etc/kubernetes/admission-controls/`. addon-manager will attempt to
create them regardless during startup.

#### How to release

The `addon-manager` is built for multiple architectures.

1. Change something in the source
2. Bump `VERSION` in the `Makefile`
3. Bump `KUBECTL_VERSION` in the `Makefile` if required
4. Build the `amd64` image and test it on a cluster
5. Push all images

```console
# Build for linux/amd64 (default)
$ make push ARCH=amd64
# ---> staging-k8s.gcr.io/kube-addon-manager-amd64:VERSION
# ---> staging-k8s.gcr.io/kube-addon-manager:VERSION (image with backwards-compatible naming)

$ make push ARCH=arm
# ---> staging-k8s.gcr.io/kube-addon-manager-arm:VERSION

$ make push ARCH=arm64
# ---> staging-k8s.gcr.io/kube-addon-manager-arm64:VERSION

$ make push ARCH=ppc64le
# ---> staging-k8s.gcr.io/kube-addon-manager-ppc64le:VERSION

$ make push ARCH=s390x
# ---> staging-k8s.gcr.io/kube-addon-manager-s390x:VERSION
```

If you don't want to push the images, run `make` or `make build` instead.


[]()
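To see which mode each running addon actually carries, the label can be printed as an extra column. A quick sketch, assuming `kubectl` access to the cluster:

```sh
# Show kube-system addons with their addon-manager mode as a label column.
kubectl get deployments,daemonsets,services -n kube-system \
  -L addonmanager.kubernetes.io/mode
```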
vendor/k8s.io/kubernetes/cluster/addons/addon-manager/kube-addons.sh (generated, vendored): 257 lines deleted
@@ -1,257 +0,0 @@
#!/usr/bin/env bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# LIMITATIONS
# 1. Exit code is probably not always correct.
# 2. There are no unit tests.
# 3. Will not work if the total length of paths to addons is greater than
#    bash can handle. Probably it is not a problem: ARG_MAX=2097152 on GCE.

# Cosmetic improvements to be done:
# 1. Improve the log function; add timestamp, file name, etc.
# 2. Logging doesn't work from files that print things out.
# 3. kubectl prints the output to stderr (the output should be captured and
#    then logged).

KUBECTL=${KUBECTL_BIN:-/usr/local/bin/kubectl}
KUBECTL_OPTS=${KUBECTL_OPTS:-}
# KUBECTL_PRUNE_WHITELIST is a list of resources whitelisted by default.
# This is currently the same as the default in:
# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/apply.go
KUBECTL_PRUNE_WHITELIST=(
  core/v1/ConfigMap
  core/v1/Endpoints
  core/v1/Namespace
  core/v1/PersistentVolumeClaim
  core/v1/PersistentVolume
  core/v1/Pod
  core/v1/ReplicationController
  core/v1/Secret
  core/v1/Service
  batch/v1/Job
  batch/v1beta1/CronJob
  extensions/v1beta1/DaemonSet
  extensions/v1beta1/Deployment
  extensions/v1beta1/Ingress
  extensions/v1beta1/ReplicaSet
  apps/v1beta1/StatefulSet
  apps/v1beta1/Deployment
)

ADDON_CHECK_INTERVAL_SEC=${TEST_ADDON_CHECK_INTERVAL_SEC:-60}
ADDON_PATH=${ADDON_PATH:-/etc/kubernetes/addons}

SYSTEM_NAMESPACE=kube-system

# Addons could use this label with two modes:
# - ADDON_MANAGER_LABEL=Reconcile
# - ADDON_MANAGER_LABEL=EnsureExists
ADDON_MANAGER_LABEL="addonmanager.kubernetes.io/mode"
# This label is deprecated (only for Addon Manager). In a future release
# addon-manager may not respect it anymore. Addons with
# CLUSTER_SERVICE_LABEL=true and without ADDON_MANAGER_LABEL=EnsureExists
# will be reconciled for now.
CLUSTER_SERVICE_LABEL="kubernetes.io/cluster-service"

# Whether only one addon manager should be running in a multi-master setup.
# Disabling this flag will force all addon managers to assume they are the
# leaders.
ADDON_MANAGER_LEADER_ELECTION=${ADDON_MANAGER_LEADER_ELECTION:-true}

# Remember that you can't log from functions that print some output (because
# logs are also printed on stdout).
# $1 level
# $2 message
function log() {
  # manage log levels manually here

  # add the timestamp if you find it useful
  case $1 in
    DB3 )
#      echo "$1: $2"
      ;;
    DB2 )
#      echo "$1: $2"
      ;;
    DBG )
#      echo "$1: $2"
      ;;
    INFO )
      echo "$1: $2"
      ;;
    WRN )
      echo "$1: $2"
      ;;
    ERR )
      echo "$1: $2"
      ;;
    * )
      echo "INVALID_LOG_LEVEL $1: $2"
      ;;
  esac
}

# Generate kubectl prune-whitelist flags from the provided resource list.
function generate_prune_whitelist_flags() {
  local -r resources=($@)
  for resource in "${resources[@]}"; do
    printf "%s" "--prune-whitelist ${resource} "
  done
}

# KUBECTL_EXTRA_PRUNE_WHITELIST is a list of extra whitelisted resources
# besides the default ones.
extra_prune_whitelist=
if [ -n "${KUBECTL_EXTRA_PRUNE_WHITELIST:-}" ]; then
  extra_prune_whitelist=( ${KUBECTL_EXTRA_PRUNE_WHITELIST:-} )
fi
prune_whitelist=( ${KUBECTL_PRUNE_WHITELIST[@]} ${extra_prune_whitelist[@]} )
prune_whitelist_flags=$(generate_prune_whitelist_flags ${prune_whitelist[@]})

log INFO "== Generated kubectl prune whitelist flags: $prune_whitelist_flags =="

# $1 filename of addon to start.
# $2 count of tries to start the addon.
# $3 delay in seconds between two consecutive tries.
# $4 namespace.
function start_addon() {
  local -r addon_filename=$1;
  local -r tries=$2;
  local -r delay=$3;
  local -r namespace=$4

  create_resource_from_string "$(cat ${addon_filename})" "${tries}" "${delay}" "${addon_filename}" "${namespace}"
}

# $1 string with json or yaml.
# $2 count of tries to start the addon.
# $3 delay in seconds between two consecutive tries.
# $4 name of this object to use when logging about it.
# $5 namespace for this object.
function create_resource_from_string() {
  local -r config_string=$1;
  local tries=$2;
  local -r delay=$3;
  local -r config_name=$4;
  local -r namespace=$5;
  while [ ${tries} -gt 0 ]; do
    echo "${config_string}" | ${KUBECTL} ${KUBECTL_OPTS} --namespace="${namespace}" apply -f - && \
      log INFO "== Successfully started ${config_name} in namespace ${namespace} at $(date -Is)" && \
      return 0;
    let tries=tries-1;
    log WRN "== Failed to start ${config_name} in namespace ${namespace} at $(date -Is). ${tries} tries remaining. =="
    sleep ${delay};
  done
  return 1;
}

function reconcile_addons() {
  # TODO: Remove the first command in a future release.
  # Adding this for backward compatibility. Old addons that have
  # CLUSTER_SERVICE_LABEL=true and don't have ADDON_MANAGER_LABEL=EnsureExists
  # will still be reconciled.
  # Filter out the `configured` message to avoid noisy logs.
  # `created`, `pruned` and errors will be logged.
  log INFO "== Reconciling with deprecated label =="
  ${KUBECTL} ${KUBECTL_OPTS} apply -f ${ADDON_PATH} \
    -l ${CLUSTER_SERVICE_LABEL}=true,${ADDON_MANAGER_LABEL}!=EnsureExists \
    --prune=true ${prune_whitelist_flags} --recursive | grep -v configured

  log INFO "== Reconciling with addon-manager label =="
  ${KUBECTL} ${KUBECTL_OPTS} apply -f ${ADDON_PATH} \
    -l ${CLUSTER_SERVICE_LABEL}!=true,${ADDON_MANAGER_LABEL}=Reconcile \
    --prune=true ${prune_whitelist_flags} --recursive | grep -v configured

  log INFO "== Kubernetes addon reconcile completed at $(date -Is) =="
}

function ensure_addons() {
  # Creating objects that already exist will fail.
  # Filter out the `AlreadyExists` message to avoid noisy logs.
  ${KUBECTL} ${KUBECTL_OPTS} create -f ${ADDON_PATH} \
    -l ${ADDON_MANAGER_LABEL}=EnsureExists --recursive 2>&1 | grep -v AlreadyExists

  log INFO "== Kubernetes addon ensure completed at $(date -Is) =="
}

function is_leader() {
  # In a multi-master setup, only one addon manager should be running. We use
  # the existing leader election in kube-controller-manager instead of
  # implementing a separate mechanism here.
  if ! $ADDON_MANAGER_LEADER_ELECTION; then
    log INFO "Leader election disabled."
    return 0;
  fi
  KUBE_CONTROLLER_MANAGER_LEADER=`${KUBECTL} -n kube-system get ep kube-controller-manager \
    -o go-template=$'{{index .metadata.annotations "control-plane.alpha.kubernetes.io/leader"}}' \
    | sed 's/^.*"holderIdentity":"\([^"]*\)".*/\1/' | awk -F'_' '{print $1}'`
  # If there was any problem with getting the leader election results, the var
  # will be empty. Since it's better to have multiple addon managers than no
  # addon managers at all, we assume that we're the leader in such a case.
  log INFO "Leader is $KUBE_CONTROLLER_MANAGER_LEADER"
  [[ "$KUBE_CONTROLLER_MANAGER_LEADER" == "" ||
     "$HOSTNAME" == "$KUBE_CONTROLLER_MANAGER_LEADER" ]]
}

# The business logic for whether a given object should be created was already
# enforced by salt, and /etc/kubernetes/addons is the managed result of that.
# Start everything below that directory.
log INFO "== Kubernetes addon manager started at $(date -Is) with ADDON_CHECK_INTERVAL_SEC=${ADDON_CHECK_INTERVAL_SEC} =="

# Create the namespace that will be used to host the cluster-level add-ons.
start_addon /opt/namespace.yaml 100 10 "" &

# Wait for the default service account to be created in the kube-system namespace.
token_found=""
while [ -z "${token_found}" ]; do
  sleep .5
  token_found=$(${KUBECTL} ${KUBECTL_OPTS} get --namespace="${SYSTEM_NAMESPACE}" serviceaccount default -o go-template="{{with index .secrets 0}}{{.name}}{{end}}")
  if [[ $? -ne 0 ]]; then
    token_found="";
    log WRN "== Error getting default service account, retry in 0.5 second =="
  fi
done

log INFO "== Default service account in the ${SYSTEM_NAMESPACE} namespace has token ${token_found} =="

# Create admission_control objects, if defined, before any other addon
# services. If the limits are defined in a namespace other than default, we
# should still create the limits for the default namespace.
for obj in $(find /etc/kubernetes/admission-controls \( -name \*.yaml -o -name \*.json \)); do
  start_addon "${obj}" 100 10 default &
  log INFO "++ obj ${obj} is created ++"
done

# Start the apply loop.
# Check if the configuration has changed recently, in case the user
# created/updated/deleted the files on the master.
log INFO "== Entering periodical apply loop at $(date -Is) =="
while true; do
  start_sec=$(date +"%s")
  if is_leader; then
    ensure_addons
    reconcile_addons
  else
    log INFO "Not elected leader, going back to sleep."
  fi
  end_sec=$(date +"%s")
  len_sec=$((${end_sec}-${start_sec}))
  # Subtract the time passed from the sleep time.
  if [[ ${len_sec} -lt ${ADDON_CHECK_INTERVAL_SEC} ]]; then
    sleep_time=$((${ADDON_CHECK_INTERVAL_SEC}-${len_sec}))
    sleep ${sleep_time}
  fi
done
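For reference, with the default whitelist above, the second reconcile call in `reconcile_addons` expands to roughly the following command (a sketch; the real invocation carries one `--prune-whitelist` flag per entry in `KUBECTL_PRUNE_WHITELIST`):

```sh
# Approximate expansion of the addon-manager-label reconcile pass
# (whitelist truncated to two entries for brevity).
kubectl apply -f /etc/kubernetes/addons \
  -l 'kubernetes.io/cluster-service!=true,addonmanager.kubernetes.io/mode=Reconcile' \
  --prune=true \
  --prune-whitelist core/v1/ConfigMap \
  --prune-whitelist extensions/v1beta1/Deployment \
  --recursive
```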
vendor/k8s.io/kubernetes/cluster/addons/addon-manager/namespace.yaml (generated, vendored): 4 lines deleted
@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: kube-system
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/MAINTAINERS.md (generated, vendored): 6 lines deleted
@@ -1,6 +0,0 @@
# Maintainers

Matt Dupre <matt@projectcalico.org>, Casey Davenport <casey@tigera.io> and committers to the https://github.com/projectcalico/k8s-policy repository.


[]()
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/OWNERS (generated, vendored): 10 lines deleted
@@ -1,10 +0,0 @@
approvers:
- bowei
- caseydavenport
- dnardo
- fasaxc
reviewers:
- bowei
- caseydavenport
- dnardo
- fasaxc
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/README.md (generated, vendored): 14 lines deleted
@@ -1,14 +0,0 @@
# Calico Policy Controller

Calico is an implementation of the Kubernetes network policy API. The provided manifests install:

- A DaemonSet which runs Calico on each node in the cluster.
- A Deployment which installs the Calico Typha agent.
- A Service for the Calico Typha agent.

### Learn More

Learn more about Calico at https://docs.projectcalico.org

[]()
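A quick sanity check (a sketch, assuming the manifests that follow are installed) that the three objects the README lists actually exist:

```sh
# The README above promises a calico-node DaemonSet, a Typha Deployment,
# and a Typha Service; confirm all three landed in kube-system.
kubectl get daemonset/calico-node deployment/calico-typha service/calico-typha \
  -n kube-system
```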
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/calico-clusterrole.yaml (generated, vendored): 80 lines deleted
@@ -1,80 +0,0 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: [""]
  resources:
  - namespaces
  - serviceaccounts
  verbs:
  - get
  - list
  - watch
- apiGroups: [""]
  resources:
  - endpoints
  verbs:
  - get
- apiGroups: [""]
  resources:
  - services
  verbs:
  - get
- apiGroups: [""]
  resources:
  - pods/status
  verbs:
  - update
- apiGroups: [""]
  resources:
  - pods
  verbs:
  - get
  - list
  - watch
  - patch
- apiGroups: [""]
  resources:
  - nodes
  verbs:
  - get
  - list
  - update
  - watch
- apiGroups: ["extensions"]
  resources:
  - networkpolicies
  verbs:
  - get
  - list
  - watch
- apiGroups: ["networking.k8s.io"]
  resources:
  - networkpolicies
  verbs:
  - watch
  - list
- apiGroups: ["crd.projectcalico.org"]
  resources:
  - globalfelixconfigs
  - felixconfigurations
  - bgppeers
  - globalbgpconfigs
  - bgpconfigurations
  - ippools
  - globalnetworkpolicies
  - globalnetworksets
  - networkpolicies
  - clusterinformations
  - hostendpoints
  verbs:
  - create
  - get
  - list
  - update
  - watch
@@ -1,15 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico
subjects:
- kind: ServiceAccount
  name: calico
  namespace: kube-system
@@ -1,14 +0,0 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-cpva
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["list"]
- apiGroups: ["apps", "extensions"]
  resources: ["deployments", "daemonsets"]
  verbs: ["patch"]
@@ -1,15 +0,0 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-cpva
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
  name: calico-cpva
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: calico-cpva
  apiGroup: rbac.authorization.k8s.io
@@ -1,8 +0,0 @@
kind: ServiceAccount
apiVersion: v1
metadata:
  name: calico-cpva
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/calico-node-daemonset.yaml (generated, vendored): 175 lines deleted
@@ -1,175 +0,0 @@
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        k8s-app: calico-node
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-node-critical
      nodeSelector:
        projectcalico.org/ds-ready: "true"
      hostNetwork: true
      serviceAccountName: calico
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      containers:
        # Runs the calico/node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: gcr.io/projectcalico-org/node:v2.6.7
          env:
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            - name: CALICO_NETWORKING_BACKEND
              value: "none"
            - name: DATASTORE_TYPE
              value: "kubernetes"
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            - name: FELIX_HEALTHENABLED
              value: "true"
            - name: FELIX_IPV6SUPPORT
              value: "false"
            - name: FELIX_LOGSEVERITYSYS
              value: "none"
            - name: FELIX_LOGSEVERITYSCREEN
              value: "info"
            - name: FELIX_PROMETHEUSMETRICSENABLED
              value: "true"
            - name: FELIX_REPORTINGINTERVALSECS
              value: "0"
            - name: FELIX_TYPHAK8SSERVICENAME
              value: "calico-typha"
            - name: IP
              value: ""
            - name: NO_DEFAULT_POOLS
              value: "true"
            - name: NODENAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: WAIT_FOR_DATASTORE
              value: "true"
          securityContext:
            privileged: true
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9099
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9099
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /etc/calico
              name: etc-calico
              readOnly: true
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: gcr.io/projectcalico-org/cni:v1.11.2
          command: ["/install-cni.sh"]
          env:
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            - name: CNI_NETWORK_CONFIG
              value: |-
                {
                  "name": "k8s-pod-network",
                  "cniVersion": "0.3.0",
                  "plugins": [
                    {
                      "type": "calico",
                      "log_level": "debug",
                      "datastore_type": "kubernetes",
                      "nodename": "__KUBERNETES_NODE_NAME__",
                      "ipam": {
                        "type": "host-local",
                        "subnet": "usePodCidr"
                      },
                      "policy": {
                        "type": "k8s",
                        "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
                      },
                      "kubernetes": {
                        "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
                        "kubeconfig": "__KUBECONFIG_FILEPATH__"
                      }
                    },
                    {
                      "type": "portmap",
                      "capabilities": {"portMappings": true},
                      "snat": true
                    }
                  ]
                }
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
      volumes:
        # Used to ensure proper kmods are installed.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        # Mount in the Felix config file from the host.
        - name: etc-calico
          hostPath:
            path: /etc/calico
        # Used to install CNI binaries.
        - name: cni-bin-dir
          hostPath:
            path: __CALICO_CNI_DIR__
        # Used to install CNI network config.
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
      tolerations:
        # Make sure calico/node gets scheduled on all nodes.
        - effect: NoSchedule
          operator: Exists
        - effect: NoExecute
          operator: Exists
        - key: CriticalAddonsOnly
          operator: Exists
@@ -1,22 +0,0 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-node-vertical-autoscaler
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
data:
  node-autoscaler: |-
    {
      "calico-node": {
        "requests": {
          "cpu": {
            "base": "80m",
            "step": "20m",
            "nodesPerStep": 10,
            "max": "500m"
          }
        }
      }
    }
@@ -1,38 +0,0 @@
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: calico-node-vertical-autoscaler
  namespace: kube-system
  labels:
    k8s-app: calico-node-autoscaler
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node-autoscaler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-cluster-critical
      containers:
        - image: k8s.gcr.io/cpvpa-amd64:v0.6.0
          name: autoscaler
          command:
            - /cpvpa
            - --target=daemonset/calico-node
            - --namespace=kube-system
            - --logtostderr=true
            - --poll-period-seconds=30
            - --v=2
            - --config-file=/etc/config/node-autoscaler
          volumeMounts:
            - name: config
              mountPath: /etc/config
      volumes:
        - name: config
          configMap:
            name: calico-node-vertical-autoscaler
      serviceAccountName: calico-cpva
@@ -1,8 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
@@ -1,15 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: clusterinformations.crd.projectcalico.org
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: ClusterInformation
    plural: clusterinformations
    singular: clusterinformation
@@ -1,15 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: felixconfigurations.crd.projectcalico.org
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: FelixConfiguration
    plural: felixconfigurations
    singular: felixconfiguration
@@ -1,15 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: globalbgpconfigs.crd.projectcalico.org
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalBGPConfig
    plural: globalbgpconfigs
    singular: globalbgpconfig
@@ -1,15 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: globalfelixconfigs.crd.projectcalico.org
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalFelixConfig
    plural: globalfelixconfigs
    singular: globalfelixconfig
@@ -1,15 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: globalnetworkpolicies.crd.projectcalico.org
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalNetworkPolicy
    plural: globalnetworkpolicies
    singular: globalnetworkpolicy
@@ -1,15 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: globalnetworksets.crd.projectcalico.org
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalNetworkSet
    plural: globalnetworksets
    singular: globalnetworkset
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/hostendpoints-crd.yaml (generated, vendored): 15 lines deleted
@@ -1,15 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: hostendpoints.crd.projectcalico.org
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: HostEndpoint
    plural: hostendpoints
    singular: hostendpoint
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/ippool-crd.yaml (generated, vendored): 15 lines deleted
@@ -1,15 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ippools.crd.projectcalico.org
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: IPPool
    plural: ippools
    singular: ippool
@@ -1,15 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: networkpolicies.crd.projectcalico.org
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  scope: Namespaced
  group: crd.projectcalico.org
  version: v1
  names:
    kind: NetworkPolicy
    plural: networkpolicies
    singular: networkpolicy
@@ -1,16 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gce:podsecuritypolicy:calico
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gce:podsecuritypolicy:privileged
subjects:
- kind: ServiceAccount
  name: calico
  namespace: kube-system
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/typha-deployment.yaml (generated, vendored): 71 lines deleted
@@ -1,71 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: calico-typha
spec:
  revisionHistoryLimit: 2
  template:
    metadata:
      labels:
        k8s-app: calico-typha
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-cluster-critical
      tolerations:
        - key: CriticalAddonsOnly
          operator: Exists
      hostNetwork: true
      serviceAccountName: calico
      containers:
        - image: gcr.io/projectcalico-org/typha:v0.5.6
          name: calico-typha
          ports:
            - containerPort: 5473
              name: calico-typha
              protocol: TCP
          env:
            - name: TYPHA_LOGFILEPATH
              value: "none"
            - name: TYPHA_LOGSEVERITYSYS
              value: "none"
            - name: TYPHA_LOGSEVERITYSCREEN
              value: "info"
            - name: TYPHA_PROMETHEUSMETRICSENABLED
              value: "true"
            - name: TYPHA_CONNECTIONREBALANCINGMODE
              value: "kubernetes"
            - name: TYPHA_PROMETHEUSMETRICSPORT
              value: "9093"
            - name: TYPHA_DATASTORETYPE
              value: "kubernetes"
            - name: TYPHA_REPORTINGINTERVALSECS
              value: "0"
            - name: TYPHA_MAXCONNECTIONSLOWERLIMIT
              value: "1"
            - name: TYPHA_HEALTHENABLED
              value: "true"
          volumeMounts:
            - mountPath: /etc/calico
              name: etc-calico
              readOnly: true
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9098
            periodSeconds: 30
            initialDelaySeconds: 30
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9098
            periodSeconds: 10
      volumes:
        - name: etc-calico
          hostPath:
            path: /etc/calico
@@ -1,11 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: typha-cpha
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["list"]
@@ -1,15 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: typha-cpha
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: typha-cpha
subjects:
- kind: ServiceAccount
  name: typha-cpha
  namespace: kube-system
@@ -1,24 +0,0 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-typha-horizontal-autoscaler
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
data:
  ladder: |-
    {
      "coresToReplicas": [],
      "nodesToReplicas":
      [
        [1, 1],
        [10, 2],
        [100, 3],
        [250, 4],
        [500, 5],
        [1000, 6],
        [1500, 7],
        [2000, 8]
      ]
    }
@@ -1,35 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-typha-horizontal-autoscaler
  namespace: kube-system
  labels:
    k8s-app: calico-typha-autoscaler
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: calico-typha-autoscaler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-cluster-critical
      containers:
        - image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2
          name: autoscaler
          command:
            - /cluster-proportional-autoscaler
            - --namespace=kube-system
            - --configmap=calico-typha-horizontal-autoscaler
            - --target=deployment/calico-typha
            - --logtostderr=true
            - --v=2
          resources:
            requests:
              cpu: 10m
            limits:
              cpu: 10m
      serviceAccountName: typha-cpha
@@ -1,15 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: typha-cpha
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get"]
- apiGroups: ["extensions"]
  resources: ["deployments/scale"]
  verbs: ["get", "update"]
@@ -1,16 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: typha-cpha
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: typha-cpha
subjects:
- kind: ServiceAccount
  name: typha-cpha
  namespace: kube-system
@@ -1,8 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: typha-cpha
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
vendor/k8s.io/kubernetes/cluster/addons/calico-policy-controller/typha-service.yaml (generated, vendored): 17 lines deleted
@@ -1,17 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: calico-typha
spec:
  ports:
    - port: 5473
      protocol: TCP
      targetPort: calico-typha
      name: calico-typha
  selector:
    k8s-app: calico-typha
@@ -1,14 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: typha-cpva
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["list"]
- apiGroups: ["apps", "extensions"]
  resources: ["deployments"]
  verbs: ["patch"]
@@ -1,15 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: typha-cpva
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: typha-cpva
subjects:
- kind: ServiceAccount
  name: typha-cpva
  namespace: kube-system
@@ -1,22 +0,0 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-typha-vertical-autoscaler
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
data:
  typha-autoscaler: |-
    {
      "calico-typha": {
        "requests": {
          "cpu": {
            "base": "120m",
            "step": "80m",
            "nodesPerStep": 10,
            "max": "1000m"
          }
        }
      }
    }
@@ -1,38 +0,0 @@
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: calico-typha-vertical-autoscaler
  namespace: kube-system
  labels:
    k8s-app: calico-typha-autoscaler
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: calico-typha-autoscaler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-cluster-critical
      containers:
        - image: k8s.gcr.io/cpvpa-amd64:v0.6.0
          name: autoscaler
          command:
            - /cpvpa
            - --target=deployment/calico-typha
            - --namespace=kube-system
            - --logtostderr=true
            - --poll-period-seconds=30
            - --v=2
            - --config-file=/etc/config/typha-autoscaler
          volumeMounts:
            - name: config
              mountPath: /etc/config
      volumes:
        - name: config
          configMap:
            name: calico-typha-vertical-autoscaler
      serviceAccountName: calico-cpva
@@ -1,8 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: typha-cpva
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
vendor/k8s.io/kubernetes/cluster/addons/cluster-loadbalancing/MAINTAINERS.md (generated, vendored): 6 lines deleted
@@ -1,6 +0,0 @@
# Maintainers

Prashanth.B <beeps@google.com>


[]()
vendor/k8s.io/kubernetes/cluster/addons/cluster-loadbalancing/OWNERS (generated, vendored): 6 lines deleted
@@ -1,6 +0,0 @@
approvers:
- bowei
- nicksardo
reviewers:
- bowei
- nicksardo
112
vendor/k8s.io/kubernetes/cluster/addons/cluster-loadbalancing/glbc/README.md
generated
vendored
112
vendor/k8s.io/kubernetes/cluster/addons/cluster-loadbalancing/glbc/README.md
generated
vendored
@@ -1,112 +0,0 @@
|
||||
# GCE Load-Balancer Controller (GLBC) Cluster Addon
|
||||
|
||||
This cluster addon is composed of:
|
||||
* A [Google L7 LoadBalancer Controller](https://github.com/kubernetes/contrib/tree/master/ingress/controllers/gce)
|
||||
* A [404 default backend](https://github.com/kubernetes/contrib/tree/master/404-server) Service + RC
|
||||
|
||||
It relies on the [Ingress resource](https://kubernetes.io/docs/user-guide/ingress.md) only available in Kubernetes version 1.1 and beyond.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before you can receive traffic through the GCE L7 Loadbalancer Controller you need:
|
||||
* A Working Kubernetes 1.1 cluster
|
||||
* At least 1 Kubernetes [NodePort Service](https://kubernetes.io/docs/user-guide/services.md#type-nodeport) (this is the endpoint for your Ingress)
|
||||
* Firewall-rules that allow traffic to the NodePort service, as indicated by `kubectl` at Service creation time
|
||||
* Adequate quota, as mentioned in the next section
|
||||
* A single instance of the L7 Loadbalancer Controller pod (if you're using the default GCE setup, this should already be running in the `kube-system` namespace)
|
||||
## Quota

GLBC is not aware of your GCE quota. As of this writing, users get 3 [GCE Backend Services](https://cloud.google.com/compute/docs/load-balancing/http/backend-service) by default. If you plan on creating Ingresses for multiple Kubernetes Services, remember that each one requires a backend service, and request quota accordingly. Should you fail to do so, the controller will poll periodically and grab the first free backend-service slot it finds. You can view your quota:

```console
$ gcloud compute project-info describe --project myproject
```
See [GCE documentation](https://cloud.google.com/compute/docs/resource-quotas#checking_your_quota) for how to request more.

## Latency

It takes ~1m to spin up a load balancer (this includes acquiring the public IP), and ~5-6m before the GCE API starts health-checking backends. So as far as latency goes, here's what to expect:

Assume one creates the following simple Ingress:
```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: test-ingress
spec:
  backend:
    # This will just loop back to the default backend of GLBC
    serviceName: default-http-backend
    servicePort: 80
```

* time, t=0
```console
$ kubectl get ing
NAME           RULE      BACKEND                   ADDRESS
test-ingress   -         default-http-backend:80
$ kubectl describe ing
No events.
```

* time, t=1m
```console
$ kubectl get ing
NAME           RULE      BACKEND                   ADDRESS
test-ingress   -         default-http-backend:80   130.211.5.27

$ kubectl describe ing
target-proxy:      k8s-tp-default-test-ingress
url-map:           k8s-um-default-test-ingress
backends:          {"k8s-be-32342":"UNKNOWN"}
forwarding-rule:   k8s-fw-default-test-ingress
Events:
  FirstSeen  LastSeen  Count  From                        SubobjectPath  Reason   Message
  ─────────  ────────  ─────  ────                        ─────────────  ──────   ───────
  46s        46s       1      {loadbalancer-controller }                 Success  Created loadbalancer 130.211.5.27
```

* time, t=5m
```console
$ kubectl describe ing
target-proxy:      k8s-tp-default-test-ingress
url-map:           k8s-um-default-test-ingress
backends:          {"k8s-be-32342":"HEALTHY"}
forwarding-rule:   k8s-fw-default-test-ingress
Events:
  FirstSeen  LastSeen  Count  From                        SubobjectPath  Reason   Message
  ─────────  ────────  ─────  ────                        ─────────────  ──────   ───────
  46s        46s       1      {loadbalancer-controller }                 Success  Created loadbalancer 130.211.5.27
```

## Disabling GLBC

Since GLBC runs as a cluster addon, you cannot simply delete the RC. The easiest way to disable it is as follows:

* IFF you want to tear down existing L7 load balancers, hit the /delete-all-and-quit endpoint on the pod:

```console
$ kubectl get pods --namespace=kube-system
NAME                     READY     STATUS    RESTARTS   AGE
l7-lb-controller-7bb21   1/1       Running   0          1h
$ kubectl exec l7-lb-controller-7bb21 -c l7-lb-controller curl http://localhost:8081/delete-all-and-quit --namespace=kube-system
$ kubectl logs l7-lb-controller-7bb21 -c l7-lb-controller --follow
...
I1007 00:30:00.322528       1 main.go:160] Handled quit, awaiting pod deletion.
```

* Nullify the RC (but don't delete it, or the addon controller will "fix" it for you)
```console
$ kubectl scale rc l7-lb-controller --replicas=0 --namespace=kube-system
```

## Limitations

* This cluster addon is still in the Beta phase. It behooves you to read through the GLBC documentation mentioned above and make sure there are no surprises.
* The recommended way to tear down a cluster with active Ingresses is to either delete each Ingress or hit the /delete-all-and-quit endpoint on GLBC, as described above, before invoking a cluster teardown script (e.g. kube-down.sh). You will have to manually clean up GCE resources through the [cloud console](https://cloud.google.com/compute/docs/console#access) or [gcloud CLI](https://cloud.google.com/compute/docs/gcloud-compute/) if you simply tear down the cluster with active Ingresses.
* All L7 load balancers created by GLBC have a default backend. If you don't specify one in your Ingress, GLBC will assign the 404 default backend mentioned above.
* All Kubernetes services must serve a 200 page on '/', or whatever custom value you've specified through GLBC's `--health-check-path` argument.
* GLBC is not built for performance. Creating many Ingresses at a time can overwhelm it. It won't fall over, but it will take its own time to churn through the Ingress queue. It doesn't understand concepts like fairness or backoff just yet.

[]()
@@ -1,44 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: l7-default-backend
  namespace: kube-system
  labels:
    k8s-app: glbc
    kubernetes.io/name: "GLBC"
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: glbc
  template:
    metadata:
      labels:
        k8s-app: glbc
        name: glbc
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      containers:
      - name: default-http-backend
        # Any image is permissible as long as:
        # 1. It serves a 404 page at /
        # 2. It serves 200 on a /healthz endpoint
        image: k8s.gcr.io/defaultbackend-amd64:1.5
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        ports:
        - containerPort: 8080
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
          requests:
            cpu: 10m
            memory: 20Mi
22
vendor/k8s.io/kubernetes/cluster/addons/cluster-loadbalancing/glbc/default-svc.yaml
generated
vendored
@@ -1,22 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  # This must match the --default-backend-service argument of the l7 lb
  # controller and is required because GCE mandates a default backend.
  name: default-http-backend
  namespace: kube-system
  labels:
    k8s-app: glbc
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "GLBCDefaultBackend"
spec:
  # The default backend must be of type NodePort.
  type: NodePort
  ports:
  - port: 80
    targetPort: 8080
    protocol: TCP
    name: http
  selector:
    k8s-app: glbc
8
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/OWNERS
generated
vendored
@@ -1,8 +0,0 @@
approvers:
- DirectXMan12
- kawych
- piosz
reviewers:
- DirectXMan12
- kawych
- piosz
21
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/README.md
generated
vendored
@@ -1,21 +0,0 @@
# Kubernetes Monitoring

[Heapster](https://github.com/kubernetes/heapster) enables monitoring and performance analysis in Kubernetes clusters.
Heapster collects signals from kubelets and the API server, processes them, and exports them via REST APIs or to a configurable timeseries storage backend.

More details can be found in the [Monitoring user guide](http://kubernetes.io/docs/user-guide/monitoring/).

## Troubleshooting

Heapster supports up to 30 pods per cluster node. In clusters with more running pods, Heapster may be throttled or fail with an OOM error. Starting with Kubernetes 1.9.2, Heapster resource requirements may be overwritten manually (a sketch follows this README). [Learn more about Addon Resizer configuration](https://github.com/kubernetes/autoscaler/tree/master/addon-resizer#addon-resizer-configuration)

### Important notices

Decreasing resource requirements for cluster addons may cause system instability. The effects may include (but are not limited to):
- Metrics not being exported
- Horizontal Pod Autoscaler not working
- `kubectl top` not working

Overwritten configuration persists through cluster updates and may therefore cause all of the effects above after a cluster update.

[]()
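The Heapster manifests below wire the Addon Resizer to a `NannyConfiguration` ConfigMap, which is where such a manual override would live. A minimal sketch, assuming the `baseCPU`/`baseMemory` field names from the Addon Resizer's `nannyconfig/v1alpha1` schema (verify against the linked documentation):

```yaml
# Sketch of a manual Heapster resource override. baseCPU/baseMemory are
# assumed field names from the addon-resizer nannyconfig/v1alpha1 API;
# the values here are examples only.
apiVersion: v1
kind: ConfigMap
metadata:
  name: heapster-config
  namespace: kube-system
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration
    baseCPU: 100m
    baseMemory: 300Mi
```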
161
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/google/heapster-controller.yaml
generated
vendored
@@ -1,161 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: heapster-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: eventer-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster-v1.6.0-beta.1
  namespace: kube-system
  labels:
    k8s-app: heapster
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    version: v1.6.0-beta.1
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: heapster
      version: v1.6.0-beta.1
  template:
    metadata:
      labels:
        k8s-app: heapster
        version: v1.6.0-beta.1
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      containers:
        - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
          name: heapster
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8082
              scheme: HTTP
            initialDelaySeconds: 180
            timeoutSeconds: 5
          command:
            - /heapster
            - --source=kubernetes.summary_api:''
            - --sink=gcm
        - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
          name: eventer
          command:
            - /eventer
            - --source=kubernetes:''
            - --sink=gcl
        - image: k8s.gcr.io/addon-resizer:1.8.3
          name: heapster-nanny
          resources:
            limits:
              cpu: 50m
              memory: {{ nanny_memory }}
            requests:
              cpu: 50m
              memory: {{ nanny_memory }}
          volumeMounts:
          - name: heapster-config-volume
            mountPath: /etc/config
          env:
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          command:
            - /pod_nanny
            - --config-dir=/etc/config
            - --cpu={{ base_metrics_cpu }}
            - --extra-cpu={{ metrics_cpu_per_node }}m
            - --memory={{ base_metrics_memory }}
            - --extra-memory={{ metrics_memory_per_node }}Mi
            - --threshold=5
            - --deployment=heapster-v1.6.0-beta.1
            - --container=heapster
            - --poll-period=300000
            - --estimator=exponential
            # Specifies the smallest cluster (defined in number of nodes)
            # resources will be scaled to.
            - --minClusterSize={{ heapster_min_cluster_size }}
        - image: k8s.gcr.io/addon-resizer:1.8.2
          name: eventer-nanny
          resources:
            limits:
              cpu: 50m
              memory: {{ nanny_memory }}
            requests:
              cpu: 50m
              memory: {{ nanny_memory }}
          env:
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
          - name: eventer-config-volume
            mountPath: /etc/config
          command:
            - /pod_nanny
            - --config-dir=/etc/config
            - --cpu=100m
            - --extra-cpu=0m
            - --memory={{ base_eventer_memory }}
            - --extra-memory={{ eventer_memory_per_node }}Ki
            - --threshold=5
            - --deployment=heapster-v1.6.0-beta.1
            - --container=eventer
            - --poll-period=300000
            - --estimator=exponential
      volumes:
        - name: heapster-config-volume
          configMap:
            name: heapster-config
        - name: eventer-config-volume
          configMap:
            name: eventer-config
      serviceAccountName: heapster
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
15
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/google/heapster-service.yaml
generated
vendored
@@ -1,15 +0,0 @@
kind: Service
apiVersion: v1
metadata:
  name: heapster
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Heapster"
spec:
  ports:
    - port: 80
      targetPort: 8082
  selector:
    k8s-app: heapster
@@ -1,162 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: heapster-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: eventer-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster-v1.6.0-beta.1
  namespace: kube-system
  labels:
    k8s-app: heapster
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    version: v1.6.0-beta.1
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: heapster
      version: v1.6.0-beta.1
  template:
    metadata:
      labels:
        k8s-app: heapster
        version: v1.6.0-beta.1
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      containers:
        - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
          name: heapster
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8082
              scheme: HTTP
            initialDelaySeconds: 180
            timeoutSeconds: 5
          command:
            - /heapster
            - --source=kubernetes.summary_api:''
            - --sink=influxdb:http://monitoring-influxdb:8086
            - --sink=gcm:?metrics=autoscaling
        - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
          name: eventer
          command:
            - /eventer
            - --source=kubernetes:''
            - --sink=gcl
        - image: k8s.gcr.io/addon-resizer:1.8.3
          name: heapster-nanny
          resources:
            limits:
              cpu: 50m
              memory: {{ nanny_memory }}
            requests:
              cpu: 50m
              memory: {{ nanny_memory }}
          volumeMounts:
          - name: heapster-config-volume
            mountPath: /etc/config
          env:
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          command:
            - /pod_nanny
            - --config-dir=/etc/config
            - --cpu={{ base_metrics_cpu }}
            - --extra-cpu={{ metrics_cpu_per_node }}m
            - --memory={{ base_metrics_memory }}
            - --extra-memory={{ metrics_memory_per_node }}Mi
            - --threshold=5
            - --deployment=heapster-v1.6.0-beta.1
            - --container=heapster
            - --poll-period=300000
            - --estimator=exponential
            # Specifies the smallest cluster (defined in number of nodes)
            # resources will be scaled to.
            - --minClusterSize={{ heapster_min_cluster_size }}
        - image: k8s.gcr.io/addon-resizer:1.8.2
          name: eventer-nanny
          resources:
            limits:
              cpu: 50m
              memory: {{ nanny_memory }}
            requests:
              cpu: 50m
              memory: {{ nanny_memory }}
          volumeMounts:
          - name: eventer-config-volume
            mountPath: /etc/config
          env:
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          command:
            - /pod_nanny
            - --config-dir=/etc/config
            - --cpu=100m
            - --extra-cpu=0m
            - --memory={{ base_eventer_memory }}
            - --extra-memory={{ eventer_memory_per_node }}Ki
            - --threshold=5
            - --deployment=heapster-v1.6.0-beta.1
            - --container=eventer
            - --poll-period=300000
            - --estimator=exponential
      volumes:
        - name: heapster-config-volume
          configMap:
            name: heapster-config
        - name: eventer-config-volume
          configMap:
            name: eventer-config
      serviceAccountName: heapster
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
58
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/heapster-rbac.yaml
generated
vendored
@@ -1,58 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: heapster-binding
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:heapster
subjects:
  - kind: ServiceAccount
    name: heapster
    namespace: kube-system
---
# Heapster's pod_nanny monitors the heapster deployment & its pod(s), and scales
# the resources of the deployment if necessary.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: system:pod-nanny
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - "extensions"
    resources:
      - deployments
    verbs:
      - get
      - update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: heapster-binding
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: system:pod-nanny
subjects:
  - kind: ServiceAccount
    name: heapster
    namespace: kube-system
---
19
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/influxdb/grafana-service.yaml
generated
vendored
@@ -1,19 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: monitoring-grafana
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Grafana"
spec:
  # On production clusters, consider setting up auth for grafana, and
  # exposing Grafana either using a LoadBalancer or a public IP.
  # type: LoadBalancer
  ports:
    - port: 80
      protocol: TCP
      targetPort: ui
  selector:
    k8s-app: influxGrafana
161
vendor/k8s.io/kubernetes/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml
generated
vendored
@@ -1,161 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: heapster-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: eventer-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster-v1.6.0-beta.1
  namespace: kube-system
  labels:
    k8s-app: heapster
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    version: v1.6.0-beta.1
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: heapster
      version: v1.6.0-beta.1
  template:
    metadata:
      labels:
        k8s-app: heapster
        version: v1.6.0-beta.1
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      containers:
        - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
          name: heapster
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8082
              scheme: HTTP
            initialDelaySeconds: 180
            timeoutSeconds: 5
          command:
            - /heapster
            - --source=kubernetes.summary_api:''
            - --sink=influxdb:http://monitoring-influxdb:8086
        - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
          name: eventer
          command:
            - /eventer
            - --source=kubernetes:''
            - --sink=influxdb:http://monitoring-influxdb:8086
        - image: k8s.gcr.io/addon-resizer:1.8.3
          name: heapster-nanny
          resources:
            limits:
              cpu: 50m
              memory: {{ nanny_memory }}
            requests:
              cpu: 50m
              memory: {{ nanny_memory }}
          env:
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
          - name: heapster-config-volume
            mountPath: /etc/config
          command:
            - /pod_nanny
            - --config-dir=/etc/config
            - --cpu={{ base_metrics_cpu }}
            - --extra-cpu={{ metrics_cpu_per_node }}m
            - --memory={{ base_metrics_memory }}
            - --extra-memory={{ metrics_memory_per_node }}Mi
            - --threshold=5
            - --deployment=heapster-v1.6.0-beta.1
            - --container=heapster
            - --poll-period=300000
            - --estimator=exponential
            # Specifies the smallest cluster (defined in number of nodes)
            # resources will be scaled to.
            - --minClusterSize={{ heapster_min_cluster_size }}
        - image: k8s.gcr.io/addon-resizer:1.8.2
          name: eventer-nanny
          resources:
            limits:
              cpu: 50m
              memory: {{ nanny_memory }}
            requests:
              cpu: 50m
              memory: {{ nanny_memory }}
          env:
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
          - name: eventer-config-volume
            mountPath: /etc/config
          command:
            - /pod_nanny
            - --config-dir=/etc/config
            - --cpu=100m
            - --extra-cpu=0m
            - --memory={{ base_eventer_memory }}
            - --extra-memory={{ eventer_memory_per_node }}Ki
            - --threshold=5
            - --deployment=heapster-v1.6.0-beta.1
            - --container=eventer
            - --poll-period=300000
            - --estimator=exponential
      volumes:
        - name: heapster-config-volume
          configMap:
            name: heapster-config
        - name: eventer-config-volume
          configMap:
            name: eventer-config
      serviceAccountName: heapster
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
@@ -1,15 +0,0 @@
kind: Service
apiVersion: v1
metadata:
  name: heapster
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Heapster"
spec:
  ports:
    - port: 80
      targetPort: 8082
  selector:
    k8s-app: heapster
@@ -1,87 +0,0 @@
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: monitoring-influxdb-grafana-v4
  namespace: kube-system
  labels:
    k8s-app: influxGrafana
    version: v4
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: influxGrafana
      version: v4
  template:
    metadata:
      labels:
        k8s-app: influxGrafana
        version: v4
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      containers:
        - name: influxdb
          image: k8s.gcr.io/heapster-influxdb-amd64:v1.3.3
          resources:
            limits:
              cpu: 100m
              memory: 500Mi
            requests:
              cpu: 100m
              memory: 500Mi
          ports:
            - name: http
              containerPort: 8083
            - name: api
              containerPort: 8086
          volumeMounts:
          - name: influxdb-persistent-storage
            mountPath: /data
        - name: grafana
          image: k8s.gcr.io/heapster-grafana-amd64:v4.4.3
          resources:
            # keep request = limit to keep this container in guaranteed class
            limits:
              cpu: 100m
              memory: 100Mi
            requests:
              cpu: 100m
              memory: 100Mi
          env:
            # This variable is required to setup templates in Grafana.
            - name: INFLUXDB_SERVICE_URL
              value: http://monitoring-influxdb:8086
            # The following env variables are required to make Grafana accessible via
            # the kubernetes api-server proxy. On production clusters, we recommend
            # removing these env variables, setting up auth for grafana, and exposing
            # the grafana service using a LoadBalancer or a public IP.
            - name: GF_AUTH_BASIC_ENABLED
              value: "false"
            - name: GF_AUTH_ANONYMOUS_ENABLED
              value: "true"
            - name: GF_AUTH_ANONYMOUS_ORG_ROLE
              value: Admin
            - name: GF_SERVER_ROOT_URL
              value: /api/v1/namespaces/kube-system/services/monitoring-grafana/proxy/
          ports:
            - name: ui
              containerPort: 3000
          volumeMounts:
          - name: grafana-persistent-storage
            mountPath: /var
      volumes:
        - name: influxdb-persistent-storage
          emptyDir: {}
        - name: grafana-persistent-storage
          emptyDir: {}
@@ -1,19 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: monitoring-influxdb
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "InfluxDB"
spec:
  ports:
    - name: http
      port: 8083
      targetPort: 8083
    - name: api
      port: 8086
      targetPort: 8086
  selector:
    k8s-app: influxGrafana
@@ -1,127 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: heapster-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster-v1.6.0-beta.1
  namespace: kube-system
  labels:
    k8s-app: heapster
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    version: v1.6.0-beta.1
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: heapster
      version: v1.6.0-beta.1
  template:
    metadata:
      labels:
        k8s-app: heapster
        version: v1.6.0-beta.1
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      containers:
        - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
          name: heapster
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8082
              scheme: HTTP
            initialDelaySeconds: 180
            timeoutSeconds: 5
          command:
            # On GCP, container.googleapis.com/instance_id node annotation is used to provide instance_id label for Stackdriver
            - /heapster
            - --source=kubernetes.summary_api:?host_id_annotation=container.googleapis.com/instance_id
            - --sink=stackdriver:?cluster_name={{ cluster_name }}&use_old_resources={{ use_old_resources }}&use_new_resources={{ use_new_resources }}&min_interval_sec=100&batch_export_timeout_sec=110&cluster_location={{ cluster_location }}
        # BEGIN_PROMETHEUS_TO_SD
        - name: prom-to-sd
          image: k8s.gcr.io/prometheus-to-sd:v0.3.1
          command:
            - /monitor
            - --source=heapster:http://localhost:8082?whitelisted=stackdriver_requests_count,stackdriver_timeseries_count
            - --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons
            - --api-override={{ prometheus_to_sd_endpoint }}
            - --pod-id=$(POD_NAME)
            - --namespace-id=$(POD_NAMESPACE)
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
        # END_PROMETHEUS_TO_SD
        - image: k8s.gcr.io/addon-resizer:1.8.3
          name: heapster-nanny
          resources:
            limits:
              cpu: 50m
              memory: {{ nanny_memory }}
            requests:
              cpu: 50m
              memory: {{ nanny_memory }}
          volumeMounts:
          - name: heapster-config-volume
            mountPath: /etc/config
          env:
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          command:
            - /pod_nanny
            - --config-dir=/etc/config
            - --cpu={{ base_metrics_cpu }}
            - --extra-cpu={{ metrics_cpu_per_node }}m
            - --memory={{ base_metrics_memory }}
            - --extra-memory={{ metrics_memory_per_node }}Mi
            - --threshold=5
            - --deployment=heapster-v1.6.0-beta.1
            - --container=heapster
            - --poll-period=300000
            - --estimator=exponential
            # Specifies the smallest cluster (defined in number of nodes)
            # resources will be scaled to.
            - --minClusterSize={{ heapster_min_cluster_size }}
      volumes:
        - name: heapster-config-volume
          configMap:
            name: heapster-config
      serviceAccountName: heapster
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
@@ -1,15 +0,0 @@
kind: Service
apiVersion: v1
metadata:
  name: heapster
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Heapster"
spec:
  ports:
    - port: 80
      targetPort: 8082
  selector:
    k8s-app: heapster
@@ -1,105 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: heapster-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster-v1.6.0-beta.1
  namespace: kube-system
  labels:
    k8s-app: heapster
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    version: v1.6.0-beta.1
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: heapster
      version: v1.6.0-beta.1
  template:
    metadata:
      labels:
        k8s-app: heapster
        version: v1.6.0-beta.1
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      containers:
        - image: k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
          name: heapster
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8082
              scheme: HTTP
            initialDelaySeconds: 180
            timeoutSeconds: 5
          command:
            - /heapster
            - --source=kubernetes.summary_api:''
        - image: k8s.gcr.io/addon-resizer:1.8.3
          name: heapster-nanny
          resources:
            limits:
              cpu: 50m
              memory: {{ nanny_memory }}
            requests:
              cpu: 50m
              memory: {{ nanny_memory }}
          env:
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
          - name: heapster-config-volume
            mountPath: /etc/config
          command:
            - /pod_nanny
            - --config-dir=/etc/config
            - --cpu={{ base_metrics_cpu }}
            - --extra-cpu={{ metrics_cpu_per_node }}m
            - --memory={{ base_metrics_memory }}
            - --extra-memory={{ metrics_memory_per_node }}Mi
            - --threshold=5
            - --deployment=heapster-v1.6.0-beta.1
            - --container=heapster
            - --poll-period=300000
            - --estimator=exponential
            # Specifies the smallest cluster (defined in number of nodes)
            # resources will be scaled to.
            - --minClusterSize={{ heapster_min_cluster_size }}
      volumes:
        - name: heapster-config-volume
          configMap:
            name: heapster-config
      serviceAccountName: heapster
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
@@ -1,15 +0,0 @@
kind: Service
apiVersion: v1
metadata:
  name: heapster
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Heapster"
spec:
  ports:
    - port: 80
      targetPort: 8082
  selector:
    k8s-app: heapster
6
vendor/k8s.io/kubernetes/cluster/addons/dashboard/MAINTAINERS.md
generated
vendored
@@ -1,6 +0,0 @@
# Maintainers

Piotr Bryk <bryk@google.com> and committers to the https://github.com/kubernetes/dashboard repository.


[]()
12
vendor/k8s.io/kubernetes/cluster/addons/dashboard/OWNERS
generated
vendored
@@ -1,12 +0,0 @@
approvers:
- bryk
reviewers:
- cheld
- cupofcat
- danielromlein
- floreks
- ianlewis
- konryd
- maciaszczykm
- mhenc
- rf232
10
vendor/k8s.io/kubernetes/cluster/addons/dashboard/README.md
generated
vendored
@@ -1,10 +0,0 @@
# Kubernetes Dashboard

Kubernetes Dashboard is a general-purpose, web-based UI for Kubernetes clusters.
It allows users to manage applications running in the cluster, troubleshoot them,
and manage the cluster itself.

Learn more at: https://github.com/kubernetes/dashboard


[]()
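One common way to reach the Dashboard Service defined below is through the API server proxy; a sketch, assuming `kubectl` is already configured for the cluster and the `kube-system` service name used in these manifests:

```console
$ kubectl proxy
# Then browse to (path assumes the https:kubernetes-dashboard service below):
# http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/
```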
9
vendor/k8s.io/kubernetes/cluster/addons/dashboard/dashboard-configmap.yaml
generated
vendored
@@ -1,9 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    # Allows editing resource and makes sure it is created first.
    addonmanager.kubernetes.io/mode: EnsureExists
  name: kubernetes-dashboard-settings
  namespace: kube-system
69
vendor/k8s.io/kubernetes/cluster/addons/dashboard/dashboard-controller.yaml
generated
vendored
@@ -1,69 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      containers:
      - name: kubernetes-dashboard
        image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
        resources:
          limits:
            cpu: 100m
            memory: 300Mi
          requests:
            cpu: 50m
            memory: 100Mi
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          # PLATFORM-SPECIFIC ARGS HERE
          - --auto-generate-certificates
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
        - name: tmp-volume
          mountPath: /tmp
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
45
vendor/k8s.io/kubernetes/cluster/addons/dashboard/dashboard-rbac.yaml
generated
vendored
@@ -1,45 +0,0 @@
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
21
vendor/k8s.io/kubernetes/cluster/addons/dashboard/dashboard-secret.yaml
generated
vendored
@@ -1,21 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    # Allows editing resource and makes sure it is created first.
    addonmanager.kubernetes.io/mode: EnsureExists
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    # Allows editing resource and makes sure it is created first.
    addonmanager.kubernetes.io/mode: EnsureExists
  name: kubernetes-dashboard-key-holder
  namespace: kube-system
type: Opaque
15
vendor/k8s.io/kubernetes/cluster/addons/dashboard/dashboard-service.yaml
generated
vendored
@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 443
    targetPort: 8443
59
vendor/k8s.io/kubernetes/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml
generated
vendored
@@ -1,59 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nvidia-gpu-device-plugin
  namespace: kube-system
  labels:
    k8s-app: nvidia-gpu-device-plugin
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: nvidia-gpu-device-plugin
  template:
    metadata:
      labels:
        k8s-app: nvidia-gpu-device-plugin
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-node-critical
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: cloud.google.com/gke-accelerator
                operator: Exists
      tolerations:
      - operator: "Exists"
        effect: "NoExecute"
      - operator: "Exists"
        effect: "NoSchedule"
      volumes:
      - name: device-plugin
        hostPath:
          path: /var/lib/kubelet/device-plugins
      - name: dev
        hostPath:
          path: /dev
      containers:
      - image: "k8s.gcr.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e"
        command: ["/usr/bin/nvidia-gpu-device-plugin", "-logtostderr"]
        name: nvidia-gpu-device-plugin
        resources:
          requests:
            cpu: 50m
            memory: 10Mi
          limits:
            cpu: 50m
            memory: 10Mi
        securityContext:
          privileged: true
        volumeMounts:
        - name: device-plugin
          mountPath: /device-plugin
        - name: dev
          mountPath: /dev
  updateStrategy:
    type: RollingUpdate
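For context, a workload consumes the GPUs this device plugin advertises by requesting the extended resource; a minimal sketch, where the pod name and image are hypothetical and `nvidia.com/gpu` is the resource name NVIDIA device plugins expose:

```yaml
# Hypothetical pod requesting one GPU surfaced by the device plugin above.
apiVersion: v1
kind: Pod
metadata:
  name: gpu-example                  # hypothetical
spec:
  containers:
  - name: cuda
    image: nvidia/cuda:10.0-base     # hypothetical image
    resources:
      limits:
        nvidia.com/gpu: 1            # extended resource advertised by the plugin
```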
6
vendor/k8s.io/kubernetes/cluster/addons/dns-horizontal-autoscaler/MAINTAINERS.md
generated
vendored
@@ -1,6 +0,0 @@
# Maintainers

Zihong Zheng <zihongz@google.com>


[]()
6
vendor/k8s.io/kubernetes/cluster/addons/dns-horizontal-autoscaler/OWNERS
generated
vendored
@@ -1,6 +0,0 @@
approvers:
- bowei
- mrhohn
reviewers:
- bowei
- mrhohn
14
vendor/k8s.io/kubernetes/cluster/addons/dns-horizontal-autoscaler/README.md
generated
vendored
@@ -1,14 +0,0 @@
# DNS Horizontal Autoscaler

DNS Horizontal Autoscaler enables the horizontal autoscaling feature for the DNS service
in Kubernetes clusters. This autoscaler runs as a Deployment. It collects cluster
status from the API server and horizontally scales the number of DNS backends based
on demand. Autoscaling parameters can be tuned by modifying the `kube-dns-autoscaler`
ConfigMap in the `kube-system` namespace, as sketched below.

Learn more about:
- Usage: http://kubernetes.io/docs/tasks/administer-cluster/dns-horizontal-autoscaling/
- Implementation: https://github.com/kubernetes-incubator/cluster-proportional-autoscaler/


[]()
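A sketch of what tuning that ConfigMap can look like, using the same `linear` mode and parameter names that appear in the Deployment's `--default-params` flag below; the values are illustrative only:

```yaml
# Illustrative kube-dns-autoscaler tuning. Parameter names match the
# linear mode used in --default-params below; values are examples only.
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns-autoscaler
  namespace: kube-system
data:
  linear: |-
    {"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}
```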
@@ -1,104 +0,0 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

kind: ServiceAccount
apiVersion: v1
metadata:
  name: kube-dns-autoscaler
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:kube-dns-autoscaler
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["list", "watch"]
  - apiGroups: [""]
    resources: ["replicationcontrollers/scale"]
    verbs: ["get", "update"]
  - apiGroups: ["extensions"]
    resources: ["deployments/scale", "replicasets/scale"]
    verbs: ["get", "update"]
  # Remove the configmaps rule once below issue is fixed:
  # kubernetes-incubator/cluster-proportional-autoscaler#16
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:kube-dns-autoscaler
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
  - kind: ServiceAccount
    name: kube-dns-autoscaler
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: system:kube-dns-autoscaler
  apiGroup: rbac.authorization.k8s.io

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kube-dns-autoscaler
  namespace: kube-system
  labels:
    k8s-app: kube-dns-autoscaler
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: kube-dns-autoscaler
  template:
    metadata:
      labels:
        k8s-app: kube-dns-autoscaler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      containers:
      - name: autoscaler
        image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.2.0
        resources:
          requests:
            cpu: "20m"
            memory: "10Mi"
        command:
          - /cluster-proportional-autoscaler
          - --namespace=kube-system
          - --configmap=kube-dns-autoscaler
          # Should keep target in sync with cluster/addons/dns/kube-dns.yaml.base
          - --target={{.Target}}
          # When the cluster is using large nodes (with more cores), "coresPerReplica" should dominate.
          # If using small nodes, "nodesPerReplica" should dominate.
          - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
          - --logtostderr=true
          - --v=2
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      serviceAccountName: kube-dns-autoscaler
6
vendor/k8s.io/kubernetes/cluster/addons/dns/OWNERS
generated
vendored
@@ -1,6 +0,0 @@
approvers:
- bowei
- mrhohn
reviewers:
- bowei
- mrhohn
34
vendor/k8s.io/kubernetes/cluster/addons/dns/coredns/Makefile
generated
vendored
@@ -1,34 +0,0 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Makefile for converting the kubedns underscore templates to Salt/Pillar and other formats.

# If you update the *.base templates, please run this Makefile before pushing.
#
# Usage:
#   make

all: transform

# .base -> .in pattern rule
%.in: %.base
	sed -f transforms2salt.sed $< | sed s/__SOURCE_FILENAME__/$</g > $@

# .base -> .sed pattern rule
%.sed: %.base
	sed -f transforms2sed.sed $< | sed s/__SOURCE_FILENAME__/$</g > $@

transform: coredns.yaml.in coredns.yaml.sed

.PHONY: transform
183
vendor/k8s.io/kubernetes/cluster/addons/dns/coredns/coredns.yaml.base
generated
vendored
@@ -1,183 +0,0 @@
# __MACHINE_GENERATED_WARNING__

apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes __PILLAR__DNS__DOMAIN__ in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. So that the Addon Manager does not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      serviceAccountName: coredns
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      containers:
      - name: coredns
        image: k8s.gcr.io/coredns:1.2.2
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: __PILLAR__DNS__SERVER__
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
183 vendor/k8s.io/kubernetes/cluster/addons/dns/coredns/coredns.yaml.in generated vendored
@@ -1,183 +0,0 @@
# Warning: This is a file generated from the base underscore template file: coredns.yaml.base

apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes {{ pillar['dns_domain'] }} in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      serviceAccountName: coredns
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      containers:
      - name: coredns
        image: k8s.gcr.io/coredns:1.2.2
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: {{ pillar['dns_server'] }}
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
183 vendor/k8s.io/kubernetes/cluster/addons/dns/coredns/coredns.yaml.sed generated vendored
@@ -1,183 +0,0 @@
# Warning: This is a file generated from the base underscore template file: coredns.yaml.base

apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes $DNS_DOMAIN in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      serviceAccountName: coredns
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      containers:
      - name: coredns
        image: k8s.gcr.io/coredns:1.2.2
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: $DNS_SERVER_IP
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
4 vendor/k8s.io/kubernetes/cluster/addons/dns/coredns/transforms2salt.sed generated vendored
@@ -1,4 +0,0 @@
s/__PILLAR__DNS__SERVER__/{{ pillar['dns_server'] }}/g
s/__PILLAR__DNS__DOMAIN__/{{ pillar['dns_domain'] }}/g
s/__PILLAR__CLUSTER_CIDR__/{{ pillar['service_cluster_ip_range'] }}/g
s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g
4 vendor/k8s.io/kubernetes/cluster/addons/dns/coredns/transforms2sed.sed generated vendored
@@ -1,4 +0,0 @@
s/__PILLAR__DNS__SERVER__/$DNS_SERVER_IP/g
s/__PILLAR__DNS__DOMAIN__/$DNS_DOMAIN/g
s/__PILLAR__CLUSTER_CIDR__/$SERVICE_CLUSTER_IP_RANGE/g
s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g
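For reference, these substitution files are applied by piping the base template through sed; a minimal sketch of the invocation, mirroring the pattern rules in the kube-dns Makefile later in this diff (the output filename here is assumed):

```
# Generate the sed-style manifest from the base underscore template;
# a second sed pass fills in the __SOURCE_FILENAME__ token.
sed -f transforms2sed.sed coredns.yaml.base \
  | sed s/__SOURCE_FILENAME__/coredns.yaml.base/g > coredns.yaml.sed
```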
34 vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns/Makefile generated vendored
@@ -1,34 +0,0 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Makefile for transforming the kube-dns underscore templates to Salt/Pillar and other formats.

# If you update the *.base templates, please run this Makefile before pushing.
#
# Usage:
#   make

all: transform

# .base -> .in pattern rule
%.in: %.base
	sed -f transforms2salt.sed $< | sed s/__SOURCE_FILENAME__/$</g > $@

# .base -> .sed pattern rule
%.sed: %.base
	sed -f transforms2sed.sed $< | sed s/__SOURCE_FILENAME__/$</g > $@

transform: kube-dns.yaml.in kube-dns.yaml.sed

.PHONY: transform
64 vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns/README.md generated vendored
@@ -1,64 +0,0 @@
# kube-dns

`kube-dns` schedules the DNS Pods and Service on the cluster; other pods in the
cluster can use the DNS Service's IP to resolve DNS names.

* [Administrators guide](http://kubernetes.io/docs/admin/dns/)
* [Code repository](http://www.github.com/kubernetes/dns)

## Manually scale kube-dns Deployment

kube-dns creates only one DNS Pod by default. If
[dns-horizontal-autoscaler](../../dns-horizontal-autoscaler/)
is not enabled, you may need to manually scale the kube-dns Deployment.

Use the `kubectl scale` command below to scale it:
```
kubectl --namespace=kube-system scale deployment kube-dns --replicas=<NUM_YOU_WANT>
```

Do not use `kubectl edit` to modify the kube-dns Deployment object if it is
controlled by [Addon Manager](../../addon-manager/). Otherwise the modifications
will be clobbered, and in addition the replicas count for the kube-dns Deployment
will be reset to 1. See [Cluster add-ons README](../../README.md) and
[#36411](https://github.com/kubernetes/kubernetes/issues/36411) for reference.

## kube-dns addon templates

This directory contains the base UNDERSCORE templates that can be used to
generate the kube-dns.yaml.in needed in Salt format.

Due to a varied preference in templating language choices, the transform
Makefile in this directory should be enhanced to generate all required formats
from the base underscore templates.

**N.B.**: When you add a parameter you should also update the various scripts
that supply values for your new parameter. Here is one way you might find those
scripts:

```
cd kubernetes && git grep 'kube-dns.yaml'
```

### Base Template files

These are the authoritative base templates.
Run `make` to generate the Salt and Sed yaml templates from these.

```
kube-dns.yaml.base
```

### Generated Salt files

```
kube-dns.yaml.in
```

### Generated Sed files

```
kube-dns.yaml.sed
```
214 vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns/kube-dns.yaml.base generated vendored
@@ -1,214 +0,0 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Should keep target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
# in sync with this file.

# __MACHINE_GENERATED_WARNING__

apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: __PILLAR__DNS__SERVER__
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      volumes:
      - name: kube-dns-config
        configMap:
          name: kube-dns
          optional: true
      containers:
      - name: kubedns
        image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain=__PILLAR__DNS__DOMAIN__.
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --v=2
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.13
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size=1000
        - --no-negcache
        - --dns-loop-detect
        - --log-facility=-
        - --server=/__PILLAR__DNS__DOMAIN__/127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
        image: k8s.gcr.io/k8s-dns-sidecar:1.14.13
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.__PILLAR__DNS__DOMAIN__,5,SRV
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.__PILLAR__DNS__DOMAIN__,5,SRV
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
      dnsPolicy: Default  # Don't use cluster DNS.
      serviceAccountName: kube-dns
214 vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns/kube-dns.yaml.in generated vendored
@@ -1,214 +0,0 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Should keep target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
# in sync with this file.

# Warning: This is a file generated from the base underscore template file: kube-dns.yaml.base

apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: {{ pillar['dns_server'] }}
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      volumes:
      - name: kube-dns-config
        configMap:
          name: kube-dns
          optional: true
      containers:
      - name: kubedns
        image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain={{ pillar['dns_domain'] }}.
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --v=2
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.13
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size=1000
        - --no-negcache
        - --dns-loop-detect
        - --log-facility=-
        - --server=/{{ pillar['dns_domain'] }}/127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
        image: k8s.gcr.io/k8s-dns-sidecar:1.14.13
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ pillar['dns_domain'] }},5,SRV
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ pillar['dns_domain'] }},5,SRV
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
      dnsPolicy: Default  # Don't use cluster DNS.
      serviceAccountName: kube-dns
214 vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns/kube-dns.yaml.sed generated vendored
@@ -1,214 +0,0 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Should keep target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
# in sync with this file.

# Warning: This is a file generated from the base underscore template file: kube-dns.yaml.base

apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: $DNS_SERVER_IP
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      volumes:
      - name: kube-dns-config
        configMap:
          name: kube-dns
          optional: true
      containers:
      - name: kubedns
        image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain=$DNS_DOMAIN.
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --v=2
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.13
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size=1000
        - --no-negcache
        - --dns-loop-detect
        - --log-facility=-
        - --server=/$DNS_DOMAIN/127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
        image: k8s.gcr.io/k8s-dns-sidecar:1.14.13
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.$DNS_DOMAIN,5,SRV
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.$DNS_DOMAIN,5,SRV
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
      dnsPolicy: Default  # Don't use cluster DNS.
      serviceAccountName: kube-dns
4 vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns/transforms2salt.sed generated vendored
@@ -1,4 +0,0 @@
s/__PILLAR__DNS__SERVER__/{{ pillar['dns_server'] }}/g
s/__PILLAR__DNS__DOMAIN__/{{ pillar['dns_domain'] }}/g
s/__PILLAR__CLUSTER_CIDR__/{{ pillar['service_cluster_ip_range'] }}/g
s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g
4 vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns/transforms2sed.sed generated vendored
@@ -1,4 +0,0 @@
s/__PILLAR__DNS__SERVER__/$DNS_SERVER_IP/g
s/__PILLAR__DNS__DOMAIN__/$DNS_DOMAIN/g
s/__PILLAR__CLUSTER_CIDR__/$SERVICE_CLUSTER_IP_RANGE/g
s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g
8 vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/OWNERS generated vendored
@@ -1,8 +0,0 @@
approvers:
- coffeepac
- piosz
reviewers:
- coffeepac
- piosz
labels:
- sig/instrumentation
83 vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/README.md generated vendored
@@ -1,83 +0,0 @@
# Elasticsearch Add-On

This add-on consists of a combination of [Elasticsearch][elasticsearch],
[Fluentd][fluentd] and [Kibana][kibana]. Elasticsearch is a search engine
that is responsible for storing our logs and allowing for them to be queried.
Fluentd sends log messages from Kubernetes to Elasticsearch, whereas Kibana
is a graphical interface for viewing and querying the logs stored in
Elasticsearch.

**Note:** this add-on should **not** be used as-is in production. It is
an example and you should treat it as such. Please see at least the
[Security](#security) and the [Storage](#storage) sections for more
information.

## Elasticsearch

Elasticsearch is deployed as a [StatefulSet][statefulSet], which is like
a Deployment, but allows for maintaining state on storage volumes.

### Security

Elasticsearch can enforce authorization using the [X-Pack
plugin][xPack]. For the sake of simplicity this example uses the fully open
source prebuilt images from Elastic that do not contain the X-Pack plugin. If
you need these features, please consider building the images from either the
"basic" or "platinum" version. After enabling these features, follow the [official
documentation][setupCreds] to set up credentials in Elasticsearch and Kibana.
Don't forget to propagate those credentials to Fluentd as well in its
[configuration][fluentdCreds], for example via [environment
variables][fluentdEnvVar]. You can use [ConfigMaps][configMap] and
[Secrets][secret] to store the credentials in the Kubernetes apiserver.

### Initialization

The Elasticsearch StatefulSet manifest specifies an
[init container][initContainer] that executes before the Elasticsearch
containers themselves, in order to ensure that the kernel state variable
`vm.max_map_count` is at least 262144, since this is a requirement of
Elasticsearch. You may remove the init container if you know that your host
OS meets this requirement.
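The StatefulSet manifest itself is not shown in this diff; a minimal sketch of what such a pod-spec excerpt looks like, assuming a sysctl call from a small privileged image (the image and container name here are illustrative, not the exact manifest):

```
# Hypothetical pod-spec excerpt: raise vm.max_map_count before Elasticsearch starts
initContainers:
- name: elasticsearch-logging-init  # illustrative name
  image: alpine:3.6                 # assumed image
  command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
  securityContext:
    privileged: true                # needed to change kernel state on the host
```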
### Storage

The Elasticsearch StatefulSet uses an [EmptyDir][emptyDir] volume to
store data. EmptyDir is erased when the pod terminates, so here it is used
only for testing purposes. **Important:** please change the storage to a
persistent volume claim before actually using this StatefulSet in your setup!

## Fluentd

Fluentd is deployed as a [DaemonSet][daemonSet] which spawns a pod on each
node that reads logs generated by the kubelet, the container runtime and the
containers, and sends them to Elasticsearch.

**Note:** in order for Fluentd to work, every Kubernetes node must be labeled
with `beta.kubernetes.io/fluentd-ds-ready=true`, as otherwise the Fluentd
DaemonSet will ignore it.
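For example, a node could be labeled like this (the node name is a placeholder):

```
kubectl label node <node-name> beta.kubernetes.io/fluentd-ds-ready=true
```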
Learn more in the [official Kubernetes documentation][k8sElasticsearchDocs].

### Known problems

Since Fluentd talks to the Elasticsearch service inside the cluster, the
instances on masters won't work, because masters have no kube-proxy. Don't
mark masters with the label mentioned in the previous paragraph, or add a
taint on them, to avoid Fluentd pods being scheduled there.
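For example, a sketch of tainting a master (the taint key here is illustrative; any `NoSchedule` taint that the Fluentd DaemonSet does not tolerate will do):

```
kubectl taint nodes <master-node-name> node-role.kubernetes.io/master=:NoSchedule
```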
[fluentd]: http://www.fluentd.org/
[elasticsearch]: https://www.elastic.co/products/elasticsearch
[kibana]: https://www.elastic.co/products/kibana
[xPack]: https://www.elastic.co/products/x-pack
[setupCreds]: https://www.elastic.co/guide/en/x-pack/current/setting-up-authentication.html#reset-built-in-user-passwords
[fluentdCreds]: https://github.com/uken/fluent-plugin-elasticsearch#user-password-path-scheme-ssl_verify
[fluentdEnvVar]: https://docs.fluentd.org/v0.12/articles/faq#how-can-i-use-environment-variables-to-configure-parameters-dynamically
[configMap]: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/
[secret]: https://kubernetes.io/docs/concepts/configuration/secret/
[statefulSet]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset
[initContainer]: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
[emptyDir]: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
[daemonSet]: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
[k8sElasticsearchDocs]: https://kubernetes.io/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana
1 vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image/.gitignore generated vendored
@@ -1 +0,0 @@
elasticsearch_logging_discovery
40 vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image/BUILD generated vendored
@@ -1,40 +0,0 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
)

go_binary(
    name = "es-image",
    embed = [":go_default_library"],
)

go_library(
    name = "go_default_library",
    srcs = ["elasticsearch_logging_discovery.go"],
    importpath = "k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image",
    deps = [
        "//pkg/apis/core:go_default_library",
        "//pkg/client/clientset_generated/internalclientset:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
        "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
25 vendor/k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image/Dockerfile generated vendored
@@ -1,25 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM docker.elastic.co/elasticsearch/elasticsearch-oss:6.3.2

VOLUME ["/data"]
EXPOSE 9200 9300

COPY elasticsearch_logging_discovery run.sh bin/
COPY config/elasticsearch.yml config/log4j2.properties config/

USER root
RUN chown -R elasticsearch:elasticsearch ./
CMD ["bin/run.sh"]
Some files were not shown because too many files have changed in this diff.