---
# Cilium agent DaemonSet (one pod per Linux node).
# Rendered from the upstream Cilium Helm chart (v1.18.4); the configmap
# checksum annotation forces a rolling restart when cilium-config changes.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: cilium
  namespace: kube-system
  labels:
    k8s-app: cilium
    app.kubernetes.io/part-of: cilium
    app.kubernetes.io/name: cilium-agent
spec:
  selector:
    matchLabels:
      k8s-app: cilium
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 2
    type: RollingUpdate
  template:
    metadata:
      annotations:
        cilium.io/cilium-configmap-checksum: "31ad7748e0aefe75b6436d96c8c85754e0b44e68e6012fa188bc5bcd66085828"
        kubectl.kubernetes.io/default-container: cilium-agent
      labels:
        k8s-app: cilium
        app.kubernetes.io/name: cilium-agent
        app.kubernetes.io/part-of: cilium
    spec:
      securityContext:
        appArmorProfile:
          type: Unconfined
        seccompProfile:
          type: Unconfined
      containers:
        - name: cilium-agent
          image: "quay.io/cilium/cilium:v1.18.4@sha256:49d87af187eeeb9e9e3ec2bc6bd372261a0b5cb2d845659463ba7cc10fe9e45f"
          imagePullPolicy: IfNotPresent
          command:
            - cilium-agent
          args:
            - --config-dir=/tmp/cilium/config-map
          # Probes hit the agent's local health endpoint directly (hostNetwork).
          startupProbe:
            httpGet:
              host: "127.0.0.1"
              path: /healthz
              port: 9879
              scheme: HTTP
              httpHeaders:
                - name: "brief"
                  value: "true"
            failureThreshold: 300
            periodSeconds: 2
            successThreshold: 1
            initialDelaySeconds: 5
          livenessProbe:
            httpGet:
              host: "127.0.0.1"
              path: /healthz
              port: 9879
              scheme: HTTP
              httpHeaders:
                - name: "brief"
                  value: "true"
                - name: "require-k8s-connectivity"
                  value: "false"
            periodSeconds: 30
            successThreshold: 1
            failureThreshold: 10
            timeoutSeconds: 5
          readinessProbe:
            httpGet:
              host: "127.0.0.1"
              path: /healthz
              port: 9879
              scheme: HTTP
              httpHeaders:
                - name: "brief"
                  value: "true"
            periodSeconds: 30
            successThreshold: 1
            failureThreshold: 3
            timeoutSeconds: 5
          env:
            - name: K8S_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
            - name: CILIUM_K8S_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: CILIUM_CLUSTERMESH_CONFIG
              value: /var/lib/cilium/clustermesh/
            - name: GOMEMLIMIT
              valueFrom:
                resourceFieldRef:
                  resource: limits.memory
                  divisor: '1'
            # API server reached via a local proxy (e.g. kube-proxy replacement
            # setups such as Talos/KubePrism) rather than the in-cluster VIP.
            - name: KUBERNETES_SERVICE_HOST
              value: "localhost"
            - name: KUBERNETES_SERVICE_PORT
              value: "7445"
            - name: KUBE_CLIENT_BACKOFF_BASE
              value: "1"
            - name: KUBE_CLIENT_BACKOFF_DURATION
              value: "120"
          lifecycle:
            postStart:
              exec:
                command:
                  - "bash"
                  - "-c"
                  - |
                    set -o errexit
                    set -o pipefail
                    set -o nounset
                    # When running in AWS ENI mode, it's likely that 'aws-node' has
                    # had a chance to install SNAT iptables rules. These can result
                    # in dropped traffic, so we should attempt to remove them.
                    # We do it using a 'postStart' hook since this may need to run
                    # for nodes which might have already been init'ed but may still
                    # have dangling rules. This is safe because there are no
                    # dependencies on anything that is part of the startup script
                    # itself, and can be safely run multiple times per node (e.g. in
                    # case of a restart).
                    if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
                    then
                        echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
                        iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
                    fi
                    echo 'Done!'
            preStop:
              exec:
                command:
                  - /cni-uninstall.sh
          ports:
            - name: peer-service
              containerPort: 4244
              hostPort: 4244
              protocol: TCP
            - name: prometheus
              containerPort: 9962
              hostPort: 9962
              protocol: TCP
          securityContext:
            seLinuxOptions:
              level: s0
              # Running with spc_t since we have removed the privileged mode.
              type: spc_t
            capabilities:
              add:
                - CHOWN
                - KILL
                - NET_ADMIN
                - NET_RAW
                - IPC_LOCK
                - SYS_ADMIN
                - SYS_RESOURCE
                - DAC_OVERRIDE
                - FOWNER
                - SETGID
                - SETUID
                - PERFMON
                - BPF
              drop:
                - ALL
          terminationMessagePolicy: FallbackToLogsOnError
          volumeMounts:
            - name: envoy-sockets
              mountPath: /var/run/cilium/envoy/sockets
              readOnly: false
            - name: host-proc-sys-net
              mountPath: /host/proc/sys/net
            - name: host-proc-sys-kernel
              mountPath: /host/proc/sys/kernel
            - name: bpf-maps
              mountPath: /sys/fs/bpf
              # Unprivileged containers need to mount /proc/sys/net from the
              # host and the bpf fs is mounted by an init container below, so
              # new mounts on the host must propagate into this container.
              mountPropagation: HostToContainer
            - name: cilium-cgroup
              mountPath: /sys/fs/cgroup
            - name: cilium-run
              mountPath: /var/run/cilium
            - name: cilium-netns
              mountPath: /var/run/cilium/netns
              mountPropagation: HostToContainer
            - name: etc-cni-netd
              mountPath: /host/etc/cni/net.d
            - name: clustermesh-secrets
              mountPath: /var/lib/cilium/clustermesh
              readOnly: true
            - name: lib-modules
              mountPath: /lib/modules
              readOnly: true
            - name: xtables-lock
              mountPath: /run/xtables.lock
            - name: hubble-tls
              mountPath: /var/lib/cilium/tls/hubble
              readOnly: true
            - name: tmp
              mountPath: /tmp
      initContainers:
        # Builds the final agent configuration from the cilium-config ConfigMap.
        - name: config
          image: "quay.io/cilium/cilium:v1.18.4@sha256:49d87af187eeeb9e9e3ec2bc6bd372261a0b5cb2d845659463ba7cc10fe9e45f"
          imagePullPolicy: IfNotPresent
          command:
            - cilium-dbg
            - build-config
          env:
            - name: K8S_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
            - name: CILIUM_K8S_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: KUBERNETES_SERVICE_HOST
              value: "localhost"
            - name: KUBERNETES_SERVICE_PORT
              value: "7445"
          volumeMounts:
            - name: tmp
              mountPath: /tmp
          terminationMessagePolicy: FallbackToLogsOnError
        # Applies sysctl overrides from the host mount namespace so they
        # survive this container's exit.
        - name: apply-sysctl-overwrites
          image: "quay.io/cilium/cilium:v1.18.4@sha256:49d87af187eeeb9e9e3ec2bc6bd372261a0b5cb2d845659463ba7cc10fe9e45f"
          imagePullPolicy: IfNotPresent
          env:
            - name: BIN_PATH
              value: /opt/cni/bin
          command:
            - sh
            - -ec
            # The statically linked Go program binary is invoked to avoid any
            # dependency on utilities like sh that can be missing on certain
            # distros installed on the underlying host. Copy the binary to the
            # same directory where we install cilium cni plugin so that exec
            # permissions are available.
            - |
              cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
              nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
              rm /hostbin/cilium-sysctlfix
          volumeMounts:
            - name: hostproc
              mountPath: /hostproc
            - name: cni-path
              mountPath: /hostbin
          terminationMessagePolicy: FallbackToLogsOnError
          securityContext:
            seLinuxOptions:
              level: s0
              type: spc_t
            capabilities:
              add:
                - SYS_ADMIN
                - SYS_CHROOT
                - SYS_PTRACE
              drop:
                - ALL
        # Mounts the bpf filesystem on the host if it is not mounted yet;
        # Bidirectional propagation makes the mount visible to the agent.
        - name: mount-bpf-fs
          image: "quay.io/cilium/cilium:v1.18.4@sha256:49d87af187eeeb9e9e3ec2bc6bd372261a0b5cb2d845659463ba7cc10fe9e45f"
          imagePullPolicy: IfNotPresent
          args:
            - 'mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf'
          command:
            - /bin/bash
            - -c
            - --
          terminationMessagePolicy: FallbackToLogsOnError
          securityContext:
            privileged: true
          volumeMounts:
            - name: bpf-maps
              mountPath: /sys/fs/bpf
              mountPropagation: Bidirectional
        # Optionally wipes Cilium state (BPF maps, runtime dir) on startup,
        # controlled via keys in the cilium-config ConfigMap.
        - name: clean-cilium-state
          image: "quay.io/cilium/cilium:v1.18.4@sha256:49d87af187eeeb9e9e3ec2bc6bd372261a0b5cb2d845659463ba7cc10fe9e45f"
          imagePullPolicy: IfNotPresent
          command:
            - /init-container.sh
          env:
            - name: CILIUM_ALL_STATE
              valueFrom:
                configMapKeyRef:
                  name: cilium-config
                  key: clean-cilium-state
                  optional: true
            - name: CILIUM_BPF_STATE
              valueFrom:
                configMapKeyRef:
                  name: cilium-config
                  key: clean-cilium-bpf-state
                  optional: true
            - name: WRITE_CNI_CONF_WHEN_READY
              valueFrom:
                configMapKeyRef:
                  name: cilium-config
                  key: write-cni-conf-when-ready
                  optional: true
            - name: KUBERNETES_SERVICE_HOST
              value: "localhost"
            - name: KUBERNETES_SERVICE_PORT
              value: "7445"
          terminationMessagePolicy: FallbackToLogsOnError
          securityContext:
            seLinuxOptions:
              level: s0
              type: spc_t
            capabilities:
              add:
                - NET_ADMIN
                - SYS_ADMIN
                - SYS_RESOURCE
              drop:
                - ALL
          volumeMounts:
            - name: bpf-maps
              mountPath: /sys/fs/bpf
            - name: cilium-cgroup
              mountPath: /sys/fs/cgroup
              mountPropagation: HostToContainer
            - name: cilium-run
              mountPath: /var/run/cilium
        # Installs the CNI plugin binaries into the host's CNI bin directory.
        - name: install-cni-binaries
          image: "quay.io/cilium/cilium:v1.18.4@sha256:49d87af187eeeb9e9e3ec2bc6bd372261a0b5cb2d845659463ba7cc10fe9e45f"
          imagePullPolicy: IfNotPresent
          command:
            - "/install-plugin.sh"
          resources:
            requests:
              cpu: 100m
              memory: 10Mi
          securityContext:
            seLinuxOptions:
              level: s0
              type: spc_t
            capabilities:
              drop:
                - ALL
          terminationMessagePolicy: FallbackToLogsOnError
          volumeMounts:
            - name: cni-path
              mountPath: /host/opt/cni/bin
      restartPolicy: Always
      priorityClassName: system-node-critical
      serviceAccountName: "cilium"
      automountServiceAccountToken: true
      terminationGracePeriodSeconds: 1
      hostNetwork: true
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchLabels:
                  k8s-app: cilium
              topologyKey: kubernetes.io/hostname
      nodeSelector:
        kubernetes.io/os: linux
      tolerations:
        - operator: Exists
      volumes:
        # Scratch space for the agent and the config init container.
        - name: tmp
          emptyDir: {}
        # To keep state between restarts / upgrades.
        - name: cilium-run
          hostPath:
            path: /var/run/cilium
            type: DirectoryOrCreate
        - name: cilium-netns
          hostPath:
            path: /var/run/netns
            type: DirectoryOrCreate
        # To keep state between restarts / upgrades for bpf maps.
        - name: bpf-maps
          hostPath:
            path: /sys/fs/bpf
            type: DirectoryOrCreate
        # To mount cgroup2 filesystem on the host or apply sysctlfix.
        - name: hostproc
          hostPath:
            path: /proc
            type: Directory
        - name: cilium-cgroup
          hostPath:
            path: /sys/fs/cgroup
            type: DirectoryOrCreate
        # To install cilium cni plugin in the host.
        - name: cni-path
          hostPath:
            path: /opt/cni/bin
            type: DirectoryOrCreate
        # To install cilium cni configuration in the host.
        - name: etc-cni-netd
          hostPath:
            path: /etc/cni/net.d
            type: DirectoryOrCreate
        # To be able to load kernel modules.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        # To access iptables concurrently with other processes (e.g. kube-proxy).
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
        # Shared directory for Envoy admin/xds sockets.
        - name: envoy-sockets
          hostPath:
            path: "/var/run/cilium/envoy/sockets"
            type: DirectoryOrCreate
        # To read the clustermesh configuration. defaultMode is an integer
        # field; 0400 is YAML octal for 256 (r-- --- ---).
        - name: clustermesh-secrets
          projected:
            defaultMode: 0400
            sources:
              - secret:
                  name: cilium-clustermesh
                  optional: true
              - secret:
                  name: clustermesh-apiserver-remote-cert
                  optional: true
                  items:
                    - key: tls.key
                      path: common-etcd-client.key
                    - key: tls.crt
                      path: common-etcd-client.crt
                    - key: ca.crt
                      path: common-etcd-client-ca.crt
              # note: we configure the clustermesh-apiserver as a remote
              # cluster as well, using the local certificates.
              - secret:
                  name: clustermesh-apiserver-local-cert
                  optional: true
                  items:
                    - key: tls.key
                      path: local-etcd-client.key
                    - key: tls.crt
                      path: local-etcd-client.crt
                    - key: ca.crt
                      path: local-etcd-client-ca.crt
        - name: host-proc-sys-net
          hostPath:
            path: /proc/sys/net
            type: Directory
        - name: host-proc-sys-kernel
          hostPath:
            path: /proc/sys/kernel
            type: Directory
        # TLS material served by the embedded Hubble server.
        - name: hubble-tls
          projected:
            defaultMode: 0400
            sources:
              - secret:
                  name: hubble-server-certs
                  optional: true
                  items:
                    - key: tls.crt
                      path: server.crt
                    - key: tls.key
                      path: server.key
                    - key: ca.crt
                      path: client-ca.crt