Automated Manifest Update (#2259)
This PR contains newly rendered Kubernetes manifests automatically generated by the CI workflow.

Reviewed-on: #2259
Co-authored-by: gitea-bot <gitea-bot@alexlebens.net>
Co-committed-by: gitea-bot <gitea-bot@alexlebens.net>
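The rendered file below ultimately comes from the upstream Cilium Helm chart (see the "# Source:" comment in the manifest). As a rough illustration of what such a rendering job can look like, here is a minimal workflow sketch; it is not the repository's actual CI definition, and the trigger, runner label, values file path, and output path are assumptions. Only the chart name and the 1.18.4 version follow from the rendered Deployment itself.

# Hypothetical sketch of a manifest-rendering workflow; not taken from this PR.
# Assumes helm is available on the runner; values/cilium.yaml is a placeholder path.
name: render-cilium-manifests
on:
  workflow_dispatch: {}
jobs:
  render:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Render Cilium manifests
        run: |
          helm repo add cilium https://helm.cilium.io/
          helm template cilium cilium/cilium --version 1.18.4 \
            --namespace kube-system \
            -f values/cilium.yaml \
            > clusters/cl01tl/manifests/cilium/rendered.yaml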
clusters/cl01tl/manifests/cilium/Deployment-cilium-operator.yaml (new file, 139 lines)
@@ -0,0 +1,139 @@
---
# Source: cilium/charts/cilium/templates/cilium-operator/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cilium-operator
  namespace: kube-system
  labels:
    io.cilium/app: operator
    name: cilium-operator
    app.kubernetes.io/part-of: cilium
    app.kubernetes.io/name: cilium-operator
spec:
  # See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go
  # for more details.
  replicas: 2
  selector:
    matchLabels:
      io.cilium/app: operator
      name: cilium-operator
  # ensure operator update on single node k8s clusters, by using rolling update with maxUnavailable=100% in case
  # of one replica and no user configured Recreate strategy.
  # otherwise an update might get stuck due to the default maxUnavailable=50% in combination with the
  # podAntiAffinity which prevents deployments of multiple operator replicas on the same node.
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 50%
    type: RollingUpdate
  template:
    metadata:
      annotations:
        # ensure pods roll when configmap updates
        cilium.io/cilium-configmap-checksum: "31ad7748e0aefe75b6436d96c8c85754e0b44e68e6012fa188bc5bcd66085828"
      labels:
        io.cilium/app: operator
        name: cilium-operator
        app.kubernetes.io/part-of: cilium
        app.kubernetes.io/name: cilium-operator
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      containers:
      - name: cilium-operator
        image: "quay.io/cilium/operator-generic:v1.18.4@sha256:1b22b9ff28affdf574378a70dade4ef835b00b080c2ee2418530809dd62c3012"
        imagePullPolicy: IfNotPresent
        command:
        - cilium-operator-generic
        args:
        - --config-dir=/tmp/cilium/config-map
        - --debug=$(CILIUM_DEBUG)
        env:
        - name: K8S_NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: CILIUM_K8S_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: CILIUM_DEBUG
          valueFrom:
            configMapKeyRef:
              key: debug
              name: cilium-config
              optional: true
        - name: KUBERNETES_SERVICE_HOST
          value: "localhost"
        - name: KUBERNETES_SERVICE_PORT
          value: "7445"
        ports:
        - name: prometheus
          containerPort: 9963
          hostPort: 9963
          protocol: TCP
        livenessProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9234
            scheme: HTTP
          initialDelaySeconds: 60
          periodSeconds: 10
          timeoutSeconds: 3
        readinessProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9234
            scheme: HTTP
          initialDelaySeconds: 0
          periodSeconds: 5
          timeoutSeconds: 3
          failureThreshold: 5
        volumeMounts:
        - name: cilium-config-path
          mountPath: /tmp/cilium/config-map
          readOnly: true
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - ALL
        terminationMessagePolicy: FallbackToLogsOnError
      hostNetwork: true
      restartPolicy: Always
      priorityClassName: system-cluster-critical
      serviceAccountName: "cilium-operator"
      automountServiceAccountToken: true
      # In HA mode, cilium-operator pods must not be scheduled on the same
      # node as they will clash with each other.
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                io.cilium/app: operator
            topologyKey: kubernetes.io/hostname
      nodeSelector:
        kubernetes.io/os: linux
      tolerations:
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
        - key: node-role.kubernetes.io/master
          operator: Exists
        - key: node.kubernetes.io/not-ready
          operator: Exists
        - key: node.cloudprovider.kubernetes.io/uninitialized
          operator: Exists
        - key: node.cilium.io/agent-not-ready
          operator: Exists
      volumes:
        # To read the configuration from the config map
      - name: cilium-config-path
        configMap:
          name: cilium-config
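Because files like this are machine-generated rather than hand-edited, a client-side dry run over the rendered directory is a cheap way to catch YAML or schema regressions before merge. The step below is only a sketch of such a check and is not part of this PR; it would slot into a workflow like the one sketched above, and it assumes kubectl and a kubeconfig are available on the runner.

      # Hypothetical validation step; not the repository's actual CI definition.
      # --dry-run=client persists nothing; it only checks that the manifests
      # parse and pass kubectl's client-side validation.
      - name: Validate rendered manifests
        run: |
          kubectl apply --dry-run=client \
            -f clusters/cl01tl/manifests/cilium/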