chore: Update manifests after change
This commit is contained in:
clusters/cl01tl/manifests/vault/StatefulSet-vault.yml (new file, 163 lines added)
@@ -0,0 +1,163 @@
---
# Source: vault/charts/vault/templates/server-statefulset.yaml
# StatefulSet to run the actual vault server cluster.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: vault
  namespace: vault
  labels:
    app.kubernetes.io/name: vault
    app.kubernetes.io/instance: vault
    app.kubernetes.io/managed-by: Helm
spec:
  serviceName: vault-internal
  podManagementPolicy: Parallel
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  selector:
    matchLabels:
      app.kubernetes.io/name: vault
      app.kubernetes.io/instance: vault
      component: server
  template:
    metadata:
      labels:
        helm.sh/chart: vault-0.31.0
        app.kubernetes.io/name: vault
        app.kubernetes.io/instance: vault
        component: server
      # No pod annotations are set; explicit empty map instead of a bare
      # (null) key so linters and strict parsers are happy.
      annotations: {}
    spec:
      # Spread server pods across nodes: never co-schedule two vault
      # servers on the same hostname.
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchLabels:
                  app.kubernetes.io/name: vault
                  app.kubernetes.io/instance: "vault"
                  component: server
              topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 10
      serviceAccountName: vault
      securityContext:
        runAsNonRoot: true
        runAsGroup: 1000
        runAsUser: 100
        fsGroup: 1000
      hostNetwork: false
      volumes:
        - name: config
          configMap:
            name: vault-config
        - name: vault-nfs-storage-backup
          persistentVolumeClaim:
            claimName: vault-nfs-storage-backup
        - name: home
          emptyDir: {}
      containers:
        - name: vault
          resources:
            requests:
              cpu: 50m
              memory: 512Mi
          image: hashicorp/vault:1.21.1
          imagePullPolicy: IfNotPresent
          command:
            - "/bin/sh"
            - "-ec"
          # Substitute HOST_IP/POD_IP/HOSTNAME/etc. placeholders into the
          # storage config before handing off to the stock entrypoint.
          args:
            - "cp /vault/config/extraconfig-from-values.hcl /tmp/storageconfig.hcl;\n[ -n \"${HOST_IP}\" ] && sed -Ei \"s|HOST_IP|${HOST_IP?}|g\" /tmp/storageconfig.hcl;\n[ -n \"${POD_IP}\" ] && sed -Ei \"s|POD_IP|${POD_IP?}|g\" /tmp/storageconfig.hcl;\n[ -n \"${HOSTNAME}\" ] && sed -Ei \"s|HOSTNAME|${HOSTNAME?}|g\" /tmp/storageconfig.hcl;\n[ -n \"${API_ADDR}\" ] && sed -Ei \"s|API_ADDR|${API_ADDR?}|g\" /tmp/storageconfig.hcl;\n[ -n \"${TRANSIT_ADDR}\" ] && sed -Ei \"s|TRANSIT_ADDR|${TRANSIT_ADDR?}|g\" /tmp/storageconfig.hcl;\n[ -n \"${RAFT_ADDR}\" ] && sed -Ei \"s|RAFT_ADDR|${RAFT_ADDR?}|g\" /tmp/storageconfig.hcl;\n/usr/local/bin/docker-entrypoint.sh vault server -config=/tmp/storageconfig.hcl \n"
          securityContext:
            allowPrivilegeEscalation: false
          env:
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: VAULT_K8S_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: VAULT_K8S_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: VAULT_ADDR
              value: "http://127.0.0.1:8200"
            - name: VAULT_API_ADDR
              value: "http://$(POD_IP):8200"
            - name: SKIP_CHOWN
              value: "true"
            - name: SKIP_SETCAP
              value: "true"
            - name: HOSTNAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: VAULT_CLUSTER_ADDR
              value: "https://$(HOSTNAME).vault-internal:8201"
            - name: HOME
              value: "/home/vault"
            - name: VAULT_LOG_LEVEL
              value: "debug"
            - name: VAULT_LOG_FORMAT
              value: "standard"
          volumeMounts:
            - name: data
              mountPath: /vault/data
            - name: config
              mountPath: /vault/config
            - mountPath: /opt/backups/
              name: vault-nfs-storage-backup
              readOnly: false
            - name: home
              mountPath: /home/vault
          ports:
            - containerPort: 8200
              name: http
            - containerPort: 8201
              name: https-internal
            - containerPort: 8202
              name: http-rep
          readinessProbe:
            # Check status; unsealed vault servers return 0
            # The exit code reflects the seal status:
            #   0 - unsealed
            #   1 - error
            #   2 - sealed
            exec:
              command: ["/bin/sh", "-ec", "vault status -tls-skip-verify"]
            failureThreshold: 2
            initialDelaySeconds: 5
            periodSeconds: 5
            successThreshold: 1
            timeoutSeconds: 3
          lifecycle:
            # Vault container doesn't receive SIGTERM from Kubernetes
            # and after the grace period ends, Kube sends SIGKILL. This
            # causes issues with graceful shutdowns such as deregistering itself
            # from Consul (zombie services).
            preStop:
              exec:
                command:
                  - "/bin/sh"
                  - "-c"
                  # Adding a sleep here to give the pod eviction a
                  # chance to propagate, so requests will not be made
                  # to this pod while it's terminating
                  - "sleep 5 && kill -SIGTERM $(pidof vault)"
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 1Gi
Reference in New Issue
Block a user