infrastructure/clusters/cl01tl/manifests/authentik/Cluster-authentik-postgresql-18-cluster.yaml

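# Note: this CNPG Cluster bootstraps by restoring from the barman-cloud object
# store declared under externalClusters (serverName authentik-postgresql-18-backup-2),
# then resumes WAL archiving to the "authentik-postgresql-18-backup-garage-local"
# ObjectStore through the barman-cloud plugin listed in spec.plugins.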
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: authentik-postgresql-18-cluster
  namespace: authentik
  labels:
    helm.sh/chart: postgres-18-cluster-7.9.1
    app.kubernetes.io/name: authentik-postgresql-18
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/part-of: authentik
    app.kubernetes.io/version: "7.9.1"
    app.kubernetes.io/managed-by: Helm
spec:
  instances: 3
  imageName: "ghcr.io/cloudnative-pg/postgresql:18.3-standard-trixie"
  imagePullPolicy: IfNotPresent
  postgresUID: 26
  postgresGID: 26
  storage:
    size: 10Gi
    storageClass: local-path
  walStorage:
    size: 2Gi
    storageClass: local-path
  resources:
    limits:
      hugepages-2Mi: 256Mi
    requests:
      cpu: 100m
      memory: 256Mi
  affinity:
    enablePodAntiAffinity: true
    topologyKey: kubernetes.io/hostname
  primaryUpdateMethod: switchover
  primaryUpdateStrategy: unsupervised
  logLevel: info
  enableSuperuserAccess: false
  enablePDB: true
  postgresql:
    parameters:
      hot_standby_feedback: "on"
      max_slot_wal_keep_size: 2000MB
      shared_buffers: 128MB
  monitoring:
    enablePodMonitor: true
    disableDefaultQueries: false
  plugins:
    - name: barman-cloud.cloudnative-pg.io
      enabled: true
      isWALArchiver: true
      parameters:
        barmanObjectName: "authentik-postgresql-18-backup-garage-local"
        serverName: "authentik-postgresql-18-backup-2"
  bootstrap:
    recovery:
      database: app
      source: authentik-postgresql-18-backup-2
  externalClusters:
    - name: authentik-postgresql-18-backup-2
      plugin:
        name: barman-cloud.cloudnative-pg.io
        enabled: true
        isWALArchiver: false
        parameters:
          barmanObjectName: "authentik-postgresql-18-recovery"
          serverName: authentik-postgresql-18-backup-2
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: "authentik-authentik"
  labels:
    helm.sh/chart: "serviceAccount-2.1.0"
    app.kubernetes.io/name: "serviceAccount"
    app.kubernetes.io/instance: "authentik"
    app.kubernetes.io/managed-by: "Helm"
    app.kubernetes.io/part-of: "authentik"
    app.kubernetes.io/version: "2.1.0"
rules:
  - apiGroups:
      - apiextensions.k8s.io
    resources:
      - customresourcedefinitions
    verbs:
      - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: "authentik-authentik"
  labels:
    helm.sh/chart: "serviceAccount-2.1.0"
    app.kubernetes.io/name: "serviceAccount"
    app.kubernetes.io/instance: "authentik"
    app.kubernetes.io/managed-by: "Helm"
    app.kubernetes.io/part-of: "authentik"
    app.kubernetes.io/version: "2.1.0"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: "authentik-authentik"
subjects:
  - kind: ServiceAccount
    name: authentik
    namespace: "authentik"
---
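# The init script below builds /data/conf/valkey.conf at pod startup: the pod
# with index 0 is written as the master, and every other index gets a
# "replicaof" entry pointing at authentik-valkey-0 via the headless Service.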
apiVersion: v1
kind: ConfigMap
metadata:
  name: authentik-valkey-init-scripts
  labels:
    helm.sh/chart: valkey-0.9.3
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/version: "9.0.3"
    app.kubernetes.io/managed-by: Helm
data:
  init.sh: |-
    #!/bin/sh
    set -eu
    # Default config paths
    VALKEY_CONFIG=${VALKEY_CONFIG_PATH:-/data/conf/valkey.conf}
    LOGFILE="/data/init.log"
    DATA_DIR="/data/conf"
    # Logging function (outputs to stderr and file)
    log() {
      echo "$(date) $1" | tee -a "$LOGFILE" >&2
    }
    # Clean old log if requested
    if [ "${KEEP_OLD_LOGS:-false}" != "true" ]; then
      rm -f "$LOGFILE"
    fi
    if [ -f "$LOGFILE" ]; then
      log "Detected restart of this instance ($HOSTNAME)"
    fi
    log "Creating configuration in $DATA_DIR..."
    mkdir -p "$DATA_DIR"
    rm -f "$VALKEY_CONFIG"
    # Base valkey.conf
    log "Generating base valkey.conf"
    {
      echo "port 6379"
      echo "protected-mode no"
      echo "bind * -::*"
      echo "dir /data"
    } >>"$VALKEY_CONFIG"
    # Replica mode configuration
    log "Configuring replication mode"
    # Use POD_INDEX from Kubernetes metadata
    POD_INDEX=${POD_INDEX:-0}
    IS_MASTER=false
    # Check if this is pod-0 (master)
    if [ "$POD_INDEX" = "0" ]; then
      IS_MASTER=true
      log "This pod (index $POD_INDEX) is configured as MASTER"
    else
      log "This pod (index $POD_INDEX) is configured as REPLICA"
    fi
    # Configure replica settings
    if [ "$IS_MASTER" = "false" ]; then
      MASTER_HOST="authentik-valkey-0.authentik-valkey-headless.authentik.svc.cluster.local"
      MASTER_PORT="6379"
      log "Configuring replica to follow master at $MASTER_HOST:$MASTER_PORT"
      {
        echo ""
        echo "# Replica Configuration"
        echo "replicaof $MASTER_HOST $MASTER_PORT"
        echo "replica-announce-ip authentik-valkey-$POD_INDEX.authentik-valkey-headless.authentik.svc.cluster.local"
      } >>"$VALKEY_CONFIG"
    fi
    # Append extra configs if present
    if [ -f /usr/local/etc/valkey/valkey.conf ]; then
      log "Appending /usr/local/etc/valkey/valkey.conf"
      cat /usr/local/etc/valkey/valkey.conf >>"$VALKEY_CONFIG"
    fi
    if [ -d /extravalkeyconfigs ]; then
      log "Appending files in /extravalkeyconfigs/"
      cat /extravalkeyconfigs/* >>"$VALKEY_CONFIG"
    fi
---
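# The tunnel token is injected as the CF_MANAGED_TUNNEL_TOKEN env var and passed
# to cloudflared as $(CF_MANAGED_TUNNEL_TOKEN); Kubernetes expands $(VAR)
# references in container args from the container's own environment.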
apiVersion: apps/v1
kind: Deployment
metadata:
  name: authentik-cloudflared
  labels:
    app.kubernetes.io/controller: main
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: cloudflared
    app.kubernetes.io/version: 2026.3.0
    helm.sh/chart: cloudflared-2.4.0
  namespace: authentik
spec:
  revisionHistoryLimit: 3
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app.kubernetes.io/controller: main
      app.kubernetes.io/name: cloudflared
      app.kubernetes.io/instance: authentik
  template:
    metadata:
      labels:
        app.kubernetes.io/controller: main
        app.kubernetes.io/instance: authentik
        app.kubernetes.io/name: cloudflared
    spec:
      enableServiceLinks: false
      serviceAccountName: default
      automountServiceAccountToken: true
      hostIPC: false
      hostNetwork: false
      hostPID: false
      dnsPolicy: ClusterFirst
      containers:
        - args:
            - tunnel
            - --protocol
            - http2
            - --no-autoupdate
            - run
            - --token
            - $(CF_MANAGED_TUNNEL_TOKEN)
          env:
            - name: CF_MANAGED_TUNNEL_TOKEN
              valueFrom:
                secretKeyRef:
                  key: cf-tunnel-token
                  name: authentik-cloudflared-secret
          image: cloudflare/cloudflared:2026.3.0
          imagePullPolicy: IfNotPresent
          name: main
          resources:
            requests:
              cpu: 10m
              memory: 128Mi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: authentik-server
  namespace: "authentik"
  labels:
    helm.sh/chart: "authentik-2026.2.1"
    app.kubernetes.io/name: "authentik"
    app.kubernetes.io/instance: "authentik"
    app.kubernetes.io/component: "server"
    app.kubernetes.io/managed-by: "Helm"
    app.kubernetes.io/part-of: "authentik"
    app.kubernetes.io/version: "2026.2.1"
spec:
  replicas: 1
  revisionHistoryLimit: 3
  selector:
    matchLabels:
      app.kubernetes.io/name: "authentik"
      app.kubernetes.io/instance: "authentik"
      app.kubernetes.io/component: "server"
  template:
    metadata:
      labels:
        helm.sh/chart: "authentik-2026.2.1"
        app.kubernetes.io/name: "authentik"
        app.kubernetes.io/instance: "authentik"
        app.kubernetes.io/component: "server"
        app.kubernetes.io/managed-by: "Helm"
        app.kubernetes.io/part-of: "authentik"
        app.kubernetes.io/version: "2026.2.1"
      annotations:
        checksum/secret: e52441637b77f4baccf56d0c193e99c35c2eeef6b6c753a17e45254ca6b20e18
    spec:
      terminationGracePeriodSeconds: 30
      containers:
        - name: server
          image: ghcr.io/goauthentik/server:2026.2.1
          imagePullPolicy: IfNotPresent
          args:
            - server
          env:
            - name: AUTHENTIK_SECRET_KEY
              valueFrom:
                secretKeyRef:
                  key: key
                  name: authentik-key-secret
            - name: AUTHENTIK_POSTGRESQL__HOST
              valueFrom:
                secretKeyRef:
                  key: host
                  name: authentik-postgresql-18-cluster-app
            - name: AUTHENTIK_POSTGRESQL__NAME
              valueFrom:
                secretKeyRef:
                  key: dbname
                  name: authentik-postgresql-18-cluster-app
            - name: AUTHENTIK_POSTGRESQL__USER
              valueFrom:
                secretKeyRef:
                  key: user
                  name: authentik-postgresql-18-cluster-app
            - name: AUTHENTIK_POSTGRESQL__PASSWORD
              valueFrom:
                secretKeyRef:
                  key: password
                  name: authentik-postgresql-18-cluster-app
            - name: AUTHENTIK_LISTEN__HTTP
              value: "0.0.0.0:9000"
            - name: AUTHENTIK_LISTEN__HTTPS
              value: "0.0.0.0:9443"
            - name: AUTHENTIK_LISTEN__METRICS
              value: "0.0.0.0:9300"
          envFrom:
            - secretRef:
                name: authentik
          ports:
            - name: http
              containerPort: 9000
              protocol: TCP
            - name: https
              containerPort: 9443
              protocol: TCP
            - name: metrics
              containerPort: 9300
              protocol: TCP
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: '/-/health/live/'
              port: http
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 3
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: '/-/health/ready/'
              port: http
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 3
          startupProbe:
            failureThreshold: 60
            httpGet:
              path: '/-/health/live/'
              port: http
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 3
          resources: {}
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/name: "authentik"
                    app.kubernetes.io/instance: "authentik"
                    app.kubernetes.io/component: "server"
                topologyKey: kubernetes.io/hostname
      enableServiceLinks: true
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: authentik-worker
  namespace: "authentik"
  labels:
    helm.sh/chart: "authentik-2026.2.1"
    app.kubernetes.io/name: "authentik"
    app.kubernetes.io/instance: "authentik"
    app.kubernetes.io/component: "worker"
    app.kubernetes.io/managed-by: "Helm"
    app.kubernetes.io/part-of: "authentik"
    app.kubernetes.io/version: "2026.2.1"
spec:
  replicas: 1
  revisionHistoryLimit: 3
  selector:
    matchLabels:
      app.kubernetes.io/name: "authentik"
      app.kubernetes.io/instance: "authentik"
      app.kubernetes.io/component: "worker"
  template:
    metadata:
      labels:
        helm.sh/chart: "authentik-2026.2.1"
        app.kubernetes.io/name: "authentik"
        app.kubernetes.io/instance: "authentik"
        app.kubernetes.io/component: "worker"
        app.kubernetes.io/managed-by: "Helm"
        app.kubernetes.io/part-of: "authentik"
        app.kubernetes.io/version: "2026.2.1"
      annotations:
        checksum/secret: e52441637b77f4baccf56d0c193e99c35c2eeef6b6c753a17e45254ca6b20e18
    spec:
      serviceAccountName: authentik
      terminationGracePeriodSeconds: 30
      containers:
        - name: worker
          image: ghcr.io/goauthentik/server:2026.2.1
          imagePullPolicy: IfNotPresent
          args:
            - worker
          env:
            - name: AUTHENTIK_SECRET_KEY
              valueFrom:
                secretKeyRef:
                  key: key
                  name: authentik-key-secret
            - name: AUTHENTIK_POSTGRESQL__HOST
              valueFrom:
                secretKeyRef:
                  key: host
                  name: authentik-postgresql-18-cluster-app
            - name: AUTHENTIK_POSTGRESQL__NAME
              valueFrom:
                secretKeyRef:
                  key: dbname
                  name: authentik-postgresql-18-cluster-app
            - name: AUTHENTIK_POSTGRESQL__USER
              valueFrom:
                secretKeyRef:
                  key: user
                  name: authentik-postgresql-18-cluster-app
            - name: AUTHENTIK_POSTGRESQL__PASSWORD
              valueFrom:
                secretKeyRef:
                  key: password
                  name: authentik-postgresql-18-cluster-app
            - name: AUTHENTIK_LISTEN__HTTP
              value: "0.0.0.0:9000"
            - name: AUTHENTIK_LISTEN__METRICS
              value: "0.0.0.0:9300"
          envFrom:
            - secretRef:
                name: authentik
          ports:
            - name: http
              containerPort: 9000
              protocol: TCP
            - name: metrics
              containerPort: 9300
              protocol: TCP
          livenessProbe:
            exec:
              command:
                - ak
                - healthcheck
            failureThreshold: 3
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 3
          readinessProbe:
            exec:
              command:
                - ak
                - healthcheck
            failureThreshold: 3
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 3
          startupProbe:
            exec:
              command:
                - ak
                - healthcheck
            failureThreshold: 60
            initialDelaySeconds: 30
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 3
          resources: {}
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/name: "authentik"
                    app.kubernetes.io/instance: "authentik"
                    app.kubernetes.io/component: "worker"
                topologyKey: kubernetes.io/hostname
      enableServiceLinks: true
---
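# All ExternalSecrets below resolve through the cluster-wide "vault"
# ClusterSecretStore. The backup and recovery secrets read the same Vault path
# (/garage/home-infra/postgres-backups), so both ObjectStores authenticate with
# identical S3 credentials.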
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: authentik-cloudflared-secret
  namespace: authentik
  labels:
    helm.sh/chart: cloudflared-2.4.0
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/part-of: authentik
    app.kubernetes.io/version: "2.4.0"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: authentik-cloudflared-secret
spec:
  secretStoreRef:
    kind: ClusterSecretStore
    name: vault
  data:
    - secretKey: cf-tunnel-token
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /cloudflare/tunnels/authentik
        metadataPolicy: None
        property: token
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: authentik-key-secret
  namespace: authentik
  labels:
    app.kubernetes.io/name: authentik-key-secret
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/part-of: authentik
spec:
  secretStoreRef:
    kind: ClusterSecretStore
    name: vault
  data:
    - secretKey: key
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /cl01tl/authentik/key
        metadataPolicy: None
        property: key
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: authentik-postgresql-18-backup-garage-local-secret
  namespace: authentik
  labels:
    helm.sh/chart: postgres-18-cluster-7.9.1
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/part-of: authentik
    app.kubernetes.io/version: "7.9.1"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: authentik-postgresql-18-backup-garage-local-secret
spec:
  secretStoreRef:
    kind: ClusterSecretStore
    name: vault
  data:
    - secretKey: ACCESS_REGION
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /garage/home-infra/postgres-backups
        metadataPolicy: None
        property: ACCESS_REGION
    - secretKey: ACCESS_KEY_ID
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /garage/home-infra/postgres-backups
        metadataPolicy: None
        property: ACCESS_KEY_ID
    - secretKey: ACCESS_SECRET_KEY
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /garage/home-infra/postgres-backups
        metadataPolicy: None
        property: ACCESS_SECRET_KEY
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: authentik-postgresql-18-recovery-secret
  namespace: authentik
  labels:
    helm.sh/chart: postgres-18-cluster-7.9.1
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/part-of: authentik
    app.kubernetes.io/version: "7.9.1"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: authentik-postgresql-18-recovery-secret
spec:
  secretStoreRef:
    kind: ClusterSecretStore
    name: vault
  data:
    - secretKey: ACCESS_REGION
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /garage/home-infra/postgres-backups
        metadataPolicy: None
        property: ACCESS_REGION
    - secretKey: ACCESS_KEY_ID
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /garage/home-infra/postgres-backups
        metadataPolicy: None
        property: ACCESS_KEY_ID
    - secretKey: ACCESS_SECRET_KEY
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /garage/home-infra/postgres-backups
        metadataPolicy: None
        property: ACCESS_SECRET_KEY
---
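# This HTTPRoute attaches to a Gateway in another namespace (traefik); for the
# cross-namespace parentRef to bind, the Gateway listener's allowedRoutes must
# permit routes from this namespace.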
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: authentik-server
  namespace: "authentik"
  labels:
    helm.sh/chart: "authentik-2026.2.1"
    app.kubernetes.io/name: "authentik"
    app.kubernetes.io/instance: "authentik"
    app.kubernetes.io/component: "server"
    app.kubernetes.io/managed-by: "Helm"
    app.kubernetes.io/part-of: "authentik"
    app.kubernetes.io/version: "2026.2.1"
spec:
  parentRefs:
    - group: gateway.networking.k8s.io
      kind: Gateway
      name: traefik-gateway
      namespace: traefik
  hostnames:
    - authentik.alexlebens.net
  rules:
    - backendRefs:
        - group: ''
          kind: Service
          name: authentik-server
          port: 80
          weight: 1
      matches:
        - path:
            type: PathPrefix
            value: /
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: authentik-tailscale
  namespace: authentik
  labels:
    app.kubernetes.io/name: authentik-tailscale
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/part-of: authentik
    tailscale.com/proxy-class: no-metrics
  annotations:
    tailscale.com/experimental-forward-cluster-traffic-via-ingress: "true"
spec:
  ingressClassName: tailscale
  tls:
    - hosts:
        - auth-cl01tl
      secretName: auth-cl01tl
  rules:
    - host: auth-cl01tl
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: authentik-server
                port:
                  number: 80
---
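# Two ObjectStores share one destinationPath in Garage: the first is the live
# WAL-archive/backup target referenced by the Cluster's plugin, the second is
# the read-side recovery source referenced by externalClusters during bootstrap.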
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: authentik-postgresql-18-backup-garage-local
  namespace: authentik
  labels:
    helm.sh/chart: postgres-18-cluster-7.9.1
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/part-of: authentik
    app.kubernetes.io/version: "7.9.1"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: authentik-postgresql-18-backup-garage-local
spec:
  retentionPolicy: 7d
  instanceSidecarConfiguration:
    env:
      - name: AWS_REQUEST_CHECKSUM_CALCULATION
        value: when_required
      - name: AWS_RESPONSE_CHECKSUM_VALIDATION
        value: when_required
  configuration:
    destinationPath: s3://postgres-backups/cl01tl/authentik/authentik-postgresql-18-cluster
    endpointURL: http://garage-main.garage:3900
    s3Credentials:
      accessKeyId:
        name: authentik-postgresql-18-backup-garage-local-secret
        key: ACCESS_KEY_ID
      secretAccessKey:
        name: authentik-postgresql-18-backup-garage-local-secret
        key: ACCESS_SECRET_KEY
      region:
        name: authentik-postgresql-18-backup-garage-local-secret
        key: ACCESS_REGION
---
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: "authentik-postgresql-18-recovery"
  namespace: authentik
  labels:
    helm.sh/chart: postgres-18-cluster-7.9.1
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/part-of: authentik
    app.kubernetes.io/version: "7.9.1"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: "authentik-postgresql-18-recovery"
spec:
  configuration:
    destinationPath: s3://postgres-backups/cl01tl/authentik/authentik-postgresql-18-cluster
    endpointURL: http://garage-main.garage:3900
    wal:
      compression: snappy
      maxParallel: 1
    data:
      compression: snappy
      jobs: 1
    s3Credentials:
      accessKeyId:
        name: authentik-postgresql-18-recovery-secret
        key: ACCESS_KEY_ID
      secretAccessKey:
        name: authentik-postgresql-18-recovery-secret
        key: ACCESS_SECRET_KEY
      region:
        name: authentik-postgresql-18-recovery-secret
        key: ACCESS_REGION
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
  name: authentik-valkey
  labels:
    helm.sh/chart: valkey-0.9.3
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/version: "9.0.3"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: valkey
    app.kubernetes.io/component: podmonitor
spec:
  podMetricsEndpoints:
    - port: metrics
      interval: 30s
  namespaceSelector:
    matchNames:
      - authentik
  selector:
    matchLabels:
      app.kubernetes.io/name: valkey
      app.kubernetes.io/instance: authentik
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: authentik-postgresql-18-alert-rules
  namespace: authentik
  labels:
    helm.sh/chart: postgres-18-cluster-7.9.1
    app.kubernetes.io/name: authentik-postgresql-18
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/part-of: authentik
    app.kubernetes.io/version: "7.9.1"
    app.kubernetes.io/managed-by: Helm
spec:
  groups:
    - name: cloudnative-pg/authentik-postgresql-18
      rules:
        - alert: CNPGClusterBackendsWaitingWarning
          annotations:
            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
            description: |-
              Pod {{ $labels.pod }}
              has been waiting for longer than 5 minutes
          expr: |
            cnpg_backends_waiting_total{namespace="authentik"} > 300
          for: 1m
          labels:
            severity: warning
            namespace: authentik
            cnpg_cluster: authentik-postgresql-18-cluster
        - alert: CNPGClusterDatabaseDeadlockConflictsWarning
          annotations:
            summary: CNPG Cluster has over 10 deadlock conflicts.
            description: |-
              There are over 10 deadlock conflicts in
              {{ $labels.pod }}
          expr: |
            cnpg_pg_stat_database_deadlocks{namespace="authentik"} > 10
          for: 1m
          labels:
            severity: warning
            namespace: authentik
            cnpg_cluster: authentik-postgresql-18-cluster
        - alert: CNPGClusterHACritical
          annotations:
            summary: CNPG Cluster has no standby replicas!
            description: |-
              CloudNativePG Cluster "{{ $labels.job }}" has no ready standby replicas. Your cluster is at severe
              risk of data loss and downtime if the primary instance fails.
              The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
              will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
              This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
              instances. The replaced instance may need some time to catch up with the cluster primary instance.
              This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
              case you may want to silence it.
            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
          expr: |
            max by (job) (cnpg_pg_replication_streaming_replicas{namespace="authentik"} - cnpg_pg_replication_is_wal_receiver_up{namespace="authentik"}) < 1
          for: 5m
          labels:
            severity: critical
            namespace: authentik
            cnpg_cluster: authentik-postgresql-18-cluster
        - alert: CNPGClusterHAWarning
          annotations:
            summary: CNPG Cluster has fewer than 2 standby replicas.
            description: |-
              CloudNativePG Cluster "{{ $labels.job }}" has only {{ $value }} standby replicas, putting
              your cluster at risk if another instance fails. The cluster is still able to operate normally, although
              the `-ro` and `-r` endpoints operate at reduced capacity.
              This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
              need some time to catch up with the cluster primary instance.
              This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
              In this case you may want to silence it.
            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
          expr: |
            max by (job) (cnpg_pg_replication_streaming_replicas{namespace="authentik"} - cnpg_pg_replication_is_wal_receiver_up{namespace="authentik"}) < 2
          for: 5m
          labels:
            severity: warning
            namespace: authentik
            cnpg_cluster: authentik-postgresql-18-cluster
        - alert: CNPGClusterHighConnectionsCritical
          annotations:
            summary: CNPG Instance maximum number of connections critical!
            description: |-
              CloudNativePG Cluster "authentik/authentik-postgresql-18-cluster" instance {{ $labels.pod }} is using {{ $value }}% of
              the maximum number of connections.
            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
          expr: |
            sum by (pod) (cnpg_backends_total{namespace="authentik", pod=~"authentik-postgresql-18-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="authentik", pod=~"authentik-postgresql-18-cluster-([1-9][0-9]*)$"}) * 100 > 95
          for: 5m
          labels:
            severity: critical
            namespace: authentik
            cnpg_cluster: authentik-postgresql-18-cluster
        - alert: CNPGClusterHighConnectionsWarning
          annotations:
            summary: CNPG Instance is approaching the maximum number of connections.
            description: |-
              CloudNativePG Cluster "authentik/authentik-postgresql-18-cluster" instance {{ $labels.pod }} is using {{ $value }}% of
              the maximum number of connections.
            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
          expr: |
            sum by (pod) (cnpg_backends_total{namespace="authentik", pod=~"authentik-postgresql-18-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="authentik", pod=~"authentik-postgresql-18-cluster-([1-9][0-9]*)$"}) * 100 > 80
          for: 5m
          labels:
            severity: warning
            namespace: authentik
            cnpg_cluster: authentik-postgresql-18-cluster
        - alert: CNPGClusterHighReplicationLag
          annotations:
            summary: CNPG Cluster high replication lag
            description: |-
              CloudNativePG Cluster "authentik/authentik-postgresql-18-cluster" is experiencing a high replication lag of
              {{ $value }}ms.
              High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
          expr: |
            max(cnpg_pg_replication_lag{namespace="authentik",pod=~"authentik-postgresql-18-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
          for: 5m
          labels:
            severity: warning
            namespace: authentik
            cnpg_cluster: authentik-postgresql-18-cluster
        - alert: CNPGClusterInstancesOnSameNode
          annotations:
            summary: CNPG Cluster instances are located on the same node.
            description: |-
              CloudNativePG Cluster "authentik/authentik-postgresql-18-cluster" has {{ $value }}
              instances on the same node {{ $labels.node }}.
              A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
          expr: |
            count by (node) (kube_pod_info{namespace="authentik", pod=~"authentik-postgresql-18-cluster-([1-9][0-9]*)$"}) > 1
          for: 5m
          labels:
            severity: warning
            namespace: authentik
            cnpg_cluster: authentik-postgresql-18-cluster
        - alert: CNPGClusterLongRunningTransactionWarning
          annotations:
            summary: CNPG Cluster has a query running for longer than 5 minutes.
            description: |-
              CloudNativePG Cluster Pod {{ $labels.pod }}
              has been running a query for more than 5 minutes (300 seconds).
          expr: |-
            cnpg_backends_max_tx_duration_seconds{namespace="authentik"} > 300
          for: 1m
          labels:
            severity: warning
            namespace: authentik
            cnpg_cluster: authentik-postgresql-18-cluster
        - alert: CNPGClusterLowDiskSpaceCritical
          annotations:
            summary: CNPG Instance is running out of disk space!
            description: |-
              CloudNativePG Cluster "authentik/authentik-postgresql-18-cluster" is running extremely low on disk space. Check attached PVCs!
            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
          expr: |
            max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="authentik", persistentvolumeclaim=~"authentik-postgresql-18-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="authentik", persistentvolumeclaim=~"authentik-postgresql-18-cluster-([1-9][0-9]*)$"})) > 0.9 OR
            max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="authentik", persistentvolumeclaim=~"authentik-postgresql-18-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="authentik", persistentvolumeclaim=~"authentik-postgresql-18-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
            max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="authentik", persistentvolumeclaim=~"authentik-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
                /
                sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="authentik", persistentvolumeclaim=~"authentik-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
                *
                on(namespace, persistentvolumeclaim) group_left(volume)
                kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"authentik-postgresql-18-cluster-([1-9][0-9]*)$"}
            ) > 0.9
          for: 5m
          labels:
            severity: critical
            namespace: authentik
            cnpg_cluster: authentik-postgresql-18-cluster
        - alert: CNPGClusterLowDiskSpaceWarning
          annotations:
            summary: CNPG Instance is running out of disk space.
            description: |-
              CloudNativePG Cluster "authentik/authentik-postgresql-18-cluster" is running low on disk space. Check attached PVCs.
            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
          expr: |
            max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="authentik", persistentvolumeclaim=~"authentik-postgresql-18-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="authentik", persistentvolumeclaim=~"authentik-postgresql-18-cluster-([1-9][0-9]*)$"})) > 0.7 OR
            max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="authentik", persistentvolumeclaim=~"authentik-postgresql-18-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="authentik", persistentvolumeclaim=~"authentik-postgresql-18-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
            max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="authentik", persistentvolumeclaim=~"authentik-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
                /
                sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="authentik", persistentvolumeclaim=~"authentik-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
                *
                on(namespace, persistentvolumeclaim) group_left(volume)
                kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"authentik-postgresql-18-cluster-([1-9][0-9]*)$"}
            ) > 0.7
          for: 5m
          labels:
            severity: warning
            namespace: authentik
            cnpg_cluster: authentik-postgresql-18-cluster
        - alert: CNPGClusterOffline
          annotations:
            summary: CNPG Cluster has no running instances!
            description: |-
              CloudNativePG Cluster "authentik/authentik-postgresql-18-cluster" has no ready instances.
              Having an offline cluster means your applications will not be able to access the database, leading to
              potential service disruption and/or data loss.
            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
          expr: |
            (count(cnpg_collector_up{namespace="authentik",pod=~"authentik-postgresql-18-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
          for: 5m
          labels:
            severity: critical
            namespace: authentik
            cnpg_cluster: authentik-postgresql-18-cluster
        - alert: CNPGClusterPGDatabaseXidAgeWarning
          annotations:
            summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
            description: |-
              Over 300,000,000 transactions from frozen xid
              on pod {{ $labels.pod }}
          expr: |
            cnpg_pg_database_xid_age{namespace="authentik"} > 300000000
          for: 1m
          labels:
            severity: warning
            namespace: authentik
            cnpg_cluster: authentik-postgresql-18-cluster
        - alert: CNPGClusterPGReplicationWarning
          annotations:
            summary: CNPG Cluster standby is lagging behind the primary.
            description: |-
              Standby is lagging behind by over 300 seconds (5 minutes)
          expr: |
            cnpg_pg_replication_lag{namespace="authentik"} > 300
          for: 1m
          labels:
            severity: warning
            namespace: authentik
            cnpg_cluster: authentik-postgresql-18-cluster
        - alert: CNPGClusterReplicaFailingReplicationWarning
          annotations:
            summary: CNPG Cluster has a replica that is failing to replicate.
            description: |-
              Replica {{ $labels.pod }}
              is failing to replicate
          expr: |
            cnpg_pg_replication_in_recovery{namespace="authentik"} > cnpg_pg_replication_is_wal_receiver_up{namespace="authentik"}
          for: 1m
          labels:
            severity: warning
            namespace: authentik
            cnpg_cluster: authentik-postgresql-18-cluster
        - alert: CNPGClusterZoneSpreadWarning
          annotations:
            summary: CNPG Cluster has instances in the same availability zone.
            description: |-
              CloudNativePG Cluster "authentik/authentik-postgresql-18-cluster" has instances in the same availability zone.
              A disaster in one availability zone will lead to a potential service disruption and/or data loss.
            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
          expr: |
            3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="authentik", pod=~"authentik-postgresql-18-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
          for: 5m
          labels:
            severity: warning
            namespace: authentik
            cnpg_cluster: authentik-postgresql-18-cluster
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: authentik-valkey
  labels:
    helm.sh/chart: valkey-0.9.3
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/version: "9.0.3"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: valkey
spec:
  groups:
    - name: authentik-valkey
      rules:
        - alert: ValkeyDown
          annotations:
            description: Valkey instance {{ $labels.instance }} is down.
            summary: Valkey instance {{ $labels.instance }} down
          expr: |
            redis_up{service="authentik-valkey-metrics"} == 0
          for: 2m
          labels:
            severity: error
        - alert: ValkeyMemoryHigh
          annotations:
            description: |
              Valkey instance {{ $labels.instance }} is using {{ $value }}% of its available memory.
            summary: Valkey instance {{ $labels.instance }} is using too much memory
          expr: |
            redis_memory_used_bytes{service="authentik-valkey-metrics"} * 100
            /
            redis_memory_max_bytes{service="authentik-valkey-metrics"}
            > 90 <= 100
          for: 2m
          labels:
            severity: error
        - alert: ValkeyKeyEviction
          annotations:
            description: |
              Valkey instance {{ $labels.instance }} has evicted {{ $value }} keys in the last 5 minutes.
            summary: Valkey instance {{ $labels.instance }} has evicted keys
          expr: |
            increase(redis_evicted_keys_total{service="authentik-valkey-metrics"}[5m]) > 0
          for: 1s
          labels:
            severity: error
---
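# The recording rules below follow the Prometheus level:metric:operation naming
# convention, pre-aggregating the django_* metrics exported by authentik over
# 30s (and 1m) rate windows.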
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: authentik
  namespace: "authentik"
  labels:
    helm.sh/chart: "authentik-2026.2.1"
    app.kubernetes.io/name: "authentik"
    app.kubernetes.io/instance: "authentik"
    app.kubernetes.io/managed-by: "Helm"
    app.kubernetes.io/part-of: "authentik"
    app.kubernetes.io/version: "2026.2.1"
spec:
  groups:
    - name: authentik Aggregate request counters
      rules:
        - record: job:django_http_requests_before_middlewares_total:sum_rate30s
          expr: sum(rate(django_http_requests_before_middlewares_total[30s])) by (job)
        - record: job:django_http_requests_unknown_latency_total:sum_rate30s
          expr: sum(rate(django_http_requests_unknown_latency_total[30s])) by (job)
        - record: job:django_http_ajax_requests_total:sum_rate30s
          expr: sum(rate(django_http_ajax_requests_total[30s])) by (job)
        - record: job:django_http_responses_before_middlewares_total:sum_rate30s
          expr: sum(rate(django_http_responses_before_middlewares_total[30s])) by (job)
        - record: job:django_http_requests_unknown_latency_including_middlewares_total:sum_rate30s
          expr: sum(rate(django_http_requests_unknown_latency_including_middlewares_total[30s])) by (job)
        - record: job:django_http_requests_body_total_bytes:sum_rate30s
          expr: sum(rate(django_http_requests_body_total_bytes[30s])) by (job)
        - record: job:django_http_responses_streaming_total:sum_rate30s
          expr: sum(rate(django_http_responses_streaming_total[30s])) by (job)
        - record: job:django_http_responses_body_total_bytes:sum_rate30s
          expr: sum(rate(django_http_responses_body_total_bytes[30s])) by (job)
        - record: job:django_http_requests_total:sum_rate30s
          expr: sum(rate(django_http_requests_total_by_method[30s])) by (job)
        - record: job:django_http_requests_total_by_method:sum_rate30s
          expr: sum(rate(django_http_requests_total_by_method[30s])) by (job,method)
        - record: job:django_http_requests_total_by_transport:sum_rate30s
          expr: sum(rate(django_http_requests_total_by_transport[30s])) by (job,transport)
        - record: job:django_http_requests_total_by_view:sum_rate30s
          expr: sum(rate(django_http_requests_total_by_view_transport_method[30s])) by (job,view)
        - record: job:django_http_requests_total_by_view_transport_method:sum_rate30s
          expr: sum(rate(django_http_requests_total_by_view_transport_method[30s])) by (job,view,transport,method)
        - record: job:django_http_responses_total_by_templatename:sum_rate30s
          expr: sum(rate(django_http_responses_total_by_templatename[30s])) by (job,templatename)
        - record: job:django_http_responses_total_by_status:sum_rate30s
          expr: sum(rate(django_http_responses_total_by_status[30s])) by (job,status)
        - record: job:django_http_responses_total_by_status_name_method:sum_rate30s
          expr: sum(rate(django_http_responses_total_by_status_name_method[30s])) by (job,status,name,method)
        - record: job:django_http_responses_total_by_charset:sum_rate30s
          expr: sum(rate(django_http_responses_total_by_charset[30s])) by (job,charset)
        - record: job:django_http_exceptions_total_by_type:sum_rate30s
          expr: sum(rate(django_http_exceptions_total_by_type[30s])) by (job,type)
        - record: job:django_http_exceptions_total_by_view:sum_rate30s
          expr: sum(rate(django_http_exceptions_total_by_view[30s])) by (job,view)
    - name: authentik Aggregate latency histograms
      rules:
        - record: job:django_http_requests_latency_including_middlewares_seconds:quantile_rate30s
          expr: histogram_quantile(0.50, sum(rate(django_http_requests_latency_including_middlewares_seconds_bucket[30s])) by (job, le))
          labels:
            quantile: "50"
        - record: job:django_http_requests_latency_including_middlewares_seconds:quantile_rate30s
          expr: histogram_quantile(0.95, sum(rate(django_http_requests_latency_including_middlewares_seconds_bucket[30s])) by (job, le))
          labels:
            quantile: "95"
        - record: job:django_http_requests_latency_including_middlewares_seconds:quantile_rate30s
          expr: histogram_quantile(0.99, sum(rate(django_http_requests_latency_including_middlewares_seconds_bucket[30s])) by (job, le))
          labels:
            quantile: "99"
        - record: job:django_http_requests_latency_including_middlewares_seconds:quantile_rate30s
          expr: histogram_quantile(0.999, sum(rate(django_http_requests_latency_including_middlewares_seconds_bucket[30s])) by (job, le))
          labels:
            quantile: "99.9"
        - record: job:django_http_requests_latency_seconds:quantile_rate30s
          expr: histogram_quantile(0.50, sum(rate(django_http_requests_latency_seconds_bucket[30s])) by (job, le))
          labels:
            quantile: "50"
        - record: job:django_http_requests_latency_seconds:quantile_rate30s
          expr: histogram_quantile(0.95, sum(rate(django_http_requests_latency_seconds_bucket[30s])) by (job, le))
          labels:
            quantile: "95"
        - record: job:django_http_requests_latency_seconds:quantile_rate30s
          expr: histogram_quantile(0.99, sum(rate(django_http_requests_latency_seconds_bucket[30s])) by (job, le))
          labels:
            quantile: "99"
        - record: job:django_http_requests_latency_seconds:quantile_rate30s
          expr: histogram_quantile(0.999, sum(rate(django_http_requests_latency_seconds_bucket[30s])) by (job, le))
          labels:
            quantile: "99.9"
    - name: authentik Aggregate model operations
      rules:
        - record: job:django_model_inserts_total:sum_rate1m
          expr: sum(rate(django_model_inserts_total[1m])) by (job, model)
        - record: job:django_model_updates_total:sum_rate1m
          expr: sum(rate(django_model_updates_total[1m])) by (job, model)
        - record: job:django_model_deletes_total:sum_rate1m
          expr: sum(rate(django_model_deletes_total[1m])) by (job, model)
    - name: authentik Aggregate database operations
      rules:
        - record: job:django_db_new_connections_total:sum_rate30s
          expr: sum(rate(django_db_new_connections_total[30s])) by (alias, vendor)
        - record: job:django_db_new_connection_errors_total:sum_rate30s
          expr: sum(rate(django_db_new_connection_errors_total[30s])) by (alias, vendor)
        - record: job:django_db_execute_total:sum_rate30s
          expr: sum(rate(django_db_execute_total[30s])) by (alias, vendor)
        - record: job:django_db_execute_many_total:sum_rate30s
          expr: sum(rate(django_db_execute_many_total[30s])) by (alias, vendor)
        - record: job:django_db_errors_total:sum_rate30s
          expr: sum(rate(django_db_errors_total[30s])) by (alias, vendor, type)
    - name: authentik Aggregate migrations
      rules:
        - record: job:django_migrations_applied_total:max
          expr: max(django_migrations_applied_total) by (job, connection)
        - record: job:django_migrations_unapplied_total:max
          expr: max(django_migrations_unapplied_total) by (job, connection)
    - name: authentik Alerts
      rules:
        - alert: NoWorkersConnected
          labels:
            severity: critical
          expr: max (authentik_tasks_workers) < 1
          for: 10m
          annotations:
            summary: No workers connected
            message: authentik instance {{ $labels.instance }}'s workers are either not running or not connected.
        - alert: PendingMigrations
          labels:
            severity: critical
          expr: max without (pid) (django_migrations_unapplied_total) > 0
          for: 10m
          annotations:
            summary: Pending database migrations
            message: authentik instance {{ $labels.instance }} has pending database migrations
        - alert: FailedSystemTasks
          labels:
            severity: critical
          expr: sum(increase(authentik_tasks_errors_total[2h])) by (actor_name) > 0
          for: 2h
          annotations:
            summary: Failed system tasks
            message: System task {{ $labels.actor_name }} has failed on authentik instance {{ $labels.instance }}
        - alert: DisconnectedOutposts
          labels:
            severity: critical
          expr: sum by (outpost) (max without (pid) (authentik_outposts_connected{uid!~"specific.*"})) < 1
          for: 30m
          annotations:
            summary: Disconnected outpost
            message: Outpost {{ $labels.outpost }} has at least 1 disconnected instance
---
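# Grants HTTPRoutes in the listed media namespaces permission to reference the
# ak-outpost-traefik-proxy-auth Service across namespace boundaries (presumably
# the authentik proxy outpost used as a forward-auth backend).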
apiVersion: gateway.networking.k8s.io/v1beta1
kind: ReferenceGrant
metadata:
  name: allow-outpost-cross-namespace-access
  namespace: authentik
  labels:
    app.kubernetes.io/name: allow-outpost-cross-namespace-access
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/part-of: authentik
spec:
  from:
    - group: gateway.networking.k8s.io
      kind: HTTPRoute
      namespace: lidarr
    - group: gateway.networking.k8s.io
      kind: HTTPRoute
      namespace: radarr
    - group: gateway.networking.k8s.io
      kind: HTTPRoute
      namespace: radarr-4k
    - group: gateway.networking.k8s.io
      kind: HTTPRoute
      namespace: radarr-anime
    - group: gateway.networking.k8s.io
      kind: HTTPRoute
      namespace: radarr-standup
    - group: gateway.networking.k8s.io
      kind: HTTPRoute
      namespace: sonarr
    - group: gateway.networking.k8s.io
      kind: HTTPRoute
      namespace: sonarr-4k
    - group: gateway.networking.k8s.io
      kind: HTTPRoute
      namespace: sonarr-anime
  to:
    - group: ""
      kind: Service
      name: ak-outpost-traefik-proxy-auth
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: authentik
  namespace: "authentik"
  labels:
    helm.sh/chart: "serviceAccount-2.1.0"
    app.kubernetes.io/name: "serviceAccount"
    app.kubernetes.io/instance: "authentik"
    app.kubernetes.io/managed-by: "Helm"
    app.kubernetes.io/part-of: "authentik"
    app.kubernetes.io/version: "2.1.0"
rules:
  - apiGroups:
      - ""
    resources:
      - secrets
      - services
      - configmaps
    verbs:
      - get
      - create
      - delete
      - list
      - patch
  - apiGroups:
      - extensions
      - apps
    resources:
      - deployments
    verbs:
      - get
      - create
      - delete
      - list
      - patch
  - apiGroups:
      - extensions
      - networking.k8s.io
    resources:
      - ingresses
    verbs:
      - get
      - create
      - delete
      - list
      - patch
  - apiGroups:
      - traefik.containo.us
      - traefik.io
    resources:
      - middlewares
    verbs:
      - get
      - create
      - delete
      - list
      - patch
  - apiGroups:
      - gateway.networking.k8s.io
    resources:
      - httproutes
    verbs:
      - get
      - create
      - delete
      - list
      - patch
  - apiGroups:
      - monitoring.coreos.com
    resources:
      - servicemonitors
    verbs:
      - get
      - create
      - delete
      - list
      - patch
  - apiGroups:
      - apiextensions.k8s.io
    resources:
      - customresourcedefinitions
    verbs:
      - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: authentik
  namespace: "authentik"
  labels:
    helm.sh/chart: "serviceAccount-2.1.0"
    app.kubernetes.io/name: "serviceAccount"
    app.kubernetes.io/instance: "authentik"
    app.kubernetes.io/managed-by: "Helm"
    app.kubernetes.io/part-of: "authentik"
    app.kubernetes.io/version: "2.1.0"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: authentik
subjects:
  - kind: ServiceAccount
    name: authentik
    namespace: "authentik"
---
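# CNPG ScheduledBackup schedules use a six-field cron expression that includes
# a leading seconds field: "0 5 14 * * *" fires once a day at 14:05:00.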
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
  name: "authentik-postgresql-18-scheduled-backup-live-backup"
  namespace: authentik
  labels:
    helm.sh/chart: postgres-18-cluster-7.9.1
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/part-of: authentik
    app.kubernetes.io/version: "7.9.1"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: "authentik-postgresql-18-scheduled-backup-live-backup"
spec:
  immediate: true
  suspend: false
  schedule: "0 5 14 * * *"
  backupOwnerReference: self
  cluster:
    name: authentik-postgresql-18-cluster
  method: plugin
  pluginConfiguration:
    name: barman-cloud.cloudnative-pg.io
    parameters:
      barmanObjectName: "authentik-postgresql-18-backup-garage-local"
---
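# Values below are base64-encoded per the Kubernetes Secret contract; for
# example AUTHENTIK_REDIS__HOST decodes to "authentik-valkey". Where the same
# key also appears in a Deployment's explicit env list (the PostgreSQL
# settings), the env entry takes precedence over this envFrom source.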
apiVersion: v1
kind: Secret
metadata:
  name: authentik
  namespace: "authentik"
  labels:
    helm.sh/chart: "authentik-2026.2.1"
    app.kubernetes.io/name: "authentik"
    app.kubernetes.io/instance: "authentik"
    app.kubernetes.io/managed-by: "Helm"
    app.kubernetes.io/part-of: "authentik"
    app.kubernetes.io/version: "2026.2.1"
data:
  AUTHENTIK_EMAIL__PORT: "NTg3"
  AUTHENTIK_EMAIL__TIMEOUT: "MzA="
  AUTHENTIK_EMAIL__USE_SSL: "ZmFsc2U="
  AUTHENTIK_EMAIL__USE_TLS: "ZmFsc2U="
  AUTHENTIK_ENABLED: "dHJ1ZQ=="
  AUTHENTIK_ERROR_REPORTING__ENABLED: "ZmFsc2U="
  AUTHENTIK_ERROR_REPORTING__ENVIRONMENT: "azhz"
  AUTHENTIK_ERROR_REPORTING__SEND_PII: "ZmFsc2U="
  AUTHENTIK_EVENTS__CONTEXT_PROCESSORS__ASN: "L2dlb2lwL0dlb0xpdGUyLUFTTi5tbWRi"
  AUTHENTIK_EVENTS__CONTEXT_PROCESSORS__GEOIP: "L2dlb2lwL0dlb0xpdGUyLUNpdHkubW1kYg=="
  AUTHENTIK_LOG_LEVEL: "aW5mbw=="
  AUTHENTIK_OUTPOSTS__CONTAINER_IMAGE_BASE: "Z2hjci5pby9nb2F1dGhlbnRpay8lKHR5cGUpczolKHZlcnNpb24pcw=="
  AUTHENTIK_POSTGRESQL__HOST: "YXV0aGVudGlrLXBvc3RncmVzcWw="
  AUTHENTIK_POSTGRESQL__NAME: "YXV0aGVudGlr"
  AUTHENTIK_POSTGRESQL__PORT: "NTQzMg=="
  AUTHENTIK_POSTGRESQL__USER: "YXV0aGVudGlr"
  AUTHENTIK_REDIS__HOST: "YXV0aGVudGlrLXZhbGtleQ=="
  AUTHENTIK_WEB__PATH: "Lw=="
---
apiVersion: v1
kind: Service
metadata:
  name: authentik-server-metrics
  namespace: "authentik"
  labels:
    helm.sh/chart: "authentik-2026.2.1"
    app.kubernetes.io/name: "authentik"
    app.kubernetes.io/instance: "authentik"
    app.kubernetes.io/component: "server-metrics"
    app.kubernetes.io/managed-by: "Helm"
    app.kubernetes.io/part-of: "authentik"
    app.kubernetes.io/version: "2026.2.1"
spec:
  type: ClusterIP
  ports:
    - name: metrics
      protocol: TCP
      port: 9300
      targetPort: metrics
  selector:
    app.kubernetes.io/name: "authentik"
    app.kubernetes.io/instance: "authentik"
    app.kubernetes.io/component: "server"
---
apiVersion: v1
kind: Service
metadata:
  name: authentik-server
  namespace: "authentik"
  labels:
    helm.sh/chart: "authentik-2026.2.1"
    app.kubernetes.io/name: "authentik"
    app.kubernetes.io/instance: "authentik"
    app.kubernetes.io/component: "server"
    app.kubernetes.io/managed-by: "Helm"
    app.kubernetes.io/part-of: "authentik"
    app.kubernetes.io/version: "2026.2.1"
spec:
  type: ClusterIP
  ports:
    - name: http
      protocol: TCP
      port: 80
      targetPort: 9000
    - name: https
      protocol: TCP
      port: 443
      targetPort: 9443
  selector:
    app.kubernetes.io/name: "authentik"
    app.kubernetes.io/instance: "authentik"
    app.kubernetes.io/component: "server"
---
apiVersion: v1
kind: Service
metadata:
  name: authentik-valkey-headless
  labels:
    helm.sh/chart: valkey-0.9.3
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/version: "9.0.3"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: headless
spec:
  type: ClusterIP
  clusterIP: None
  publishNotReadyAddresses: true
  ports:
    - name: tcp
      port: 6379
      targetPort: tcp
      protocol: TCP
  selector:
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: authentik
---
apiVersion: v1
kind: Service
metadata:
  name: authentik-valkey-metrics
  labels:
    helm.sh/chart: valkey-0.9.3
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/version: "9.0.3"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: metrics
    app.kubernetes.io/part-of: valkey
spec:
  type: ClusterIP
  ports:
    - name: metrics
      port: 9121
      protocol: TCP
      targetPort: metrics
  selector:
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: authentik
---
apiVersion: v1
kind: Service
metadata:
  name: authentik-valkey-read
  labels:
    helm.sh/chart: valkey-0.9.3
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/version: "9.0.3"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: read
spec:
  type: ClusterIP
  ports:
    - name: tcp
      port: 6379
      targetPort: tcp
      protocol: TCP
  selector:
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: authentik
---
apiVersion: v1
kind: Service
metadata:
  name: authentik-valkey
  labels:
    helm.sh/chart: valkey-0.9.3
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/version: "9.0.3"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: primary
spec:
  type: ClusterIP
  ports:
    - port: 6379
      targetPort: tcp
      protocol: TCP
      name: tcp
  selector:
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: authentik
    statefulset.kubernetes.io/pod-name: authentik-valkey-0
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: authentik-valkey
  labels:
    helm.sh/chart: valkey-0.9.3
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/version: "9.0.3"
    app.kubernetes.io/managed-by: Helm
automountServiceAccountToken: false
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: authentik
  namespace: "authentik"
  labels:
    helm.sh/chart: "serviceAccount-2.1.0"
    app.kubernetes.io/name: "serviceAccount"
    app.kubernetes.io/instance: "authentik"
    app.kubernetes.io/managed-by: "Helm"
    app.kubernetes.io/part-of: "authentik"
    app.kubernetes.io/version: "2.1.0"
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: authentik-server
  namespace: "authentik"
  labels:
    helm.sh/chart: "authentik-2026.2.1"
    app.kubernetes.io/name: "authentik"
    app.kubernetes.io/instance: "authentik"
    app.kubernetes.io/component: "server-metrics"
    app.kubernetes.io/managed-by: "Helm"
    app.kubernetes.io/part-of: "authentik"
    app.kubernetes.io/version: "2026.2.1"
spec:
  endpoints:
    - port: metrics
      interval: 30s
      scrapeTimeout: 3s
      path: /metrics
  namespaceSelector:
    matchNames:
      - authentik
  selector:
    matchLabels:
      app.kubernetes.io/name: "authentik"
      app.kubernetes.io/instance: "authentik"
      app.kubernetes.io/component: "server-metrics"
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: authentik-valkey
  labels:
    helm.sh/chart: valkey-0.9.3
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/version: "9.0.3"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: valkey
    app.kubernetes.io/component: service-monitor
spec:
  endpoints:
    - port: metrics
      interval: 30s
  namespaceSelector:
    matchNames:
      - authentik
  selector:
    matchLabels:
      app.kubernetes.io/name: valkey
      app.kubernetes.io/instance: authentik
      app.kubernetes.io/component: metrics
---
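# POD_INDEX for the containers below is read from the apps.kubernetes.io/pod-index
# label, which the StatefulSet controller stamps on each pod (the PodIndexLabel
# feature, beta since Kubernetes 1.28); init.sh uses it to decide master vs. replica.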
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: authentik-valkey
  labels:
    helm.sh/chart: valkey-0.9.3
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/version: "9.0.3"
    app.kubernetes.io/managed-by: Helm
spec:
  serviceName: authentik-valkey-headless
  replicas: 3
  podManagementPolicy: OrderedReady
  selector:
    matchLabels:
      app.kubernetes.io/name: valkey
      app.kubernetes.io/instance: authentik
  volumeClaimTemplates:
    - metadata:
        name: valkey-data
      spec:
        accessModes:
          - ReadWriteOnce
        storageClassName: "ceph-block"
        resources:
          requests:
            storage: "1Gi"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: valkey
        app.kubernetes.io/instance: authentik
      annotations:
        checksum/initconfig: "07891dc8f81b4fb3516e2993c6e827b1"
    spec:
      automountServiceAccountToken: false
      serviceAccountName: authentik-valkey
      securityContext:
        fsGroup: 1000
        runAsGroup: 1000
        runAsUser: 1000
      initContainers:
        - name: authentik-valkey-init
          image: docker.io/valkey/valkey:9.0.3
          imagePullPolicy: IfNotPresent
          securityContext:
            capabilities:
              drop:
                - ALL
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 1000
          command: ["/scripts/init.sh"]
          env:
            - name: POD_INDEX
              valueFrom:
                fieldRef:
                  fieldPath: metadata.labels['apps.kubernetes.io/pod-index']
          volumeMounts:
            - name: valkey-data
              mountPath: /data
            - name: scripts
              mountPath: /scripts
      containers:
        - name: authentik-valkey
          image: docker.io/valkey/valkey:9.0.3
          imagePullPolicy: IfNotPresent
          command: ["valkey-server"]
          args: ["/data/conf/valkey.conf"]
          securityContext:
            capabilities:
              drop:
                - ALL
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 1000
          env:
            - name: POD_INDEX
              valueFrom:
                fieldRef:
                  fieldPath: metadata.labels['apps.kubernetes.io/pod-index']
            - name: VALKEY_LOGLEVEL
              value: "notice"
          ports:
            - name: tcp
              containerPort: 6379
              protocol: TCP
          startupProbe:
            exec:
              command: ["sh", "-c", "valkey-cli ping"]
          livenessProbe:
            exec:
              command: ["sh", "-c", "valkey-cli ping"]
          resources:
            requests:
              cpu: 10m
              memory: 128Mi
          volumeMounts:
            - name: valkey-data
              mountPath: /data
        - name: metrics
          image: ghcr.io/oliver006/redis_exporter:v1.82.0
          imagePullPolicy: "IfNotPresent"
          ports:
            - name: metrics
              containerPort: 9121
          startupProbe:
            tcpSocket:
              port: metrics
          livenessProbe:
            tcpSocket:
              port: metrics
          readinessProbe:
            httpGet:
              path: /
              port: metrics
          resources:
            requests:
              cpu: 10m
              memory: 64M
          env:
            - name: REDIS_ALIAS
              value: authentik-valkey
      volumes:
        - name: scripts
          configMap:
            name: authentik-valkey-init-scripts
            defaultMode: 0555