Merge pull request 'Automated Manifest Update' (#6466) from auto/update-manifests into manifests

Reviewed-on: #6466
2026-05-03 01:40:35 +00:00
64 changed files with 6 additions and 2718 deletions


@@ -123,7 +123,7 @@ data:
 komodo IN CNAME traefik-cl01tl
 languagetool IN CNAME traefik-cl01tl
 lidarr IN CNAME traefik-cl01tl
-mail IN CNAME traefik-cl01tl
+loki IN CNAME traefik-cl01tl
 medialyze IN CNAME traefik-cl01tl
 music-grabber IN CNAME traefik-cl01tl
 navidrome IN CNAME traefik-cl01tl
@@ -151,7 +151,6 @@ data:
 sonarr-4k IN CNAME traefik-cl01tl
 sonarr-anime IN CNAME traefik-cl01tl
 sparkyfitness IN CNAME traefik-cl01tl
-stalwart IN CNAME traefik-cl01tl
 tdarr IN CNAME traefik-cl01tl
 tubearchivist IN CNAME traefik-cl01tl
 vault IN CNAME traefik-cl01tl
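
The two hunks above repoint the changed record from mail to loki and drop the stalwart CNAME. Assuming blocky serves this zone inside the cluster (the resolver address below is hypothetical), the change can be spot-checked with dig:

dig +short loki.alexlebens.net @10.43.0.53      # hypothetical blocky service IP; should resolve via traefik-cl01tl
dig +short stalwart.alexlebens.net @10.43.0.53  # should now come back empty (NXDOMAIN)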


@@ -22,7 +22,7 @@ spec:
 template:
 metadata:
 annotations:
-checksum/configMaps: 6a74369df91b8e1256132009e63003401e6137c2e747c30f8f439de142d24c7b
+checksum/configMaps: 8aadfb0f8e3c44c960e3daba036be7e8b635c50df168eef754e6cdd0745e118c
 labels:
 app.kubernetes.io/controller: main
 app.kubernetes.io/instance: blocky


@@ -31,7 +31,7 @@ data:
 enable-webhook: true
 webhook-name: elastic-operator.elastic-operator.k8s.elastic.co
 webhook-port: 9443
-namespaces: [stalwart,tubearchivist]
+namespaces: [tubearchivist]
 operator-namespace: elastic-operator
 enable-leader-election: true
 elasticsearch-observation-interval: 10s
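
With stalwart removed from the operator's namespaces list, ECK now reconciles Elasticsearch resources only in tubearchivist. A quick way to confirm nothing is left behind (cluster access assumed):

kubectl get elasticsearch --all-namespaces
# expect only resources in the tubearchivist namespace once the stalwart objects are gone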


@@ -20,7 +20,7 @@ spec:
 metadata:
 annotations:
 "co.elastic.logs/raw": "[{\"type\":\"filestream\",\"enabled\":true,\"id\":\"eck-container-logs-${data.kubernetes.container.id}\",\"paths\":[\"/var/log/containers/*${data.kubernetes.container.id}.log\"],\"parsers\":[{\"container\":{}},{\"ndjson\":{\"keys_under_root\":true}}],\"prospector.scanner.symlinks\":true,\"processors\":[{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"error\",\"to\":\"_error\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"_error\",\"to\":\"error.message\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"source\",\"to\":\"_source\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"_source\",\"to\":\"event.source\"}]}}]}]"
-"checksum/config": 15d5acbaf534c61de655db1042d2c8a173621b080c443dbc5e0e6566bab0ac63
+"checksum/config": 7f07e518108e0e0e6b03c9684720cc66be5df6fe318d92df1793547baffc2306
 labels:
 app.kubernetes.io/name: elastic-operator
 app.kubernetes.io/instance: elastic-operator


@@ -357,24 +357,6 @@ data:
 interval: 30s
 name: authentik
 url: https://authentik.alexlebens.net
-- alerts:
-- type: ntfy
-conditions:
-- '[STATUS] == 200'
-- '[CERTIFICATE_EXPIRATION] > 240h'
-group: core
-interval: 30s
-name: roundcube
-url: https://mail.alexlebens.net
-- alerts:
-- type: ntfy
-conditions:
-- '[STATUS] == 200'
-- '[CERTIFICATE_EXPIRATION] > 240h'
-group: core
-interval: 30s
-name: stalwart
-url: https://stalwart.alexlebens.net
 - alerts:
 - type: ntfy
 conditions:
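
The deleted blocks follow the same gatus endpoint schema as the entries that remain: each endpoint is polled on its interval and alerts via ntfy when a condition fails. For reference, an entry has roughly this shape (the name and URL below are hypothetical):

- alerts:
    - type: ntfy
  conditions:
    - '[STATUS] == 200'
    - '[CERTIFICATE_EXPIRATION] > 240h'
  group: core
  interval: 30s
  name: example-service
  url: https://example-service.alexlebens.net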


@@ -26,7 +26,7 @@ spec:
 app.kubernetes.io/name: gatus
 app.kubernetes.io/instance: gatus
 annotations:
-checksum/config: 84322f5f6d4ca8f740b634e70fd762c2e0badb1686585f5d8250ced5d2afda19
+checksum/config: 016f8ee6d8cfbdfc312ab5048ef55a938f821cba728681f8145e85745c0224f8
 spec:
 serviceAccountName: default
 automountServiceAccountToken: false


@@ -417,24 +417,6 @@ data:
 href: https://authentik.alexlebens.net
 siteMonitor: http://authentik-server.authentik:80
 statusStyle: dot
-- Email Client:
-icon: sh-roundcube.webp
-description: Roundcube
-href: https://mail.alexlebens.net
-siteMonitor: http://roundcube.roundcube:80
-statusStyle: dot
-- Email Server:
-icon: sh-stalwart.webp
-description: Stalwart
-href: https://stalwart.alexlebens.net
-siteMonitor: http://stalwart.stalwart:80
-statusStyle: dot
-namespace: stalwart
-app: stalwart
-podSelector: >-
-app.kubernetes.io/instance in (
-stalwart
-)
 - Notifications:
 icon: sh-ntfy.webp
 description: ntfy
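
Each deleted entry is a homepage service tile; href is the user-facing URL while siteMonitor points at the in-cluster Service so the status dot tracks the backend directly. A minimal entry in the same style looks like this (names hypothetical):

- Example Service:
    icon: sh-example.webp
    description: Example
    href: https://example.alexlebens.net
    siteMonitor: http://example-service.example:80
    statusStyle: dot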


@@ -24,7 +24,7 @@ spec:
 template:
 metadata:
 annotations:
-checksum/configMaps: 93bedb9a595b5b1d2a52224b9ca6c6aad24ca55cb0ff848c7706af81f6785e8b
+checksum/configMaps: e9ad0ea163de6974f11ff965c12acd6223b75d1661495978c45dd9b790976b78
 checksum/secrets: d3ba83f111cd32f92c909268c55ad8bbd4f9e299b74b35b33c1a011180d8b378
 labels:
 app.kubernetes.io/controller: main


@@ -1,66 +0,0 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: roundcube-postgresql-18-cluster
namespace: roundcube
labels:
app.kubernetes.io/name: roundcube-postgresql-18-cluster
helm.sh/chart: postgres-18-cluster-7.12.1
app.kubernetes.io/instance: roundcube
app.kubernetes.io/part-of: roundcube
app.kubernetes.io/version: "7.12.1"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:18.3-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 20m
memory: 80Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "roundcube-postgresql-18-backup-garage-local"
serverName: "roundcube-postgresql-18-backup-1"
bootstrap:
recovery:
database: app
source: roundcube-postgresql-18-backup-1
externalClusters:
- name: roundcube-postgresql-18-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "roundcube-postgresql-18-recovery"
serverName: roundcube-postgresql-18-backup-1
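
Note the bootstrap stanza: the cluster was defined to initialize by recovering database app from the external cluster backed by the roundcube-postgresql-18-recovery object store, while ongoing WAL archiving went to roundcube-postgresql-18-backup-garage-local. Before deleting a cluster like this, its replication and backup state can be inspected with the CNPG kubectl plugin, assuming the plugin is installed:

kubectl cnpg status roundcube-postgresql-18-cluster -n roundcube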


@@ -1,38 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: roundcube
labels:
app.kubernetes.io/instance: roundcube
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: roundcube
helm.sh/chart: roundcube-4.6.2
namespace: roundcube
data:
default.conf: |
server {
listen 80 default_server;
server_name _;
root /var/www/html;
location / {
try_files $uri /index.php$is_args$args;
}
location ~ \.php(/|$) {
try_files $uri =404;
fastcgi_pass roundcube:9000;
fastcgi_read_timeout 300;
proxy_read_timeout 300;
fastcgi_split_path_info ^(.+\.php)(/.*)$;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $realpath_root$fastcgi_script_name;
fastcgi_param DOCUMENT_ROOT $realpath_root;
internal;
}
client_max_body_size 6m;
error_log /var/log/nginx/error.log;
access_log /var/log/nginx/access.log;
}
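
This nginx vhost serves static files itself and hands .php requests to the FPM container through the roundcube Service on port 9000 (fastcgi_pass roundcube:9000). While the deployment existed, an in-cluster smoke test could have looked like this (throwaway pod; a standard but assumed image):

kubectl -n roundcube run curl --rm -it --restart=Never --image=curlimages/curl -- \
  curl -s -o /dev/null -w '%{http_code}\n' http://roundcube.roundcube/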


@@ -1,81 +0,0 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: roundcube-cleandb
labels:
app.kubernetes.io/controller: cleandb
app.kubernetes.io/instance: roundcube
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: roundcube
helm.sh/chart: roundcube-4.6.2
namespace: roundcube
spec:
suspend: false
concurrencyPolicy: Forbid
startingDeadlineSeconds: 30
timeZone: America/Chicago
schedule: "30 4 * * *"
successfulJobsHistoryLimit: 1
failedJobsHistoryLimit: 1
jobTemplate:
spec:
parallelism: 1
backoffLimit: 3
template:
metadata:
annotations:
checksum/configMaps: fb5b79e14a16673def67423a38952ae1855171d07a8332d9e863febcd28fce92
labels:
app.kubernetes.io/controller: cleandb
app.kubernetes.io/instance: roundcube
app.kubernetes.io/name: roundcube
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
restartPolicy: Never
containers:
- args:
- bin/cleandb.sh
env:
- name: ROUNDCUBEMAIL_DB_TYPE
value: pgsql
- name: ROUNDCUBEMAIL_DB_HOST
valueFrom:
secretKeyRef:
key: host
name: roundcube-postgresql-18-cluster-app
- name: ROUNDCUBEMAIL_DB_NAME
valueFrom:
secretKeyRef:
key: dbname
name: roundcube-postgresql-18-cluster-app
- name: ROUNDCUBEMAIL_DB_USER
valueFrom:
secretKeyRef:
key: user
name: roundcube-postgresql-18-cluster-app
- name: ROUNDCUBEMAIL_DB_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: roundcube-postgresql-18-cluster-app
- name: ROUNDCUBEMAIL_DES_KEY
valueFrom:
secretKeyRef:
key: DES_KEY
name: roundcube-key
- name: ROUNDCUBEMAIL_DEFAULT_HOST
value: tls://stalwart.stalwart
- name: ROUNDCUBEMAIL_SMTP_SERVER
value: tls://stalwart.stalwart
- name: ROUNDCUBEMAIL_SKIN
value: elastic
- name: ROUNDCUBEMAIL_PLUGINS
value: archive,zipdownload,newmail_notifier
image: roundcube/roundcubemail:1.6.15-fpm-alpine@sha256:0e07c1c66d5a1392f0c47cc79e85e0c60095108f715037d7d0aa3fd8cbe2e780
name: backup


@@ -1,113 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: roundcube-main
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: roundcube
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: roundcube
helm.sh/chart: roundcube-4.6.2
namespace: roundcube
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: roundcube
app.kubernetes.io/instance: roundcube
template:
metadata:
annotations:
checksum/configMaps: fb5b79e14a16673def67423a38952ae1855171d07a8332d9e863febcd28fce92
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: roundcube
app.kubernetes.io/name: roundcube
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: ROUNDCUBEMAIL_DB_TYPE
value: pgsql
- name: ROUNDCUBEMAIL_DB_HOST
valueFrom:
secretKeyRef:
key: host
name: roundcube-postgresql-18-cluster-app
- name: ROUNDCUBEMAIL_DB_NAME
valueFrom:
secretKeyRef:
key: dbname
name: roundcube-postgresql-18-cluster-app
- name: ROUNDCUBEMAIL_DB_USER
valueFrom:
secretKeyRef:
key: user
name: roundcube-postgresql-18-cluster-app
- name: ROUNDCUBEMAIL_DB_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: roundcube-postgresql-18-cluster-app
- name: ROUNDCUBEMAIL_DES_KEY
valueFrom:
secretKeyRef:
key: DES_KEY
name: roundcube-key
- name: ROUNDCUBEMAIL_DEFAULT_HOST
value: stalwart.stalwart
- name: ROUNDCUBEMAIL_DEFAULT_PORT
value: "143"
- name: ROUNDCUBEMAIL_SMTP_SERVER
value: stalwart.stalwart
- name: ROUNDCUBEMAIL_SMTP_PORT
value: "25"
- name: ROUNDCUBEMAIL_SKIN
value: elastic
- name: ROUNDCUBEMAIL_PLUGINS
value: archive,zipdownload,newmail_notifier
image: roundcube/roundcubemail:1.6.15-fpm-alpine@sha256:0e07c1c66d5a1392f0c47cc79e85e0c60095108f715037d7d0aa3fd8cbe2e780
name: main
resources:
requests:
cpu: 1m
memory: 40Mi
volumeMounts:
- mountPath: /var/www/html
name: data
- mountPath: /tmp/roundcube-temp
name: temp
- env:
- name: NGINX_HOST
value: mail.alexlebens.net
- name: NGINX_PHP_CGI
value: roundcube.roundcube:9000
image: nginx:1.30.0-alpine-slim@sha256:830b40ff1beb5e018e56aef2ed1f9fe87a7797e35a555b75fea5c9568e316b04
name: nginx
volumeMounts:
- mountPath: /etc/nginx/conf.d/default.conf
mountPropagation: None
name: config
readOnly: true
subPath: default.conf
- mountPath: /var/www/html
name: data
volumes:
- configMap:
name: roundcube-config
name: config
- name: data
persistentVolumeClaim:
claimName: roundcube-data
- emptyDir: {}
name: temp


@@ -1,47 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: roundcube-data-backup-secret-external
namespace: roundcube
labels:
helm.sh/chart: volsync-target-data-1.1.1
app.kubernetes.io/instance: roundcube
app.kubernetes.io/part-of: roundcube
app.kubernetes.io/version: "1.1.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: roundcube-data-backup-secret-external
spec:
secretStoreRef:
kind: ClusterSecretStore
name: openbao
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "s3:{{ .ENDPOINT }}/{{ .BUCKET }}/cl01tl/roundcube/roundcube-data"
data:
- secretKey: ENDPOINT
remoteRef:
key: /digital-ocean/config
property: ENDPOINT
- secretKey: BUCKET
remoteRef:
key: /digital-ocean/home-infra/volsync-backups
property: BUCKET
- secretKey: RESTIC_PASSWORD
remoteRef:
key: /digital-ocean/home-infra/volsync-backups
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
key: /digital-ocean/home-infra/volsync-backups
property: AWS_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
key: /digital-ocean/home-infra/volsync-backups
property: AWS_ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
key: /digital-ocean/home-infra/volsync-backups
property: AWS_SECRET_ACCESS_KEY
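
The target.template block merges a rendered RESTIC_REPOSITORY key into the secret built from the fetched values; with hypothetical values ENDPOINT=https://nyc3.digitaloceanspaces.com and BUCKET=volsync-backups, the rendered key would be:

RESTIC_REPOSITORY: s3:https://nyc3.digitaloceanspaces.com/volsync-backups/cl01tl/roundcube/roundcube-data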


@@ -1,47 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: roundcube-data-backup-secret-local
namespace: roundcube
labels:
helm.sh/chart: volsync-target-data-1.1.1
app.kubernetes.io/instance: roundcube
app.kubernetes.io/part-of: roundcube
app.kubernetes.io/version: "1.1.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: roundcube-data-backup-secret-local
spec:
secretStoreRef:
kind: ClusterSecretStore
name: openbao
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "s3:{{ .ENDPOINT }}/{{ .BUCKET }}/cl01tl/roundcube/roundcube-data"
data:
- secretKey: ENDPOINT
remoteRef:
key: /garage/config
property: ENDPOINT_LOCAL
- secretKey: BUCKET
remoteRef:
key: /garage/home-infra/volsync-backups
property: BUCKET
- secretKey: RESTIC_PASSWORD
remoteRef:
key: /garage/home-infra/volsync-backups
property: RESTIC_PASSWORD_LOCAL
- secretKey: AWS_DEFAULT_REGION
remoteRef:
key: /garage/home-infra/volsync-backups
property: ACCESS_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
key: /garage/home-infra/volsync-backups
property: ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
key: /garage/home-infra/volsync-backups
property: ACCESS_SECRET_KEY


@@ -1,47 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: roundcube-data-backup-secret-remote
namespace: roundcube
labels:
helm.sh/chart: volsync-target-data-1.1.1
app.kubernetes.io/instance: roundcube
app.kubernetes.io/part-of: roundcube
app.kubernetes.io/version: "1.1.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: roundcube-data-backup-secret-remote
spec:
secretStoreRef:
kind: ClusterSecretStore
name: openbao
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "s3:{{ .ENDPOINT }}/{{ .BUCKET }}/cl01tl/roundcube/roundcube-data"
data:
- secretKey: ENDPOINT
remoteRef:
key: /garage/config
property: ENDPOINT_REMOTE
- secretKey: BUCKET
remoteRef:
key: /garage/home-infra/volsync-backups
property: BUCKET
- secretKey: RESTIC_PASSWORD
remoteRef:
key: /garage/home-infra/volsync-backups
property: RESTIC_PASSWORD_REMOTE
- secretKey: AWS_DEFAULT_REGION
remoteRef:
key: /garage/home-infra/volsync-backups
property: ACCESS_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
key: /garage/home-infra/volsync-backups
property: ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
key: /garage/home-infra/volsync-backups
property: ACCESS_SECRET_KEY


@@ -1,18 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: roundcube-key
namespace: roundcube
labels:
app.kubernetes.io/name: roundcube-key
app.kubernetes.io/instance: roundcube
app.kubernetes.io/part-of: roundcube
spec:
secretStoreRef:
kind: ClusterSecretStore
name: openbao
data:
- secretKey: DES_KEY
remoteRef:
key: /cl01tl/roundcube/key
property: des-key


@@ -1,29 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: roundcube-postgresql-18-backup-garage-local-secret
namespace: roundcube
labels:
app.kubernetes.io/name: roundcube-postgresql-18-backup-garage-local-secret
helm.sh/chart: postgres-18-cluster-7.12.1
app.kubernetes.io/instance: roundcube
app.kubernetes.io/part-of: roundcube
app.kubernetes.io/version: "7.12.1"
app.kubernetes.io/managed-by: Helm
spec:
secretStoreRef:
kind: ClusterSecretStore
name: openbao
data:
- secretKey: ACCESS_REGION
remoteRef:
key: /garage/home-infra/postgres-backups
property: ACCESS_REGION
- secretKey: ACCESS_KEY_ID
remoteRef:
key: /garage/home-infra/postgres-backups
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
key: /garage/home-infra/postgres-backups
property: ACCESS_SECRET_KEY


@@ -1,29 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: roundcube-postgresql-18-recovery-secret
namespace: roundcube
labels:
helm.sh/chart: postgres-18-cluster-7.12.1
app.kubernetes.io/instance: roundcube
app.kubernetes.io/part-of: roundcube
app.kubernetes.io/version: "7.12.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: roundcube-postgresql-18-recovery-secret
spec:
secretStoreRef:
kind: ClusterSecretStore
name: openbao
data:
- secretKey: ACCESS_REGION
remoteRef:
key: /garage/home-infra/postgres-backups
property: ACCESS_REGION
- secretKey: ACCESS_KEY_ID
remoteRef:
key: /garage/home-infra/postgres-backups
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
key: /garage/home-infra/postgres-backups
property: ACCESS_SECRET_KEY


@@ -1,30 +0,0 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: roundcube
labels:
app.kubernetes.io/instance: roundcube
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: roundcube
helm.sh/chart: roundcube-4.6.2
namespace: roundcube
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- "mail.alexlebens.net"
rules:
- backendRefs:
- group: ""
kind: Service
name: roundcube
namespace: roundcube
port: 80
weight: 1
matches:
- path:
type: PathPrefix
value: /


@@ -1,33 +0,0 @@
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: roundcube-postgresql-18-backup-garage-local
namespace: roundcube
labels:
app.kubernetes.io/name: roundcube-postgresql-18-backup-garage-local
helm.sh/chart: postgres-18-cluster-7.12.1
app.kubernetes.io/instance: roundcube
app.kubernetes.io/part-of: roundcube
app.kubernetes.io/version: "7.12.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 7d
instanceSidecarConfiguration:
env:
- name: AWS_REQUEST_CHECKSUM_CALCULATION
value: when_required
- name: AWS_RESPONSE_CHECKSUM_VALIDATION
value: when_required
configuration:
destinationPath: s3://postgres-backups/cl01tl/roundcube/roundcube-postgresql-18-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: roundcube-postgresql-18-backup-garage-local-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: roundcube-postgresql-18-backup-garage-local-secret
key: ACCESS_SECRET_KEY
region:
name: roundcube-postgresql-18-backup-garage-local-secret
key: ACCESS_REGION


@@ -1,32 +0,0 @@
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "roundcube-postgresql-18-recovery"
namespace: roundcube
labels:
helm.sh/chart: postgres-18-cluster-7.12.1
app.kubernetes.io/instance: roundcube
app.kubernetes.io/part-of: roundcube
app.kubernetes.io/version: "7.12.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: "roundcube-postgresql-18-recovery"
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/roundcube/roundcube-postgresql-18-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: roundcube-postgresql-18-recovery-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: roundcube-postgresql-18-recovery-secret
key: ACCESS_SECRET_KEY
region:
name: roundcube-postgresql-18-recovery-secret
key: ACCESS_REGION


@@ -1,17 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: roundcube-data
labels:
app.kubernetes.io/instance: roundcube
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: roundcube
helm.sh/chart: roundcube-4.6.2
namespace: roundcube
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"


@@ -1,30 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: roundcube-data-backup-source-local
namespace: roundcube
labels:
helm.sh/chart: volsync-target-data-1.1.1
app.kubernetes.io/instance: roundcube
app.kubernetes.io/part-of: roundcube
app.kubernetes.io/version: "1.1.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: roundcube-data-backup-source-local
spec:
groups:
- name: volsync.alerts
rules:
- alert: VolSyncBackupPodFailed
expr: |
(kube_pod_container_status_last_terminated_exitcode > 0)
* on(pod, namespace) group_left(owner_name)
kube_pod_owner{owner_kind="Job", owner_name=~"volsync-.*"}
for: 1m
labels:
severity: critical
annotations:
summary: "VolSync Backup Pod failed in {{ $labels.namespace }}"
description: |
A pod for the VolSync backup of PVC 'roundcube-data' failed with exit code {{ $value }}.
Job: {{ $labels.owner_name }}
Namespace: {{ $labels.namespace }}
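
The rule's expression pairs nonzero last-terminated exit codes with kube_pod_owner restricted to Jobs named volsync-*; group_left(owner_name) copies the Job name onto the resulting series so the alert can report which backup failed. The ownership half can be checked ad hoc against the Prometheus HTTP API (service address hypothetical):

curl -sG http://prometheus.monitoring:9090/api/v1/query \
  --data-urlencode 'query=kube_pod_owner{owner_kind="Job", owner_name=~"volsync-.*", namespace="roundcube"}'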


@@ -1,270 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: roundcube-postgresql-18-alert-rules
namespace: roundcube
labels:
app.kubernetes.io/name: roundcube-postgresql-18-alert-rules
helm.sh/chart: postgres-18-cluster-7.12.1
app.kubernetes.io/instance: roundcube
app.kubernetes.io/part-of: roundcube
app.kubernetes.io/version: "7.12.1"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/roundcube-postgresql-18
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total{namespace="roundcube"} > 300
for: 1m
labels:
severity: warning
namespace: roundcube
cnpg_cluster: roundcube-postgresql-18-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks{namespace="roundcube"} > 10
for: 1m
labels:
severity: warning
namespace: roundcube
cnpg_cluster: roundcube-postgresql-18-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="roundcube"} - cnpg_pg_replication_is_wal_receiver_up{namespace="roundcube"}) < 1
for: 5m
labels:
severity: critical
namespace: roundcube
cnpg_cluster: roundcube-postgresql-18-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="roundcube"} - cnpg_pg_replication_is_wal_receiver_up{namespace="roundcube"}) < 2
for: 5m
labels:
severity: warning
namespace: roundcube
cnpg_cluster: roundcube-postgresql-18-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "roundcube/roundcube-postgresql-18-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="roundcube", pod=~"roundcube-postgresql-18-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="roundcube", pod=~"roundcube-postgresql-18-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: roundcube
cnpg_cluster: roundcube-postgresql-18-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "roundcube/roundcube-postgresql-18-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="roundcube", pod=~"roundcube-postgresql-18-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="roundcube", pod=~"roundcube-postgresql-18-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: roundcube
cnpg_cluster: roundcube-postgresql-18-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "roundcube/roundcube-postgresql-18-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="roundcube",pod=~"roundcube-postgresql-18-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: roundcube
cnpg_cluster: roundcube-postgresql-18-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "roundcube/roundcube-postgresql-18-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="roundcube", pod=~"roundcube-postgresql-18-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: roundcube
cnpg_cluster: roundcube-postgresql-18-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds{namespace="roundcube"} > 300
for: 1m
labels:
severity: warning
namespace: roundcube
cnpg_cluster: roundcube-postgresql-18-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "roundcube/roundcube-postgresql-18-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-18-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-18-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-18-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-18-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"roundcube-postgresql-18-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: roundcube
cnpg_cluster: roundcube-postgresql-18-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "roundcube/roundcube-postgresql-18-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-18-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-18-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-18-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-18-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"roundcube-postgresql-18-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: roundcube
cnpg_cluster: roundcube-postgresql-18-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "roundcube/roundcube-postgresql-18-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="roundcube",pod=~"roundcube-postgresql-18-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: roundcube
cnpg_cluster: roundcube-postgresql-18-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age{namespace="roundcube"} > 300000000
for: 1m
labels:
severity: warning
namespace: roundcube
cnpg_cluster: roundcube-postgresql-18-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag{namespace="roundcube"} > 300
for: 1m
labels:
severity: warning
namespace: roundcube
cnpg_cluster: roundcube-postgresql-18-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery{namespace="roundcube"} > cnpg_pg_replication_is_wal_receiver_up{namespace="roundcube"}
for: 1m
labels:
severity: warning
namespace: roundcube
cnpg_cluster: roundcube-postgresql-18-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster instances in the same zone.
description: |-
CloudNativePG Cluster "roundcube/roundcube-postgresql-18-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="roundcube", pod=~"roundcube-postgresql-18-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: roundcube
cnpg_cluster: roundcube-postgresql-18-cluster


@@ -1,29 +0,0 @@
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: roundcube-data-backup-source-external
namespace: roundcube
labels:
helm.sh/chart: volsync-target-data-1.1.1
app.kubernetes.io/instance: roundcube
app.kubernetes.io/part-of: roundcube
app.kubernetes.io/version: "1.1.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: roundcube-data-backup
spec:
sourcePVC: roundcube-data
trigger:
schedule: 12 13 * * *
restic:
pruneIntervalDays: 7
repository: roundcube-data-backup-secret-external
retain:
daily: 7
hourly: 0
monthly: 3
weekly: 4
yearly: 1
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 1Gi
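
The retain block is VolSync's front end to restic's forget policy; hourly: 0 simply means no hourly flag is passed. As a rough sketch of what the mover applies on each prune interval (not the exact invocation):

restic forget --keep-daily 7 --keep-weekly 4 --keep-monthly 3 --keep-yearly 1 --prune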


@@ -1,29 +0,0 @@
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: roundcube-data-backup-source-local
namespace: roundcube
labels:
helm.sh/chart: volsync-target-data-1.1.1
app.kubernetes.io/instance: roundcube
app.kubernetes.io/part-of: roundcube
app.kubernetes.io/version: "1.1.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: roundcube-data-backup-source-local
spec:
sourcePVC: roundcube-data
trigger:
schedule: 12 11 * * *
restic:
pruneIntervalDays: 7
repository: roundcube-data-backup-secret-local
retain:
daily: 7
hourly: 0
monthly: 3
weekly: 4
yearly: 1
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 1Gi


@@ -1,29 +0,0 @@
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: roundcube-data-backup-source-remote
namespace: roundcube
labels:
helm.sh/chart: volsync-target-data-1.1.1
app.kubernetes.io/instance: roundcube
app.kubernetes.io/part-of: roundcube
app.kubernetes.io/version: "1.1.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: roundcube-data-backup
spec:
sourcePVC: roundcube-data
trigger:
schedule: 12 12 * * *
restic:
pruneIntervalDays: 7
repository: roundcube-data-backup-secret-remote
retain:
daily: 7
hourly: 0
monthly: 3
weekly: 4
yearly: 1
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 1Gi


@@ -1,24 +0,0 @@
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "roundcube-postgresql-18-scheduled-backup-live-backup"
namespace: roundcube
labels:
app.kubernetes.io/name: "roundcube-postgresql-18-scheduled-backup-live-backup"
helm.sh/chart: postgres-18-cluster-7.12.1
app.kubernetes.io/instance: roundcube
app.kubernetes.io/part-of: roundcube
app.kubernetes.io/version: "7.12.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 40 15 * * *"
backupOwnerReference: self
cluster:
name: roundcube-postgresql-18-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "roundcube-postgresql-18-backup-garage-local"
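
CNPG ScheduledBackup schedules use a six-field cron expression with a leading seconds field, and immediate: true also fires a backup as soon as the resource is created. Annotated:

schedule: "0 40 15 * * *"   # sec min hour day-of-month month day-of-week → daily at 15:40:00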


@@ -1,26 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: roundcube
labels:
app.kubernetes.io/instance: roundcube
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: roundcube
app.kubernetes.io/service: roundcube
helm.sh/chart: roundcube-4.6.2
namespace: roundcube
spec:
type: ClusterIP
ports:
- port: 9000
targetPort: 9000
protocol: TCP
name: mail
- port: 80
targetPort: 80
protocol: TCP
name: web
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: roundcube
app.kubernetes.io/name: roundcube


@@ -1,66 +0,0 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: stalwart-postgresql-18-cluster
namespace: stalwart
labels:
app.kubernetes.io/name: stalwart-postgresql-18-cluster
helm.sh/chart: postgres-18-cluster-7.12.1
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "7.12.1"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:18.3-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 20m
memory: 80Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "stalwart-postgresql-18-backup-garage-local"
serverName: "stalwart-postgresql-18-backup-1"
bootstrap:
recovery:
database: app
source: stalwart-postgresql-18-backup-1
externalClusters:
- name: stalwart-postgresql-18-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "stalwart-postgresql-18-recovery"
serverName: stalwart-postgresql-18-backup-1


@@ -1,87 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: stalwart-valkey-init-scripts
labels:
helm.sh/chart: valkey-0.9.4
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: stalwart
app.kubernetes.io/version: "9.0.3"
app.kubernetes.io/managed-by: Helm
data:
init.sh: |-
#!/bin/sh
set -eu
# Default config paths
VALKEY_CONFIG=${VALKEY_CONFIG_PATH:-/data/conf/valkey.conf}
LOGFILE="/data/init.log"
DATA_DIR="/data/conf"
# Logging function (outputs to stderr and file)
log() {
echo "$(date) $1" | tee -a "$LOGFILE" >&2
}
# Clean old log if requested
if [ "${KEEP_OLD_LOGS:-false}" != "true" ]; then
rm -f "$LOGFILE"
fi
if [ -f "$LOGFILE" ]; then
log "Detected restart of this instance ($HOSTNAME)"
fi
log "Creating configuration in $DATA_DIR..."
mkdir -p "$DATA_DIR"
rm -f "$VALKEY_CONFIG"
# Base valkey.conf
log "Generating base valkey.conf"
{
echo "port 6379"
echo "protected-mode no"
echo "bind * -::*"
echo "dir /data"
} >>"$VALKEY_CONFIG"
# Replica mode configuration
log "Configuring replication mode"
# Use POD_INDEX from Kubernetes metadata
POD_INDEX=${POD_INDEX:-0}
IS_MASTER=false
# Check if this is pod-0 (master)
if [ "$POD_INDEX" = "0" ]; then
IS_MASTER=true
log "This pod (index $POD_INDEX) is configured as MASTER"
else
log "This pod (index $POD_INDEX) is configured as REPLICA"
fi
# Configure replica settings
if [ "$IS_MASTER" = "false" ]; then
MASTER_HOST="stalwart-valkey-0.stalwart-valkey-headless.stalwart.svc.cluster.local"
MASTER_PORT="6379"
log "Configuring replica to follow master at $MASTER_HOST:$MASTER_PORT"
{
echo ""
echo "# Replica Configuration"
echo "replicaof $MASTER_HOST $MASTER_PORT"
echo "replica-announce-ip stalwart-valkey-$POD_INDEX.stalwart-valkey-headless.stalwart.svc.cluster.local"
} >>"$VALKEY_CONFIG"
fi
# Append extra configs if present
if [ -f /usr/local/etc/valkey/valkey.conf ]; then
log "Appending /usr/local/etc/valkey/valkey.conf"
cat /usr/local/etc/valkey/valkey.conf >>"$VALKEY_CONFIG"
fi
if [ -d /extravalkeyconfigs ]; then
log "Appending files in /extravalkeyconfigs/"
cat /extravalkeyconfigs/* >>"$VALKEY_CONFIG"
fi
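
The script keys replication off POD_INDEX: pod 0 becomes the master and every other index writes a replicaof line against pod 0's headless-service DNS name. The chart presumably injects POD_INDEX into the pod; one common way on Kubernetes 1.28+ (an assumption, not shown in this diff) is the downward API and the pod-index label:

env:
  - name: POD_INDEX
    valueFrom:
      fieldRef:
        fieldPath: metadata.labels['apps.kubernetes.io/pod-index']  # assumed injection path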


@@ -1,45 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: stalwart-metrics
labels:
app.kubernetes.io/controller: metrics
app.kubernetes.io/instance: stalwart
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart
helm.sh/chart: stalwart-4.6.2
namespace: stalwart
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: metrics
app.kubernetes.io/name: stalwart
app.kubernetes.io/instance: stalwart
template:
metadata:
labels:
app.kubernetes.io/controller: metrics
app.kubernetes.io/instance: stalwart
app.kubernetes.io/name: stalwart
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- --es.uri=https://elasticsearch-stalwart-es-http.tubearchivist:9200
- --es.ssl-skip-verify
image: quay.io/prometheuscommunity/elasticsearch-exporter:v1.10.0@sha256:a6a4d4403f670faf6a94b8c7f9adbca3ead91f26dd64e5ccf95fa69025dc6e58
name: main
resources:
requests:
cpu: 1m
memory: 10Mi


@@ -1,49 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: stalwart
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: stalwart
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart
helm.sh/chart: stalwart-4.6.2
namespace: stalwart
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: stalwart
app.kubernetes.io/instance: stalwart
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: stalwart
app.kubernetes.io/name: stalwart
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- image: ghcr.io/stalwartlabs/stalwart:v0.15.5@sha256:dcf575db2d53d9ef86d6ced8abe4ba491984659a0f8862cc6079ee7b41c3c568
name: main
resources:
requests:
cpu: 10m
memory: 100Mi
volumeMounts:
- mountPath: /opt/stalwart
name: config
volumes:
- name: config
persistentVolumeClaim:
claimName: stalwart-config


@@ -1,29 +0,0 @@
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
name: elasticsearch-stalwart
namespace: stalwart
labels:
app.kubernetes.io/name: elasticsearch-stalwart
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
spec:
version: 9.3.3
auth:
fileRealm:
- secretName: stalwart-elasticsearch-config
nodeSets:
- name: default
count: 2
config:
node.store.allow_mmap: false
volumeClaimTemplates:
- metadata:
name: elasticsearch-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
storageClassName: ceph-block
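
The auth.fileRealm entry points ECK at a secret providing file-realm users; ECK expects username, password, and roles keys, which matches the stalwart-elasticsearch-config ExternalSecret further down. Rendered, the secret would look roughly like this (values hypothetical):

apiVersion: v1
kind: Secret
metadata:
  name: stalwart-elasticsearch-config
  namespace: stalwart
stringData:
  username: stalwart   # hypothetical file-realm user
  password: changeme   # hypothetical
  roles: superuser     # comma-separated role list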


@@ -1,47 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: stalwart-config-backup-secret-external
namespace: stalwart
labels:
helm.sh/chart: volsync-target-config-1.1.1
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "1.1.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart-config-backup-secret-external
spec:
secretStoreRef:
kind: ClusterSecretStore
name: openbao
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "s3:{{ .ENDPOINT }}/{{ .BUCKET }}/cl01tl/stalwart/stalwart-config"
data:
- secretKey: ENDPOINT
remoteRef:
key: /digital-ocean/config
property: ENDPOINT
- secretKey: BUCKET
remoteRef:
key: /digital-ocean/home-infra/volsync-backups
property: BUCKET
- secretKey: RESTIC_PASSWORD
remoteRef:
key: /digital-ocean/home-infra/volsync-backups
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
key: /digital-ocean/home-infra/volsync-backups
property: AWS_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
key: /digital-ocean/home-infra/volsync-backups
property: AWS_ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
key: /digital-ocean/home-infra/volsync-backups
property: AWS_SECRET_ACCESS_KEY


@@ -1,47 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: stalwart-config-backup-secret-local
namespace: stalwart
labels:
helm.sh/chart: volsync-target-config-1.1.1
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "1.1.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart-config-backup-secret-local
spec:
secretStoreRef:
kind: ClusterSecretStore
name: openbao
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "s3:{{ .ENDPOINT }}/{{ .BUCKET }}/cl01tl/stalwart/stalwart-config"
data:
- secretKey: ENDPOINT
remoteRef:
key: /garage/config
property: ENDPOINT_LOCAL
- secretKey: BUCKET
remoteRef:
key: /garage/home-infra/volsync-backups
property: BUCKET
- secretKey: RESTIC_PASSWORD
remoteRef:
key: /garage/home-infra/volsync-backups
property: RESTIC_PASSWORD_LOCAL
- secretKey: AWS_DEFAULT_REGION
remoteRef:
key: /garage/home-infra/volsync-backups
property: ACCESS_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
key: /garage/home-infra/volsync-backups
property: ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
key: /garage/home-infra/volsync-backups
property: ACCESS_SECRET_KEY


@@ -1,47 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: stalwart-config-backup-secret-remote
namespace: stalwart
labels:
helm.sh/chart: volsync-target-config-1.1.1
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "1.1.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart-config-backup-secret-remote
spec:
secretStoreRef:
kind: ClusterSecretStore
name: openbao
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "s3:{{ .ENDPOINT }}/{{ .BUCKET }}/cl01tl/stalwart/stalwart-config"
data:
- secretKey: ENDPOINT
remoteRef:
key: /garage/config
property: ENDPOINT_REMOTE
- secretKey: BUCKET
remoteRef:
key: /garage/home-infra/volsync-backups
property: BUCKET
- secretKey: RESTIC_PASSWORD
remoteRef:
key: /garage/home-infra/volsync-backups
property: RESTIC_PASSWORD_REMOTE
- secretKey: AWS_DEFAULT_REGION
remoteRef:
key: /garage/home-infra/volsync-backups
property: ACCESS_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
key: /garage/home-infra/volsync-backups
property: ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
key: /garage/home-infra/volsync-backups
property: ACCESS_SECRET_KEY


@@ -1,26 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: stalwart-elasticsearch-config
namespace: stalwart
labels:
app.kubernetes.io/name: stalwart-elasticsearch-config
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
spec:
secretStoreRef:
kind: ClusterSecretStore
name: openbao
data:
- secretKey: username
remoteRef:
key: /cl01tl/stalwart/elasticsearch
property: username
- secretKey: password
remoteRef:
key: /cl01tl/stalwart/elasticsearch
property: password
- secretKey: roles
remoteRef:
key: /cl01tl/stalwart/elasticsearch
property: roles


@@ -1,29 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: stalwart-postgresql-18-backup-garage-local-secret
namespace: stalwart
labels:
app.kubernetes.io/name: stalwart-postgresql-18-backup-garage-local-secret
helm.sh/chart: postgres-18-cluster-7.12.1
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "7.12.1"
app.kubernetes.io/managed-by: Helm
spec:
secretStoreRef:
kind: ClusterSecretStore
name: openbao
data:
- secretKey: ACCESS_REGION
remoteRef:
key: /garage/home-infra/postgres-backups
property: ACCESS_REGION
- secretKey: ACCESS_KEY_ID
remoteRef:
key: /garage/home-infra/postgres-backups
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
key: /garage/home-infra/postgres-backups
property: ACCESS_SECRET_KEY


@@ -1,29 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: stalwart-postgresql-18-recovery-secret
namespace: stalwart
labels:
helm.sh/chart: postgres-18-cluster-7.12.1
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "7.12.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart-postgresql-18-recovery-secret
spec:
secretStoreRef:
kind: ClusterSecretStore
name: openbao
data:
- secretKey: ACCESS_REGION
remoteRef:
key: /garage/home-infra/postgres-backups
property: ACCESS_REGION
- secretKey: ACCESS_KEY_ID
remoteRef:
key: /garage/home-infra/postgres-backups
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
key: /garage/home-infra/postgres-backups
property: ACCESS_SECRET_KEY


@@ -1,30 +0,0 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: stalwart
labels:
app.kubernetes.io/instance: stalwart
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart
helm.sh/chart: stalwart-4.6.2
namespace: stalwart
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- "stalwart.alexlebens.net"
rules:
- backendRefs:
- group: ""
kind: Service
name: stalwart
namespace: stalwart
port: 80
weight: 1
matches:
- path:
type: PathPrefix
value: /


@@ -1,11 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: stalwart
labels:
app.kubernetes.io/name: stalwart
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
pod-security.kubernetes.io/audit: privileged
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/warn: privileged


@@ -1,33 +0,0 @@
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: stalwart-postgresql-18-backup-garage-local
namespace: stalwart
labels:
app.kubernetes.io/name: stalwart-postgresql-18-backup-garage-local
helm.sh/chart: postgres-18-cluster-7.12.1
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "7.12.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 7d
instanceSidecarConfiguration:
env:
- name: AWS_REQUEST_CHECKSUM_CALCULATION
value: when_required
- name: AWS_RESPONSE_CHECKSUM_VALIDATION
value: when_required
configuration:
destinationPath: s3://postgres-backups/cl01tl/stalwart/stalwart-postgresql-18-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: stalwart-postgresql-18-backup-garage-local-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: stalwart-postgresql-18-backup-garage-local-secret
key: ACCESS_SECRET_KEY
region:
name: stalwart-postgresql-18-backup-garage-local-secret
key: ACCESS_REGION


@@ -1,32 +0,0 @@
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "stalwart-postgresql-18-recovery"
namespace: stalwart
labels:
helm.sh/chart: postgres-18-cluster-7.12.1
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "7.12.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: "stalwart-postgresql-18-recovery"
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/stalwart/stalwart-postgresql-18-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: stalwart-postgresql-18-recovery-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: stalwart-postgresql-18-recovery-secret
key: ACCESS_SECRET_KEY
region:
name: stalwart-postgresql-18-recovery-secret
key: ACCESS_REGION


@@ -1,17 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: stalwart-config
labels:
app.kubernetes.io/instance: stalwart
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart
helm.sh/chart: stalwart-4.6.2
namespace: stalwart
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"


@@ -1,23 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: stalwart-valkey
labels:
helm.sh/chart: valkey-0.9.4
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: stalwart
app.kubernetes.io/version: "9.0.3"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: valkey
app.kubernetes.io/component: podmonitor
spec:
podMetricsEndpoints:
- port: metrics
interval: 30s
namespaceSelector:
matchNames:
- stalwart
selector:
matchLabels:
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: stalwart


@@ -1,165 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: elasticsearch
namespace: stalwart
labels:
app.kubernetes.io/name: elasticsearch
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
spec:
groups:
- name: ElasticsearchExporter
rules:
- alert: ElasticsearchHeapUsageTooHigh
expr: (elasticsearch_jvm_memory_used_bytes{area="heap"} / elasticsearch_jvm_memory_max_bytes{area="heap"}) * 100 > 90 and elasticsearch_jvm_memory_max_bytes{area="heap"} > 0
for: 2m
labels:
severity: critical
annotations:
summary: Elasticsearch Heap Usage Too High (instance {{ $labels.instance }})
description: "The heap usage is over 90%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchHeapUsageWarning
expr: (elasticsearch_jvm_memory_used_bytes{area="heap"} / elasticsearch_jvm_memory_max_bytes{area="heap"}) * 100 > 80 and elasticsearch_jvm_memory_max_bytes{area="heap"} > 0
for: 2m
labels:
severity: warning
annotations:
summary: Elasticsearch Heap Usage warning (instance {{ $labels.instance }})
description: "The heap usage is over 80%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchDiskOutOfSpace
expr: elasticsearch_filesystem_data_available_bytes / elasticsearch_filesystem_data_size_bytes * 100 < 10 and elasticsearch_filesystem_data_size_bytes > 0
for: 0m
labels:
severity: critical
annotations:
summary: Elasticsearch disk out of space (instance {{ $labels.instance }})
description: "The disk usage is over 90%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchDiskSpaceLow
expr: elasticsearch_filesystem_data_available_bytes / elasticsearch_filesystem_data_size_bytes * 100 < 20 and elasticsearch_filesystem_data_size_bytes > 0
for: 2m
labels:
severity: warning
annotations:
summary: Elasticsearch disk space low (instance {{ $labels.instance }})
description: "The disk usage is over 80%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchClusterRed
expr: elasticsearch_cluster_health_status{color="red"} == 1
for: 0m
labels:
severity: critical
annotations:
summary: Elasticsearch Cluster Red (instance {{ $labels.instance }})
description: "Elastic Cluster Red status\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchClusterYellow
expr: elasticsearch_cluster_health_status{color="yellow"} == 1
for: 0m
labels:
severity: warning
annotations:
summary: Elasticsearch Cluster Yellow (instance {{ $labels.instance }})
description: "Elastic Cluster Yellow status\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchHealthyNodes
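      # note: the threshold of 3 assumes a three-node cluster; adjust if the topology differs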
expr: elasticsearch_cluster_health_number_of_nodes < 3
for: 1m
labels:
severity: critical
annotations:
summary: Elasticsearch Healthy Nodes (instance {{ $labels.instance }})
description: "Missing node in Elasticsearch cluster\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchHealthyDataNodes
expr: elasticsearch_cluster_health_number_of_data_nodes < 3
for: 1m
labels:
severity: critical
annotations:
summary: Elasticsearch Healthy Data Nodes (instance {{ $labels.instance }})
description: "Missing data node in Elasticsearch cluster\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchRelocatingShards
expr: elasticsearch_cluster_health_relocating_shards > 0
for: 0m
labels:
severity: info
annotations:
summary: Elasticsearch relocating shards (instance {{ $labels.instance }})
description: "Elasticsearch is relocating shards\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchRelocatingShardsTooLong
expr: elasticsearch_cluster_health_relocating_shards > 0
for: 15m
labels:
severity: warning
annotations:
summary: Elasticsearch relocating shards too long (instance {{ $labels.instance }})
description: "Elasticsearch has been relocating shards for 15min\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchInitializingShards
expr: elasticsearch_cluster_health_initializing_shards > 0
for: 0m
labels:
severity: info
annotations:
summary: Elasticsearch initializing shards (instance {{ $labels.instance }})
description: "Elasticsearch is initializing shards\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchInitializingShardsTooLong
expr: elasticsearch_cluster_health_initializing_shards > 0
for: 15m
labels:
severity: warning
annotations:
summary: Elasticsearch initializing shards too long (instance {{ $labels.instance }})
description: "Elasticsearch has been initializing shards for 15 min\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchUnassignedShards
expr: elasticsearch_cluster_health_unassigned_shards > 0
for: 2m
labels:
severity: critical
annotations:
summary: Elasticsearch unassigned shards (instance {{ $labels.instance }})
description: "Elasticsearch has unassigned shards\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchPendingTasks
expr: elasticsearch_cluster_health_number_of_pending_tasks > 0
for: 15m
labels:
severity: warning
annotations:
summary: Elasticsearch pending tasks (instance {{ $labels.instance }})
description: "Elasticsearch has pending tasks. Cluster works slowly.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchNoNewDocuments
expr: increase(elasticsearch_indices_indexing_index_total{es_data_node="true"}[10m]) < 1
for: 0m
labels:
severity: warning
annotations:
summary: Elasticsearch no new documents (instance {{ $labels.instance }})
description: "No new documents for 10 min!\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchHighIndexingLatency
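      # note: rate(time_total)/rate(index_total) gives mean seconds per indexing op; the "and" clause guards against 0/0 when no indexing occurs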
expr: rate(elasticsearch_indices_indexing_index_time_seconds_total[5m]) / rate(elasticsearch_indices_indexing_index_total[5m]) > 0.01 and rate(elasticsearch_indices_indexing_index_total[5m]) > 0
for: 10m
labels:
severity: warning
annotations:
summary: Elasticsearch High Indexing Latency (instance {{ $labels.instance }})
description: "The indexing latency on Elasticsearch cluster is higher than the threshold (current value: {{ $value }}s).\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchHighIndexingRate
expr: sum(rate(elasticsearch_indices_indexing_index_total[1m]))> 10000
for: 5m
labels:
severity: warning
annotations:
summary: Elasticsearch High Indexing Rate (instance {{ $labels.instance }})
description: "The indexing rate on Elasticsearch cluster is higher than the threshold.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchHighQueryRate
expr: sum(rate(elasticsearch_indices_search_query_total[1m])) > 100
for: 5m
labels:
severity: warning
annotations:
summary: Elasticsearch High Query Rate (instance {{ $labels.instance }})
description: "The query rate on Elasticsearch cluster is higher than the threshold.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchHighQueryLatency
expr: rate(elasticsearch_indices_search_query_time_seconds[1m]) / rate(elasticsearch_indices_search_query_total[1m]) > 1 and rate(elasticsearch_indices_search_query_total[1m]) > 0
for: 5m
labels:
severity: warning
annotations:
summary: Elasticsearch High Query Latency (instance {{ $labels.instance }})
description: "The query latency on Elasticsearch cluster is higher than the threshold (current value: {{ $value }}s).\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

View File

@@ -1,30 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: stalwart-config-backup-source-local
namespace: stalwart
labels:
helm.sh/chart: volsync-target-config-1.1.1
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "1.1.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart-config-backup-source-local
spec:
groups:
- name: volsync.alerts
rules:
- alert: VolSyncBackupPodFailed
expr: |
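          # container exit codes joined with pod ownership, restricted to volsync-* Jobs; group_left pulls the Job name into the alert labels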
(kube_pod_container_status_last_terminated_exitcode > 0)
* on(pod, namespace) group_left(owner_name)
kube_pod_owner{owner_kind="Job", owner_name=~"volsync-.*"}
for: 1m
labels:
severity: critical
annotations:
summary: "VolSync Backup Pod failed in {{ $labels.namespace }}"
description: |
A pod for the VolSync backup of PVC 'stalwart-config' failed with exit code {{ $value }}.
Job: {{ $labels.owner_name }}
Namespace: {{ $labels.namespace }}

View File

@@ -1,270 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: stalwart-postgresql-18-alert-rules
namespace: stalwart
labels:
app.kubernetes.io/name: stalwart-postgresql-18-alert-rules
helm.sh/chart: postgres-18-cluster-7.12.1
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "7.12.1"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/stalwart-postgresql-18
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
        summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total{namespace="stalwart"} > 300
for: 1m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-18-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks{namespace="stalwart"} > 10
for: 1m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-18-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
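        # per job: streaming standbys minus standbys with an active WAL receiver; a result below 1 means no ready standby remains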
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="stalwart"} - cnpg_pg_replication_is_wal_receiver_up{namespace="stalwart"}) < 1
for: 5m
labels:
severity: critical
namespace: stalwart
cnpg_cluster: stalwart-postgresql-18-cluster
- alert: CNPGClusterHAWarning
annotations:
        summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
          need some time to catch up with the cluster primary instance.
          This alert will fire constantly if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="stalwart"} - cnpg_pg_replication_is_wal_receiver_up{namespace="stalwart"}) < 2
for: 5m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-18-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-18-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
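        # connections per pod divided by max_connections; the regex matches only numbered cluster instance pods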
sum by (pod) (cnpg_backends_total{namespace="stalwart", pod=~"stalwart-postgresql-18-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="stalwart", pod=~"stalwart-postgresql-18-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: stalwart
cnpg_cluster: stalwart-postgresql-18-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-18-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="stalwart", pod=~"stalwart-postgresql-18-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="stalwart", pod=~"stalwart-postgresql-18-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-18-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-18-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="stalwart",pod=~"stalwart-postgresql-18-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-18-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-18-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="stalwart", pod=~"stalwart-postgresql-18-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-18-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
        summary: CNPG Cluster has a query running for longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds{namespace="stalwart"} > 300
for: 1m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-18-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-18-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
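        # three OR'd checks: data PVCs, -wal PVCs, and -tbs tablespace PVCs, each alerting above 90% usage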
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-18-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-18-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-18-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-18-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"stalwart-postgresql-18-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: stalwart
cnpg_cluster: stalwart-postgresql-18-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-18-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
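        # same three PVC checks as the critical rule, with a 70% usage threshold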
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-18-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-18-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-18-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-18-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"stalwart-postgresql-18-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-18-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-18-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
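        # the "OR on() vector(0)" fallback makes the count 0 when no instance reports metrics at all, so the alert still fires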
(count(cnpg_collector_up{namespace="stalwart",pod=~"stalwart-postgresql-18-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: stalwart
cnpg_cluster: stalwart-postgresql-18-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
        summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age{namespace="stalwart"} > 300000000
for: 1m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-18-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag{namespace="stalwart"} > 300
for: 1m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-18-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
        summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery{namespace="stalwart"} > cnpg_pg_replication_is_wal_receiver_up{namespace="stalwart"}
for: 1m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-18-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
        summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-18-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
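        # chained comparison: keeps the distinct-zone count only while it is below 3, i.e. instances span fewer than 3 zones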
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="stalwart", pod=~"stalwart-postgresql-18-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-18-cluster

View File

@@ -1,47 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: stalwart-valkey
labels:
helm.sh/chart: valkey-0.9.4
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: stalwart
app.kubernetes.io/version: "9.0.3"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: valkey
spec:
groups:
- name: stalwart-valkey
rules:
- alert: ValkeyDown
annotations:
description: Valkey instance {{ $labels.instance }} is down.
summary: Valkey instance {{ $labels.instance }} down
expr: |
redis_up{service="stalwart-valkey-metrics"} == 0
for: 2m
labels:
severity: error
- alert: ValkeyMemoryHigh
annotations:
description: |
Valkey instance {{ $labels.instance }} is using {{ $value }}% of its available memory.
summary: Valkey instance {{ $labels.instance }} is using too much memory
expr: |
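          # chained comparison keeps usage in (90, 100]; instances without a memory limit (max 0 -> Inf) drop out at "<= 100"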
redis_memory_used_bytes{service="stalwart-valkey-metrics"} * 100
/
redis_memory_max_bytes{service="stalwart-valkey-metrics"}
> 90 <= 100
for: 2m
labels:
severity: error
- alert: ValkeyKeyEviction
annotations:
description: |
Valkey instance {{ $labels.instance }} has evicted {{ $value }} keys in the last 5 minutes.
summary: Valkey instance {{ $labels.instance }} has evicted keys
expr: |
increase(redis_evicted_keys_total{service="stalwart-valkey-metrics"}[5m]) > 0
for: 1s
labels:
severity: error

View File

@@ -1,29 +0,0 @@
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: stalwart-config-backup-source-external
namespace: stalwart
labels:
helm.sh/chart: volsync-target-config-1.1.1
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "1.1.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart-config-backup
spec:
sourcePVC: stalwart-config
trigger:
schedule: 28 13 * * *
restic:
pruneIntervalDays: 7
repository: stalwart-config-backup-secret-external
retain:
daily: 7
hourly: 0
monthly: 3
weekly: 4
yearly: 1
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 1Gi

View File

@@ -1,29 +0,0 @@
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: stalwart-config-backup-source-local
namespace: stalwart
labels:
helm.sh/chart: volsync-target-config-1.1.1
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "1.1.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart-config-backup-source-local
spec:
sourcePVC: stalwart-config
trigger:
schedule: 28 11 * * *
restic:
pruneIntervalDays: 7
repository: stalwart-config-backup-secret-local
retain:
daily: 7
hourly: 0
monthly: 3
weekly: 4
yearly: 1
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 1Gi

View File

@@ -1,29 +0,0 @@
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: stalwart-config-backup-source-remote
namespace: stalwart
labels:
helm.sh/chart: volsync-target-config-1.1.1
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "1.1.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart-config-backup
spec:
sourcePVC: stalwart-config
trigger:
schedule: 28 12 * * *
restic:
pruneIntervalDays: 7
repository: stalwart-config-backup-secret-remote
retain:
daily: 7
hourly: 0
monthly: 3
weekly: 4
yearly: 1
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 1Gi

View File

@@ -1,24 +0,0 @@
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "stalwart-postgresql-18-scheduled-backup-live-backup"
namespace: stalwart
labels:
app.kubernetes.io/name: "stalwart-postgresql-18-scheduled-backup-live-backup"
helm.sh/chart: postgres-18-cluster-7.12.1
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "7.12.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 5 16 * * *"
backupOwnerReference: self
cluster:
name: stalwart-postgresql-18-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "stalwart-postgresql-18-backup-garage-local"

View File

@@ -1,22 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: stalwart-metrics
labels:
app.kubernetes.io/instance: stalwart
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart
app.kubernetes.io/service: stalwart-metrics
helm.sh/chart: stalwart-4.6.2
namespace: stalwart
spec:
type: ClusterIP
ports:
- port: 9114
targetPort: 9114
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: metrics
app.kubernetes.io/instance: stalwart
app.kubernetes.io/name: stalwart

View File

@@ -1,23 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: stalwart-valkey-headless
labels:
helm.sh/chart: valkey-0.9.4
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: stalwart
app.kubernetes.io/version: "9.0.3"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: headless
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: tcp
port: 6379
targetPort: tcp
protocol: TCP
selector:
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: stalwart

View File

@@ -1,23 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: stalwart-valkey-metrics
labels:
helm.sh/chart: valkey-0.9.4
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: stalwart
app.kubernetes.io/version: "9.0.3"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: metrics
app.kubernetes.io/part-of: valkey
spec:
type: ClusterIP
ports:
- name: metrics
port: 9121
protocol: TCP
targetPort: metrics
selector:
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: stalwart

View File

@@ -1,21 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: stalwart-valkey-read
labels:
helm.sh/chart: valkey-0.9.4
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: stalwart
app.kubernetes.io/version: "9.0.3"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: read
spec:
type: ClusterIP
ports:
- name: tcp
port: 6379
targetPort: tcp
protocol: TCP
selector:
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: stalwart

View File

@@ -1,22 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: stalwart-valkey
labels:
helm.sh/chart: valkey-0.9.4
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: stalwart
app.kubernetes.io/version: "9.0.3"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: primary
spec:
type: ClusterIP
ports:
- port: 6379
targetPort: tcp
protocol: TCP
name: tcp
selector:
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: stalwart
statefulset.kubernetes.io/pod-name: stalwart-valkey-0

View File

@@ -1,38 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: stalwart
labels:
app.kubernetes.io/instance: stalwart
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart
app.kubernetes.io/service: stalwart
helm.sh/chart: stalwart-4.6.2
namespace: stalwart
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8080
protocol: TCP
name: http
- port: 143
targetPort: 143
protocol: TCP
name: imap
- port: 993
targetPort: 993
protocol: TCP
name: imaps
- port: 25
targetPort: 25
protocol: TCP
name: smtp
- port: 465
targetPort: 465
protocol: TCP
name: smtps
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: stalwart
app.kubernetes.io/name: stalwart

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: stalwart-valkey
labels:
helm.sh/chart: valkey-0.9.4
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: stalwart
app.kubernetes.io/version: "9.0.3"
app.kubernetes.io/managed-by: Helm
automountServiceAccountToken: false

View File

@@ -1,24 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: stalwart-valkey
labels:
helm.sh/chart: valkey-0.9.4
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: stalwart
app.kubernetes.io/version: "9.0.3"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: valkey
app.kubernetes.io/component: service-monitor
spec:
endpoints:
- port: metrics
interval: 30s
namespaceSelector:
matchNames:
- stalwart
selector:
matchLabels:
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: stalwart
app.kubernetes.io/component: metrics

View File

@@ -1,24 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: stalwart
labels:
app.kubernetes.io/instance: stalwart
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart
helm.sh/chart: stalwart-4.6.2
namespace: stalwart
spec:
jobLabel: stalwart
namespaceSelector:
matchNames:
- stalwart
selector:
matchLabels:
app.kubernetes.io/instance: stalwart-metrics
app.kubernetes.io/name: stalwart-metrics
endpoints:
- interval: 30s
path: /metrics
port: metrics
scrapeTimeout: 10s

View File

@@ -1,133 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: stalwart-valkey
labels:
helm.sh/chart: valkey-0.9.4
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: stalwart
app.kubernetes.io/version: "9.0.3"
app.kubernetes.io/managed-by: Helm
spec:
serviceName: stalwart-valkey-headless
replicas: 3
podManagementPolicy: OrderedReady
selector:
matchLabels:
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: stalwart
volumeClaimTemplates:
- metadata:
name: valkey-data
spec:
accessModes:
- ReadWriteOnce
storageClassName: "ceph-block"
resources:
requests:
storage: "1Gi"
template:
metadata:
labels:
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: stalwart
annotations:
checksum/initconfig: "4c6b93b8a76c270b0b7536ec44133194"
spec:
automountServiceAccountToken: false
serviceAccountName: stalwart-valkey
securityContext:
fsGroup: 1000
runAsGroup: 1000
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
initContainers:
- name: stalwart-valkey-init
image: docker.io/valkey/valkey:9.0.3@sha256:3b55fbaa0cd93cf0d9d961f405e4dfcc70efe325e2d84da207a0a8e6d8fde4f9
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
command: ["/scripts/init.sh"]
env:
- name: POD_INDEX
valueFrom:
fieldRef:
fieldPath: metadata.labels['apps.kubernetes.io/pod-index']
volumeMounts:
- name: valkey-data
mountPath: /data
- name: scripts
mountPath: /scripts
containers:
- name: stalwart-valkey
image: docker.io/valkey/valkey:9.0.3@sha256:3b55fbaa0cd93cf0d9d961f405e4dfcc70efe325e2d84da207a0a8e6d8fde4f9
imagePullPolicy: IfNotPresent
command: ["valkey-server"]
args: ["/data/conf/valkey.conf"]
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
env:
- name: POD_INDEX
valueFrom:
fieldRef:
fieldPath: metadata.labels['apps.kubernetes.io/pod-index']
- name: VALKEY_LOGLEVEL
value: "notice"
ports:
- name: tcp
containerPort: 6379
protocol: TCP
startupProbe:
exec:
command: ["sh", "-c", "valkey-cli ping"]
livenessProbe:
exec:
command: ["sh", "-c", "valkey-cli ping"]
resources:
requests:
cpu: 10m
memory: 20Mi
volumeMounts:
- name: valkey-data
mountPath: /data
- name: metrics
image: ghcr.io/oliver006/redis_exporter:v1.83.0@sha256:e8c209894d4c0cc55b1259ddd47e0b769ad1ff864b356736ee885462a3b0e48c
imagePullPolicy: "IfNotPresent"
ports:
- name: metrics
containerPort: 9121
startupProbe:
tcpSocket:
port: metrics
livenessProbe:
tcpSocket:
port: metrics
readinessProbe:
httpGet:
path: /
port: metrics
resources:
requests:
cpu: 1m
memory: 10M
env:
- name: REDIS_ALIAS
value: stalwart-valkey
volumes:
- name: scripts
configMap:
name: stalwart-valkey-init-scripts
defaultMode: 0555