Automated Manifest Update #2173

Merged
alexlebens merged 1 commit from auto/update-manifests into manifests 2025-12-02 01:49:16 +00:00
20 changed files with 15533 additions and 67 deletions
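
Each document below carries a "# Source:" comment, the header emitted by helm template, so these files are chart-rendered output rather than hand-written YAML. As a review aid only — not part of this repository's tooling — the following sketch assumes the rendered YAML has been concatenated locally into a file named manifests.yaml and cross-checks that every PVC a Deployment mounts via claimName is declared in the same stream:

# Review sketch (assumptions: pyyaml installed, rendered output saved as manifests.yaml;
# the filename and this check are illustrative, not taken from the repo).
import yaml

def check_claims(path="manifests.yaml"):
    with open(path) as f:
        docs = [d for d in yaml.safe_load_all(f) if d]
    # Collect every PersistentVolumeClaim declared in the rendered stream.
    pvcs = {d["metadata"]["name"] for d in docs if d.get("kind") == "PersistentVolumeClaim"}
    # Flag Deployment volumes that reference a claimName with no matching PVC.
    for d in docs:
        if d.get("kind") != "Deployment":
            continue
        for v in d["spec"]["template"]["spec"].get("volumes", []):
            claim = v.get("persistentVolumeClaim", {}).get("claimName")
            if claim and claim not in pvcs:
                print(f'{d["metadata"]["name"]}: volume "{v["name"]}" references missing PVC "{claim}"')

if __name__ == "__main__":
    check_claims()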


@@ -0,0 +1,471 @@
---
# Source: audiobookshelf/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: audiobookshelf-nfs-storage
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-nfs-storage
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: audiobookshelf/charts/audiobookshelf/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: audiobookshelf-config
labels:
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: audiobookshelf
helm.sh/chart: audiobookshelf-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: audiobookshelf
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "2Gi"
storageClassName: "ceph-block"
---
# Source: audiobookshelf/charts/audiobookshelf/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: audiobookshelf-metadata
labels:
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: audiobookshelf
helm.sh/chart: audiobookshelf-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: audiobookshelf
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: audiobookshelf/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: audiobookshelf-nfs-storage-backup
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-nfs-storage-backup
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
volumeMode: Filesystem
storageClassName: nfs-client
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
# Source: audiobookshelf/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: audiobookshelf-nfs-storage
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-nfs-storage
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
volumeName: audiobookshelf-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: audiobookshelf/charts/audiobookshelf/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: audiobookshelf
labels:
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: audiobookshelf
app.kubernetes.io/service: audiobookshelf
helm.sh/chart: audiobookshelf-4.4.0
namespace: audiobookshelf
spec:
type: ClusterIP
ports:
- port: 8000
targetPort: 8000
protocol: TCP
name: apprise
- port: 80
targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/name: audiobookshelf
---
# Source: audiobookshelf/charts/audiobookshelf/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: audiobookshelf
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: audiobookshelf
helm.sh/chart: audiobookshelf-4.4.0
namespace: audiobookshelf
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: audiobookshelf
app.kubernetes.io/instance: audiobookshelf
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/name: audiobookshelf
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PGID
value: "1000"
- name: PUID
value: "1000"
- name: APPRISE_STORAGE_MODE
value: memory
- name: APPRISE_STATEFUL_MODE
value: disabled
- name: APPRISE_WORKER_COUNT
value: "1"
- name: APPRISE_STATELESS_URLS
valueFrom:
secretKeyRef:
key: ntfy-url
name: audiobookshelf-apprise-config
image: caronc/apprise:1.2.6
imagePullPolicy: IfNotPresent
name: apprise-api
resources:
requests:
cpu: 10m
memory: 128Mi
- env:
- name: TZ
value: US/Central
image: ghcr.io/advplyr/audiobookshelf:2.30.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /mnt/store/
name: audiobooks
- mountPath: /metadata/backups
name: backup
- mountPath: /config
name: config
- mountPath: /metadata
name: metadata
volumes:
- name: audiobooks
persistentVolumeClaim:
claimName: audiobookshelf-nfs-storage
- name: backup
persistentVolumeClaim:
claimName: audiobookshelf-nfs-storage-backup
- name: config
persistentVolumeClaim:
claimName: audiobookshelf-config
- name: metadata
persistentVolumeClaim:
claimName: audiobookshelf-metadata
---
# Source: audiobookshelf/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: audiobookshelf-apprise-config
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-apprise-config
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ntfy-url
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/audiobookshelf/apprise
metadataPolicy: None
property: ntfy-url
---
# Source: audiobookshelf/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: audiobookshelf-config-backup-secret
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-config-backup-secret
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/audiobookshelf/audiobookshelf-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: audiobookshelf/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: audiobookshelf-metadata-backup-secret
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-metadata-backup-secret
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/audiobookshelf/audiobookshelf-metadata"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: audiobookshelf/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-audiobookshelf
namespace: audiobookshelf
labels:
app.kubernetes.io/name: http-route-audiobookshelf
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- audiobookshelf.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: audiobookshelf
port: 80
weight: 100
---
# Source: audiobookshelf/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: audiobookshelf-config-backup-source
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-config-backup-source
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
sourcePVC: audiobookshelf-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: audiobookshelf-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: audiobookshelf/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: audiobookshelf-metadata-backup-source
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-metadata-backup-source
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
sourcePVC: audiobookshelf-metadata
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: audiobookshelf-metadata-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: audiobookshelf/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: audiobookshelf-apprise
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-apprise
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
endpoints:
- port: apprise
interval: 30s
scrapeTimeout: 15s
path: /metrics
selector:
matchLabels:
app.kubernetes.io/name: audiobookshelf
app.kubernetes.io/instance: audiobookshelf


@@ -0,0 +1,946 @@
---
# Source: booklore/templates/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: booklore
annotations:
volsync.backube/privileged-movers: "true"
labels:
app.kubernetes.io/name: booklore
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
---
# Source: booklore/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: booklore-books-nfs-storage
namespace: booklore
labels:
app.kubernetes.io/name: booklore-books-nfs-storage
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Books
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: booklore/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: booklore-books-import-nfs-storage
namespace: booklore
labels:
app.kubernetes.io/name: booklore-books-import-nfs-storage
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Books Import
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: booklore/charts/booklore/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: booklore-config
labels:
app.kubernetes.io/instance: booklore
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: booklore
helm.sh/chart: booklore-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: booklore
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: booklore/charts/booklore/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: booklore-data
labels:
app.kubernetes.io/instance: booklore
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: booklore
helm.sh/chart: booklore-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: booklore
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: booklore/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: booklore-books-nfs-storage
namespace: booklore
labels:
app.kubernetes.io/name: booklore-books-nfs-storage
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
volumeName: booklore-books-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: booklore/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: booklore-books-import-nfs-storage
namespace: booklore
labels:
app.kubernetes.io/name: booklore-books-import-nfs-storage
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
volumeName: booklore-books-import-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: booklore/charts/booklore/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: booklore
labels:
app.kubernetes.io/instance: booklore
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: booklore
app.kubernetes.io/service: booklore
helm.sh/chart: booklore-4.4.0
namespace: booklore
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 6060
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: booklore
app.kubernetes.io/name: booklore
---
# Source: booklore/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: garage-ps10rp
namespace: booklore
labels:
app.kubernetes.io/name: garage-ps10rp
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
annotations:
tailscale.com/tailnet-fqdn: garage-ps10rp.boreal-beaufort.ts.net
spec:
externalName: placeholder
type: ExternalName
---
# Source: booklore/charts/booklore/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: booklore
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: booklore
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: booklore
helm.sh/chart: booklore-4.4.0
namespace: booklore
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: booklore
app.kubernetes.io/instance: booklore
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: booklore
app.kubernetes.io/name: booklore
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: America/Chicago
- name: DATABASE_URL
value: jdbc:mariadb://booklore-mariadb-cluster-primary.booklore:3306/booklore
- name: DATABASE_USERNAME
value: booklore
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: booklore-database-secret
- name: BOOKLORE_PORT
value: "6060"
- name: SWAGGER_ENABLED
value: "false"
image: ghcr.io/booklore-app/booklore:v1.12.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 50m
memory: 128Mi
volumeMounts:
- mountPath: /bookdrop
name: books-import
- mountPath: /app/data
name: config
- mountPath: /data
name: data
- mountPath: /bookdrop/ingest
name: ingest
volumes:
- emptyDir: {}
name: books-import
- name: config
persistentVolumeClaim:
claimName: booklore-config
- name: data
persistentVolumeClaim:
claimName: booklore-data
- name: ingest
persistentVolumeClaim:
claimName: booklore-books-import-nfs-storage
---
# Source: booklore/charts/mariadb-cluster/templates/database.yaml
apiVersion: k8s.mariadb.com/v1alpha1
kind: Database
metadata:
name: booklore-mariadb-cluster-booklore
namespace: booklore
labels:
helm.sh/chart: mariadb-cluster-25.10.2
app.kubernetes.io/name: mariadb-cluster
app.kubernetes.io/instance: booklore
app.kubernetes.io/version: "0.0.0"
app.kubernetes.io/managed-by: Helm
spec:
mariaDbRef:
name: booklore-mariadb-cluster
namespace: booklore
characterSet: utf8
cleanupPolicy: Delete
collate: utf8_general_ci
name: booklore
requeueInterval: 10h
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-database-secret
namespace: booklore
labels:
app.kubernetes.io/name: booklore-database-secret
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: password
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/booklore/database
metadataPolicy: None
property: password
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-data-replication-secret
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-replication-secret
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: psk.txt
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/booklore/replication
metadataPolicy: None
property: psk.txt
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-config-backup-secret
namespace: booklore
labels:
app.kubernetes.io/name: booklore-config-backup-secret
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/booklore/booklore-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_SECRET_ACCESS_KEY
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-data-backup-secret-local
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-backup-secret-local
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/booklore/booklore-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-local
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-local
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-data-backup-secret-remote
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-backup-secret-remote
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/booklore/booklore-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-remote
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-remote
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-data-backup-secret-external
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-backup-secret-external
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/booklore/booklore-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_SECRET_ACCESS_KEY
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-mariadb-cluster-backup-secret-external
namespace: booklore
labels:
app.kubernetes.io/name: booklore-mariadb-cluster-backup-secret-external
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: access
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/mariadb-backups
metadataPolicy: None
property: access
- secretKey: secret
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/mariadb-backups
metadataPolicy: None
property: secret
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-mariadb-cluster-backup-secret-garage
namespace: booklore
labels:
app.kubernetes.io/name: booklore-mariadb-cluster-backup-secret-garage
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: access
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/mariadb-backups
metadataPolicy: None
property: access
- secretKey: secret
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/mariadb-backups
metadataPolicy: None
property: secret
---
# Source: booklore/charts/mariadb-cluster/templates/grant.yaml
apiVersion: k8s.mariadb.com/v1alpha1
kind: Grant
metadata:
name: booklore-mariadb-cluster-booklore
namespace: booklore
labels:
helm.sh/chart: mariadb-cluster-25.10.2
app.kubernetes.io/name: mariadb-cluster
app.kubernetes.io/instance: booklore
app.kubernetes.io/version: "0.0.0"
app.kubernetes.io/managed-by: Helm
spec:
mariaDbRef:
name: booklore-mariadb-cluster
namespace: booklore
cleanupPolicy: Delete
database: booklore
grantOption: true
host: '%'
privileges:
- ALL PRIVILEGES
requeueInterval: 10h
retryInterval: 30s
table: '*'
username: booklore
---
# Source: booklore/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-booklore
namespace: booklore
labels:
app.kubernetes.io/name: http-route-booklore
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- booklore.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: booklore
port: 80
weight: 100
---
# Source: booklore/charts/mariadb-cluster/templates/mariadb.yaml
apiVersion: k8s.mariadb.com/v1alpha1
kind: MariaDB
metadata:
name: booklore-mariadb-cluster
namespace: booklore
labels:
helm.sh/chart: mariadb-cluster-25.10.2
app.kubernetes.io/name: mariadb-cluster
app.kubernetes.io/instance: booklore
app.kubernetes.io/version: "0.0.0"
app.kubernetes.io/managed-by: Helm
spec:
galera:
enabled: true
replicas: 3
rootPasswordSecretKeyRef:
generate: false
key: password
name: booklore-database-secret
storage:
size: 5Gi
---
# Source: booklore/charts/mariadb-cluster/templates/physicalbackup.yaml
apiVersion: k8s.mariadb.com/v1alpha1
kind: PhysicalBackup
metadata:
name: booklore-mariadb-cluster-backup-external
namespace: booklore
labels:
helm.sh/chart: mariadb-cluster-25.10.2
app.kubernetes.io/name: mariadb-cluster
app.kubernetes.io/instance: booklore
app.kubernetes.io/version: "0.0.0"
app.kubernetes.io/managed-by: Helm
spec:
mariaDbRef:
name: booklore-mariadb-cluster
namespace: booklore
compression: gzip
maxRetention: 720h
schedule:
cron: 0 0 * * 0
immediate: true
suspend: false
storage:
s3:
accessKeyIdSecretKeyRef:
key: access
name: booklore-mariadb-cluster-backup-secret-external
bucket: mariadb-backups-b230a2f5aecf080a4b372c08
endpoint: nyc3.digitaloceanspaces.com
prefix: cl01tl/booklore
region: us-east-1
secretAccessKeySecretKeyRef:
key: secret
name: booklore-mariadb-cluster-backup-secret-external
tls:
enabled: true
---
# Source: booklore/charts/mariadb-cluster/templates/physicalbackup.yaml
apiVersion: k8s.mariadb.com/v1alpha1
kind: PhysicalBackup
metadata:
name: booklore-mariadb-cluster-backup-garage
namespace: booklore
labels:
helm.sh/chart: mariadb-cluster-25.10.2
app.kubernetes.io/name: mariadb-cluster
app.kubernetes.io/instance: booklore
app.kubernetes.io/version: "0.0.0"
app.kubernetes.io/managed-by: Helm
spec:
mariaDbRef:
name: booklore-mariadb-cluster
namespace: booklore
compression: gzip
maxRetention: 360h
schedule:
cron: 0 0 * * *
immediate: true
suspend: false
storage:
s3:
accessKeyIdSecretKeyRef:
key: access
name: booklore-mariadb-cluster-backup-secret-garage
bucket: mariadb-backups
endpoint: garage-main.garage:3900
prefix: cl01tl/booklore
region: us-east-1
secretAccessKeySecretKeyRef:
key: secret
name: booklore-mariadb-cluster-backup-secret-garage
---
# Source: booklore/templates/replication-destination.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationDestination
metadata:
name: booklore-data-replication-destination
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-replication-destination
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
rsyncTLS:
copyMethod: Direct
accessModes: ["ReadWriteMany"]
destinationPVC: booklore-books-nfs-storage
keySecret: booklore-data-replication-secret
---
# Source: booklore/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-data-replication-source
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-replication-source
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
sourcePVC: booklore-data
trigger:
schedule: "0 0 * * *"
rsyncTLS:
keySecret: booklore-data-replication-secret
address: volsync-rsync-tls-dst-booklore-data-replication-destination
copyMethod: Snapshot
---
# Source: booklore/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-config-backup-source
namespace: booklore
labels:
app.kubernetes.io/name: booklore-config-backup-source
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
sourcePVC: booklore-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: booklore-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi
---
# Source: booklore/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-data-backup-source-local
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-backup-source-local
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
sourcePVC: booklore-data
trigger:
schedule: 0 2 * * *
restic:
pruneIntervalDays: 7
repository: booklore-data-backup-secret-local
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi
---
# Source: booklore/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-data-backup-source-remote
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-backup-source-remote
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
sourcePVC: booklore-data
trigger:
schedule: 0 3 * * *
restic:
pruneIntervalDays: 7
repository: booklore-data-backup-secret-remote
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi
---
# Source: booklore/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-data-backup-source-external
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-backup-source-external
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
sourcePVC: booklore-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: booklore-data-backup-secret-external
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi
---
# Source: booklore/charts/mariadb-cluster/templates/user.yaml
apiVersion: k8s.mariadb.com/v1alpha1
kind: User
metadata:
name: booklore-mariadb-cluster-booklore
namespace: booklore
labels:
helm.sh/chart: mariadb-cluster-25.10.2
app.kubernetes.io/name: mariadb-cluster
app.kubernetes.io/instance: booklore
app.kubernetes.io/version: "0.0.0"
app.kubernetes.io/managed-by: Helm
spec:
mariaDbRef:
name: booklore-mariadb-cluster
namespace: booklore
cleanupPolicy: Delete
host: '%'
name: booklore
passwordSecretKeyRef:
key: password
name: booklore-database-secret
requeueInterval: 10h
retryInterval: 30s

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,326 @@
---
# Source: jellyfin/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: jellyfin-nfs-storage
namespace: jellyfin
labels:
app.kubernetes.io/name: jellyfin-nfs-storage
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/part-of: jellyfin
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: jellyfin/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: jellyfin-youtube-nfs-storage
namespace: jellyfin
labels:
app.kubernetes.io/name: jellyfin-youtube-nfs-storage
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/part-of: jellyfin
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadOnlyMany
nfs:
path: /volume2/Storage/YouTube
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: jellyfin/charts/jellyfin/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: jellyfin-config
labels:
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: jellyfin
helm.sh/chart: jellyfin-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: jellyfin
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "100Gi"
storageClassName: "ceph-block"
---
# Source: jellyfin/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: jellyfin-nfs-storage
namespace: jellyfin
labels:
app.kubernetes.io/name: jellyfin-nfs-storage
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/part-of: jellyfin
spec:
volumeName: jellyfin-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: jellyfin/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: jellyfin-youtube-nfs-storage
namespace: jellyfin
labels:
app.kubernetes.io/name: jellyfin-youtube-nfs-storage
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/part-of: jellyfin
spec:
volumeName: jellyfin-youtube-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadOnlyMany
resources:
requests:
storage: 1Gi
---
# Source: jellyfin/charts/jellyfin/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: jellyfin
labels:
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: jellyfin
app.kubernetes.io/service: jellyfin
helm.sh/chart: jellyfin-4.4.0
namespace: jellyfin
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8096
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/name: jellyfin
---
# Source: jellyfin/charts/jellyfin/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: jellyfin
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: jellyfin
helm.sh/chart: jellyfin-4.4.0
namespace: jellyfin
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: jellyfin
app.kubernetes.io/instance: jellyfin
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/name: jellyfin
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: JELLYFIN_hostwebclient
value: "true"
- name: JELLYFIN_PublishedServerUrl
value: https://jellyfin.alexlebens.net/
image: ghcr.io/jellyfin/jellyfin:10.11.3
imagePullPolicy: IfNotPresent
name: main
resources:
limits:
gpu.intel.com/i915: 1
requests:
cpu: 1
gpu.intel.com/i915: 1
memory: 2Gi
volumeMounts:
- mountPath: /cache
name: cache
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- mountPath: /mnt/youtube
name: youtube
readOnly: true
volumes:
- emptyDir: {}
name: cache
- name: config
persistentVolumeClaim:
claimName: jellyfin-config
- name: media
persistentVolumeClaim:
claimName: jellyfin-nfs-storage
- name: youtube
persistentVolumeClaim:
claimName: jellyfin-youtube-nfs-storage
---
# Source: jellyfin/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: jellyfin-config-backup-secret
namespace: jellyfin
labels:
app.kubernetes.io/name: jellyfin-config-backup-secret
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/part-of: jellyfin
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/jellyfin/jellyfin-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: jellyfin/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-jellyfin
namespace: jellyfin
labels:
app.kubernetes.io/name: http-route-jellyfin
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/part-of: jellyfin
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- jellyfin.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: jellyfin
port: 80
weight: 100
---
# Source: jellyfin/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: jellyfin-config-backup-source
namespace: jellyfin
labels:
app.kubernetes.io/name: jellyfin-config-backup-source
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/part-of: jellyfin
spec:
sourcePVC: jellyfin-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: jellyfin-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi


@@ -0,0 +1,711 @@
---
# Source: karakeep/charts/meilisearch/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: karakeep-meilisearch
labels:
helm.sh/chart: meilisearch-0.17.1
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
app.kubernetes.io/version: "v1.18.0"
app.kubernetes.io/component: search-engine
app.kubernetes.io/part-of: meilisearch
app.kubernetes.io/managed-by: Helm
---
# Source: karakeep/charts/meilisearch/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: karakeep-meilisearch-environment
labels:
helm.sh/chart: meilisearch-0.17.1
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
app.kubernetes.io/version: "v1.18.0"
app.kubernetes.io/component: search-engine
app.kubernetes.io/part-of: meilisearch
app.kubernetes.io/managed-by: Helm
data:
MEILI_ENV: "production"
MEILI_EXPERIMENTAL_DUMPLESS_UPGRADE: "true"
MEILI_NO_ANALYTICS: "true"
MEILI_EXPERIMENTAL_ENABLE_METRICS: "true"
---
# Source: karakeep/charts/karakeep/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: karakeep
labels:
app.kubernetes.io/instance: karakeep
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: karakeep
helm.sh/chart: karakeep-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: karakeep
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: karakeep/charts/meilisearch/templates/pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: karakeep-meilisearch
labels:
helm.sh/chart: meilisearch-0.17.1
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
app.kubernetes.io/version: "v1.18.0"
app.kubernetes.io/component: search-engine
app.kubernetes.io/part-of: meilisearch
app.kubernetes.io/managed-by: Helm
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: karakeep/charts/karakeep/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: karakeep
labels:
app.kubernetes.io/instance: karakeep
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: karakeep
app.kubernetes.io/service: karakeep
helm.sh/chart: karakeep-4.4.0
namespace: karakeep
spec:
type: ClusterIP
ports:
- port: 9222
targetPort: 9222
protocol: TCP
name: chrome
- port: 3000
targetPort: 3000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: karakeep
app.kubernetes.io/name: karakeep
---
# Source: karakeep/charts/meilisearch/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: karakeep-meilisearch
labels:
helm.sh/chart: meilisearch-0.17.1
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
app.kubernetes.io/version: "v1.18.0"
app.kubernetes.io/component: search-engine
app.kubernetes.io/part-of: meilisearch
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 7700
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
---
# Source: karakeep/charts/cloudflared/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: karakeep-cloudflared
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: karakeep
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cloudflared
app.kubernetes.io/version: 2025.10.0
helm.sh/chart: cloudflared-1.23.0
namespace: karakeep
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: cloudflared
app.kubernetes.io/instance: karakeep
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: karakeep
app.kubernetes.io/name: cloudflared
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- tunnel
- --protocol
- http2
- --no-autoupdate
- run
- --token
- $(CF_MANAGED_TUNNEL_TOKEN)
env:
- name: CF_MANAGED_TUNNEL_TOKEN
valueFrom:
secretKeyRef:
key: cf-tunnel-token
name: karakeep-cloudflared-secret
image: cloudflare/cloudflared:2025.11.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: karakeep/charts/karakeep/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: karakeep
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: karakeep
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: karakeep
helm.sh/chart: karakeep-4.4.0
namespace: karakeep
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: karakeep
app.kubernetes.io/instance: karakeep
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: karakeep
app.kubernetes.io/name: karakeep
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- --no-sandbox
- --disable-gpu
- --disable-dev-shm-usage
- --remote-debugging-address=0.0.0.0
- --remote-debugging-port=9222
- --hide-scrollbars
image: gcr.io/zenika-hub/alpine-chrome:124
imagePullPolicy: IfNotPresent
name: chrome
resources:
requests:
cpu: 10m
memory: 128Mi
- env:
- name: DATA_DIR
value: /data
- name: DB_WAL_MODE
value: "true"
- name: NEXTAUTH_URL
value: https://karakeep.alexlebens.dev/
- name: NEXTAUTH_SECRET
valueFrom:
secretKeyRef:
key: key
name: karakeep-key-secret
- name: PROMETHEUS_AUTH_TOKEN
valueFrom:
secretKeyRef:
key: prometheus-token
name: karakeep-key-secret
- name: ASSET_STORE_S3_ENDPOINT
value: http://rook-ceph-rgw-ceph-objectstore.rook-ceph.svc:80
- name: ASSET_STORE_S3_REGION
value: us-east-1
- name: ASSET_STORE_S3_BUCKET
valueFrom:
configMapKeyRef:
key: BUCKET_NAME
name: ceph-bucket-karakeep
- name: ASSET_STORE_S3_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
name: ceph-bucket-karakeep
- name: ASSET_STORE_S3_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: AWS_SECRET_ACCESS_KEY
name: ceph-bucket-karakeep
- name: ASSET_STORE_S3_FORCE_PATH_STYLE
value: "true"
- name: MEILI_ADDR
value: http://karakeep-meilisearch.karakeep:7700
- name: MEILI_MASTER_KEY
valueFrom:
secretKeyRef:
key: MEILI_MASTER_KEY
name: karakeep-meilisearch-master-key-secret
- name: BROWSER_WEB_URL
value: http://karakeep.karakeep:9222
- name: DISABLE_SIGNUPS
value: "false"
- name: OAUTH_PROVIDER_NAME
value: Authentik
- name: OAUTH_WELLKNOWN_URL
value: https://auth.alexlebens.dev/application/o/karakeep/.well-known/openid-configuration
- name: OAUTH_SCOPE
value: openid email profile
- name: OAUTH_CLIENT_ID
valueFrom:
secretKeyRef:
key: AUTHENTIK_CLIENT_ID
name: karakeep-oidc-secret
- name: OAUTH_CLIENT_SECRET
valueFrom:
secretKeyRef:
key: AUTHENTIK_CLIENT_SECRET
name: karakeep-oidc-secret
- name: OLLAMA_BASE_URL
value: http://ollama-server-3.ollama:11434
- name: OLLAMA_KEEP_ALIVE
value: 5m
- name: INFERENCE_TEXT_MODEL
value: gemma3:4b
- name: INFERENCE_IMAGE_MODEL
value: granite3.2-vision:2b
- name: EMBEDDING_TEXT_MODEL
value: mxbai-embed-large
- name: INFERENCE_JOB_TIMEOUT_SEC
value: "720"
image: ghcr.io/karakeep-app/karakeep:0.28.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /data
name: data
volumes:
- name: data
persistentVolumeClaim:
claimName: karakeep
---
# Source: karakeep/charts/meilisearch/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: karakeep-meilisearch
labels:
helm.sh/chart: meilisearch-0.17.1
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
app.kubernetes.io/version: "v1.18.0"
app.kubernetes.io/component: search-engine
app.kubernetes.io/part-of: meilisearch
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
serviceName: karakeep-meilisearch
selector:
matchLabels:
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
template:
metadata:
labels:
helm.sh/chart: meilisearch-0.17.1
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
app.kubernetes.io/version: "v1.18.0"
app.kubernetes.io/component: search-engine
app.kubernetes.io/part-of: meilisearch
app.kubernetes.io/managed-by: Helm
annotations:
checksum/config: e3114e6f2910e1678611b9df77ee9eb63744c6e143f716dd8aa5f015391a2ef3
spec:
serviceAccountName: karakeep-meilisearch
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
volumes:
- name: tmp
emptyDir: {}
- name: data
persistentVolumeClaim:
claimName: karakeep-meilisearch
containers:
- name: meilisearch
image: "getmeili/meilisearch:v1.18.0"
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
volumeMounts:
- name: tmp
mountPath: /tmp
- name: data
mountPath: /meili_data
envFrom:
- configMapRef:
name: karakeep-meilisearch-environment
- secretRef:
name: karakeep-meilisearch-master-key-secret
ports:
- name: http
containerPort: 7700
protocol: TCP
startupProbe:
httpGet:
path: /health
port: http
periodSeconds: 1
initialDelaySeconds: 1
failureThreshold: 60
timeoutSeconds: 1
livenessProbe:
httpGet:
path: /health
port: http
periodSeconds: 10
initialDelaySeconds: 0
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /health
port: http
periodSeconds: 10
initialDelaySeconds: 0
timeoutSeconds: 10
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: karakeep/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: karakeep-key-secret
namespace: karakeep
labels:
app.kubernetes.io/name: karakeep-key-secret
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/karakeep/key
metadataPolicy: None
property: key
- secretKey: prometheus-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/karakeep/key
metadataPolicy: None
property: prometheus-token
---
# Source: karakeep/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: karakeep-oidc-secret
namespace: karakeep
labels:
app.kubernetes.io/name: karakeep-oidc-secret
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: AUTHENTIK_CLIENT_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/karakeep
metadataPolicy: None
property: client
- secretKey: AUTHENTIK_CLIENT_SECRET
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/karakeep
metadataPolicy: None
property: secret
---
# Source: karakeep/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: karakeep-meilisearch-master-key-secret
namespace: karakeep
labels:
app.kubernetes.io/name: karakeep-meilisearch-master-key-secret
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: MEILI_MASTER_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/karakeep/meilisearch
metadataPolicy: None
property: MEILI_MASTER_KEY
---
# Source: karakeep/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: karakeep-cloudflared-secret
namespace: karakeep
labels:
app.kubernetes.io/name: karakeep-cloudflared-secret
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/karakeep
metadataPolicy: None
property: token
---
# Source: karakeep/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: karakeep-data-backup-secret
namespace: karakeep
labels:
app.kubernetes.io/name: karakeep-data-backup-secret
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/karakeep/karakeep-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: karakeep/templates/object-bucket-claim.yaml
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
name: ceph-bucket-karakeep
labels:
app.kubernetes.io/name: ceph-bucket-karakeep
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
generateBucketName: bucket-karakeep
storageClassName: ceph-bucket
---
# Source: karakeep/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: karakeep-data-backup-source
namespace: karakeep
labels:
app.kubernetes.io/name: karakeep-data-backup-source
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
sourcePVC: karakeep-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: karakeep-data-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: karakeep/charts/meilisearch/templates/serviceMonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: karakeep-meilisearch
namespace: karakeep
labels:
helm.sh/chart: meilisearch-0.17.1
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
app.kubernetes.io/version: "v1.18.0"
app.kubernetes.io/component: search-engine
app.kubernetes.io/part-of: meilisearch
app.kubernetes.io/managed-by: Helm
spec:
jobLabel: karakeep
namespaceSelector:
matchNames:
- karakeep
selector:
matchLabels:
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
endpoints:
- port: http
path: /metrics
interval: 1m
scrapeTimeout: 10s
bearerTokenSecret:
name: karakeep-meilisearch-master-key-secret
key: MEILI_MASTER_KEY
---
# Source: karakeep/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: karakeep
namespace: karakeep
labels:
app.kubernetes.io/name: karakeep
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
endpoints:
- port: http
interval: 30s
scrapeTimeout: 15s
path: /api/metrics
authorization:
credentials:
key: prometheus-token
name: karakeep-key-secret
selector:
matchLabels:
app.kubernetes.io/name: karakeep
app.kubernetes.io/instance: karakeep
---
# Source: karakeep/charts/meilisearch/templates/tests/test-connection.yaml
apiVersion: v1
kind: Pod
metadata:
name: karakeep-meilisearch-test-connection
labels:
app.kubernetes.io/name: meilisearch
helm.sh/chart: meilisearch-0.17.1
app.kubernetes.io/instance: karakeep
app.kubernetes.io/managed-by: Helm
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['karakeep-meilisearch:7700']
restartPolicy: Never


@@ -0,0 +1,928 @@
---
# Source: lidarr/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: lidarr-nfs-storage
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-nfs-storage
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: lidarr/charts/lidarr/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: lidarr-config
labels:
app.kubernetes.io/instance: lidarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: lidarr
helm.sh/chart: lidarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: lidarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: lidarr/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: lidarr-nfs-storage
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-nfs-storage
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
volumeName: lidarr-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: lidarr/charts/lidarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: lidarr
labels:
app.kubernetes.io/instance: lidarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: lidarr
app.kubernetes.io/service: lidarr
helm.sh/chart: lidarr-4.4.0
namespace: lidarr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8686
protocol: TCP
name: http
- port: 9792
targetPort: 9792
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: lidarr
app.kubernetes.io/name: lidarr
---
# Source: lidarr/charts/lidarr/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: lidarr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: lidarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: lidarr
helm.sh/chart: lidarr-4.4.0
namespace: lidarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: lidarr
app.kubernetes.io/instance: lidarr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: lidarr
app.kubernetes.io/name: lidarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 1000
runAsUser: 1000
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/lidarr:2.14.5@sha256:5e1235d00b5d1c1f60ca0d472e554a6611aef41aa7b5b6d88260214bf4809af0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- args:
- lidarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9792"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: lidarr-config
- name: media
persistentVolumeClaim:
claimName: lidarr-nfs-storage
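# The exportarr sidecar above reads the Lidarr API key from the read-only /config mount
# (config.xml) and re-exposes Lidarr statistics as Prometheus metrics on port 9792, which the
# lidarr Service maps to its "metrics" port for the ServiceMonitor further down.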
---
# Source: lidarr/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: lidarr2-postgresql-17-cluster
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "lidarr2-postgresql-17-external-backup"
serverName: "lidarr2-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "lidarr2-postgresql-17-garage-local-backup"
serverName: "lidarr2-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "lidarr2-postgresql-17-recovery"
serverName: lidarr2-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 200m
memory: 1Gi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: lidarr2-postgresql-17-backup-1
externalClusters:
- name: lidarr2-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "lidarr2-postgresql-17-recovery"
serverName: lidarr2-postgresql-17-backup-1
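# Backup and recovery wiring for this cluster: it bootstraps by restoring the "app" database
# from the barman object store lidarr2-postgresql-17-recovery (serverName
# lidarr2-postgresql-17-backup-1), then archives WAL to the in-cluster Garage store and ships
# base backups to the external DigitalOcean store through the barman-cloud.cloudnative-pg.io
# plugin declared above.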
---
# Source: lidarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: lidarr-config-backup-secret
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-config-backup-secret
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/lidarr2/lidarr2-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
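# The ExternalSecret above assembles the restic environment for VolSync: the literal keys are
# copied from Vault, and with mergePolicy Merge the v2 template adds RESTIC_REPOSITORY by
# prefixing the bucket endpoint onto the lidarr2/lidarr2-config path.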
---
# Source: lidarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: lidarr-postgresql-17-cluster-backup-secret
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: lidarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: lidarr-postgresql-17-cluster-backup-secret-garage
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: lidarr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-lidarr
namespace: lidarr
labels:
app.kubernetes.io/name: http-route-lidarr
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- lidarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: lidarr
port: 80
weight: 100
---
# Source: lidarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "lidarr2-postgresql-17-external-backup"
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/lidarr2/lidarr2-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: lidarr-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: lidarr-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: lidarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "lidarr2-postgresql-17-garage-local-backup"
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/lidarr/lidarr2-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: lidarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: lidarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: lidarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: lidarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "lidarr2-postgresql-17-recovery"
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/lidarr/lidarr2-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: lidarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: lidarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: lidarr/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: lidarr2-postgresql-17-alert-rules
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/lidarr2-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="lidarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="lidarr"}) < 1
for: 5m
labels:
severity: critical
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alert will be triggered constantly if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="lidarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="lidarr"}) < 2
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster is experiencing high replication lag.
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="lidarr",pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster has a query running for longer than 5 minutes.
description: |-
A query on CloudNativePG Cluster pod {{ $labels.pod }}
has been running for more than 5 minutes (300 seconds).
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="lidarr",pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions since the frozen XID.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
---
# Source: lidarr/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: lidarr
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
groups:
- name: lidarr
rules:
- alert: ExportarrAbsent
annotations:
description: Lidarr Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*lidarr.*"} == 1)
for: 5m
labels:
severity: critical
- alert: LidarrDown
annotations:
description: Lidarr service is down.
summary: Lidarr is down.
expr: |
lidarr_system_status{job=~".*lidarr.*"} == 0
for: 5m
labels:
severity: critical
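# ExportarrAbsent uses absent() so it fires when no lidarr-labelled job is present in Prometheus
# at all, while LidarrDown relies on the exporter's lidarr_system_status gauge dropping to 0.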
---
# Source: lidarr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: lidarr-config-backup-source
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-config-backup-source
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
sourcePVC: lidarr-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: lidarr-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
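# Restores are the mirror image of this ReplicationSource. A minimal sketch of a matching
# VolSync ReplicationDestination, assuming the same repository secret and target PVC
# (illustrative only, not rendered by this chart; the name is hypothetical):
#
#   apiVersion: volsync.backube/v1alpha1
#   kind: ReplicationDestination
#   metadata:
#     name: lidarr-config-restore        # hypothetical name
#     namespace: lidarr
#   spec:
#     trigger:
#       manual: restore-once             # run a single restore, then stop
#     restic:
#       repository: lidarr-config-backup-secret   # same restic env as the source
#       destinationPVC: lidarr-config              # restore into the existing PVC
#       copyMethod: Direct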
---
# Source: lidarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "lidarr2-postgresql-17-daily-backup-scheduled-backup"
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: lidarr2-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "lidarr2-postgresql-17-external-backup"
---
# Source: lidarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "lidarr2-postgresql-17-live-backup-scheduled-backup"
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: lidarr2-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "lidarr2-postgresql-17-garage-local-backup"
---
# Source: lidarr/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: lidarr
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
selector:
matchLabels:
app.kubernetes.io/name: lidarr
app.kubernetes.io/instance: lidarr
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics

@@ -0,0 +1,235 @@
---
# Source: prowlarr/charts/prowlarr/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: prowlarr-config
labels:
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: prowlarr
helm.sh/chart: prowlarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: prowlarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: prowlarr/charts/prowlarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: prowlarr
labels:
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: prowlarr
app.kubernetes.io/service: prowlarr
helm.sh/chart: prowlarr-4.4.0
namespace: prowlarr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 9696
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/name: prowlarr
---
# Source: prowlarr/charts/prowlarr/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: prowlarr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: prowlarr
helm.sh/chart: prowlarr-4.4.0
namespace: prowlarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: prowlarr
app.kubernetes.io/instance: prowlarr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/name: prowlarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 568
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 568
runAsUser: 568
supplementalGroups:
- 44
- 100
- 109
- 65539
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
image: ghcr.io/linuxserver/prowlarr:2.3.0@sha256:475853535de3de8441b87c1457c30f2e695f4831228b12b6b7274e9da409d874
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
volumes:
- name: config
persistentVolumeClaim:
claimName: prowlarr-config
---
# Source: prowlarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: prowlarr-config-backup-secret
namespace: prowlarr
labels:
app.kubernetes.io/name: prowlarr-config-backup-secret
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/part-of: prowlarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/prowlarr/prowlarr-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: prowlarr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-prowlarr
namespace: prowlarr
labels:
app.kubernetes.io/name: http-route-prowlarr
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/part-of: prowlarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- prowlarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: prowlarr
port: 80
weight: 100
---
# Source: prowlarr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: prowlarr-config-backup-source
namespace: prowlarr
labels:
app.kubernetes.io/name: prowlarr-config-backup-source
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/part-of: prowlarr
spec:
sourcePVC: prowlarr-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: prowlarr-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 568
runAsGroup: 568
fsGroup: 568
fsGroupChangePolicy: OnRootMismatch
supplementalGroups:
- 44
- 100
- 109
- 65539
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
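# The moverSecurityContext mirrors the prowlarr pod's securityContext (UID/GID 568 plus the
# same supplemental groups) so the restic mover can read the snapshotted config volume with
# matching ownership.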

@@ -0,0 +1,930 @@
---
# Source: radarr-4k/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: radarr-4k-nfs-storage
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k-nfs-storage
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: radarr-4k/charts/radarr-4k/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: radarr-4k-config
labels:
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-4k
helm.sh/chart: radarr-4k-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: radarr-4k
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "20Gi"
storageClassName: "ceph-block"
---
# Source: radarr-4k/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: radarr-4k-nfs-storage
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k-nfs-storage
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
volumeName: radarr-4k-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: radarr-4k/charts/radarr-4k/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: radarr-4k
labels:
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-4k
app.kubernetes.io/service: radarr-4k
helm.sh/chart: radarr-4k-4.4.0
namespace: radarr-4k
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 7878
protocol: TCP
name: http
- port: 9793
targetPort: 9793
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/name: radarr-4k
---
# Source: radarr-4k/charts/radarr-4k/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: radarr-4k
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-4k
helm.sh/chart: radarr-4k-4.4.0
namespace: radarr-4k
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: radarr-4k
app.kubernetes.io/instance: radarr-4k
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/name: radarr-4k
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 1000
runAsUser: 1000
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/radarr:6.0.4@sha256:06ac318ecb95a34c7b229568dcb4271f02cb5007bb189a0dd67a2032864187ca
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- args:
- radarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9793"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: radarr-4k-config
- name: media
persistentVolumeClaim:
claimName: radarr-4k-nfs-storage
---
# Source: radarr-4k/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: radarr5-4k-postgresql-17-cluster
namespace: radarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-4k-postgresql-17
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-4k-postgresql-17-external-backup"
serverName: "radarr5-4k-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "radarr5-4k-postgresql-17-garage-local-backup"
serverName: "radarr5-4k-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-4k-postgresql-17-recovery"
serverName: radarr5-4k-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 200m
memory: 1Gi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: radarr5-4k-postgresql-17-backup-1
externalClusters:
- name: radarr5-4k-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-4k-postgresql-17-recovery"
serverName: radarr5-4k-postgresql-17-backup-1
---
# Source: radarr-4k/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-4k-config-backup-secret
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k-config-backup-secret
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/radarr5-4k/radarr5-4k-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: radarr-4k/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-4k-postgresql-17-cluster-backup-secret
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: radarr-4k/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-4k-postgresql-17-cluster-backup-secret-garage
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: radarr-4k/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-radarr-4k
namespace: radarr-4k
labels:
app.kubernetes.io/name: http-route-radarr-4k
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- radarr-4k.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: radarr-4k
port: 80
weight: 100
---
# Source: radarr-4k/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-4k-postgresql-17-external-backup"
namespace: radarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-4k-postgresql-17
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/radarr5-4k/radarr5-4k-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: radarr-4k-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-4k-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: radarr-4k/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-4k-postgresql-17-garage-local-backup"
namespace: radarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-4k-postgresql-17
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr-4k/radarr5-4k-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: radarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: radarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: radarr-4k/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-4k-postgresql-17-recovery"
namespace: radarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-4k-postgresql-17
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr5-4k/radarr5-4k-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: radarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: radarr-4k/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr5-4k-postgresql-17-alert-rules
namespace: radarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-4k-postgresql-17
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/radarr5-4k-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-4k"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-4k"}) < 1
for: 5m
labels:
severity: critical
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alert will be triggered constantly if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-4k"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-4k"}) < 2
for: 5m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster is experiencing high replication lag.
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="radarr-4k",pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster has a query running for longer than 5 minutes.
description: |-
A query on CloudNativePG Cluster pod {{ $labels.pod }}
has been running for more than 5 minutes (300 seconds).
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="radarr-4k",pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions since the frozen XID.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
---
# Source: radarr-4k/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr-4k
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
groups:
- name: radarr-4k
rules:
- alert: ExportarrAbsent
annotations:
description: Radarr 4K Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
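# Note: absent() returns 1 only when no 'up' series matching the job regex has value 1, so this fires when the Exportarr target is missing from service discovery or failing its scrape.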
expr: |
absent(up{job=~".*radarr-4k.*"} == 1)
for: 5m
labels:
severity: critical
- alert: Radarr4kDown
annotations:
description: Radarr 4K service is down.
summary: Radarr 4K is down.
expr: |
radarr_4k_system_status{job=~".*radarr-4k.*"} == 0
for: 5m
labels:
severity: critical
---
# Source: radarr-4k/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: radarr-4k-config-backup-source
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k-config-backup-source
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
sourcePVC: radarr-4k-config
trigger:
schedule: 0 4 * * *
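# VolSync trigger schedules use standard five-field cron; "0 4 * * *" starts the restic backup daily at 04:00.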
restic:
pruneIntervalDays: 7
repository: radarr-4k-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: radarr-4k/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-4k-postgresql-17-daily-backup-scheduled-backup"
namespace: radarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-4k-postgresql-17
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-4k-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-4k-postgresql-17-external-backup"
---
# Source: radarr-4k/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-4k-postgresql-17-live-backup-scheduled-backup"
namespace: radarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-4k-postgresql-17
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
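# immediate: true also takes a backup as soon as this ScheduledBackup is created, in addition to the daily schedule below.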
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-4k-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-4k-postgresql-17-garage-local-backup"
---
# Source: radarr-4k/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: radarr-4k
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
selector:
matchLabels:
app.kubernetes.io/name: radarr-4k
app.kubernetes.io/instance: radarr-4k
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics

@@ -0,0 +1,928 @@
---
# Source: radarr-anime/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: radarr-anime-nfs-storage
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime-nfs-storage
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: radarr-anime/charts/radarr-anime/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: radarr-anime-config
labels:
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-anime
helm.sh/chart: radarr-anime-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: radarr-anime
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "20Gi"
storageClassName: "ceph-block"
---
# Source: radarr-anime/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: radarr-anime-nfs-storage
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime-nfs-storage
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
volumeName: radarr-anime-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: radarr-anime/charts/radarr-anime/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: radarr-anime
labels:
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-anime
app.kubernetes.io/service: radarr-anime
helm.sh/chart: radarr-anime-4.4.0
namespace: radarr-anime
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 7878
protocol: TCP
name: http
- port: 9793
targetPort: 9793
protocol: TCP
name: metrics
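# Port 80 fronts Radarr's web UI on its default container port 7878; port 9793 exposes the Exportarr metrics sidecar.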
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/name: radarr-anime
---
# Source: radarr-anime/charts/radarr-anime/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: radarr-anime
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-anime
helm.sh/chart: radarr-anime-4.4.0
namespace: radarr-anime
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: radarr-anime
app.kubernetes.io/instance: radarr-anime
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/name: radarr-anime
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/radarr:6.0.4@sha256:06ac318ecb95a34c7b229568dcb4271f02cb5007bb189a0dd67a2032864187ca
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- args:
- radarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9793"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
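# The metrics container above runs Exportarr against the local Radarr instance; with CONFIG set it reads the API key from the mounted /config/config.xml and serves Prometheus metrics on port 9793.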
volumes:
- name: config
persistentVolumeClaim:
claimName: radarr-anime-config
- name: media
persistentVolumeClaim:
claimName: radarr-anime-nfs-storage
---
# Source: radarr-anime/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: radarr5-anime-postgresql-17-cluster
namespace: radarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-anime-postgresql-17
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-anime-postgresql-17-external-backup"
serverName: "radarr5-anime-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "radarr5-anime-postgresql-17-garage-local-backup"
serverName: "radarr5-anime-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-anime-postgresql-17-recovery"
serverName: radarr5-anime-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: radarr5-anime-postgresql-17-backup-1
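# Bootstrap restores from the external cluster "radarr5-anime-postgresql-17-backup-1", which points at the radarr5-anime-postgresql-17-recovery object store through the Barman Cloud plugin.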
externalClusters:
- name: radarr5-anime-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-anime-postgresql-17-recovery"
serverName: radarr5-anime-postgresql-17-backup-1
---
# Source: radarr-anime/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-anime-config-backup-secret
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime-config-backup-secret
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/radarr5-anime/radarr5-anime-config"
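# RESTIC_REPOSITORY is rendered from the BUCKET_ENDPOINT key fetched below; mergePolicy: Merge keeps the other synced keys (credentials, region, password) alongside the templated value.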
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: radarr-anime/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-anime-postgresql-17-cluster-backup-secret
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: radarr-anime/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-anime-postgresql-17-cluster-backup-secret-garage
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: radarr-anime/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-radarr-anime
namespace: radarr-anime
labels:
app.kubernetes.io/name: http-route-radarr-anime
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- radarr-anime.alexlebens.net
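# Attaches to the shared traefik-gateway in the traefik namespace and forwards all paths for this hostname to the radarr-anime Service on port 80.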
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: radarr-anime
port: 80
weight: 100
---
# Source: radarr-anime/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-anime-postgresql-17-external-backup"
namespace: radarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-anime-postgresql-17
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/radarr5-anime/radarr5-anime-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: radarr-anime-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-anime-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: radarr-anime/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-anime-postgresql-17-garage-local-backup"
namespace: radarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-anime-postgresql-17
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
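# The local Garage copy keeps only 3 days of backups; the external DigitalOcean Spaces store above retains 30 days.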
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr-anime/radarr5-anime-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: radarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: radarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: radarr-anime/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-anime-postgresql-17-recovery"
namespace: radarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-anime-postgresql-17
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr5-anime/radarr5-anime-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: radarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: radarr-anime/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr5-anime-postgresql-17-alert-rules
namespace: radarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-anime-postgresql-17
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/radarr5-anime-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary instance.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
instances. The replaced instance may need some time to catch up with the cluster primary instance.
This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-anime"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-anime"}) < 1
for: 5m
labels:
severity: critical
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-anime"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-anime"}) < 2
for: 5m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="radarr-anime",pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="radarr-anime",pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
---
# Source: radarr-anime/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr-anime
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
groups:
- name: radarr-anime
rules:
- alert: ExportarrAbsent
annotations:
description: Radarr Anime Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*radarr-anime.*"} == 1)
for: 5m
labels:
severity: critical
- alert: RadarrAnimeDown
annotations:
description: Radarr Anime service is down.
summary: Radarr Anime is down.
expr: |
radarr_anime_system_status{job=~".*radarr-anime.*"} == 0
for: 5m
labels:
severity: critical
---
# Source: radarr-anime/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: radarr-anime-config-backup-source
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime-config-backup-source
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
sourcePVC: radarr-anime-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: radarr-anime-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: radarr-anime/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-anime-postgresql-17-daily-backup-scheduled-backup"
namespace: radarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-anime-postgresql-17
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-anime-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-anime-postgresql-17-external-backup"
---
# Source: radarr-anime/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-anime-postgresql-17-live-backup-scheduled-backup"
namespace: radarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-anime-postgresql-17
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-anime-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-anime-postgresql-17-garage-local-backup"
---
# Source: radarr-anime/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: radarr-anime
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
selector:
matchLabels:
app.kubernetes.io/name: radarr-anime
app.kubernetes.io/instance: radarr-anime
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics

@@ -0,0 +1,928 @@
---
# Source: radarr-standup/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: radarr-standup-nfs-storage
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup-nfs-storage
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: radarr-standup/charts/radarr-standup/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: radarr-standup-config
labels:
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-standup
helm.sh/chart: radarr-standup-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: radarr-standup
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "20Gi"
storageClassName: "ceph-block"
---
# Source: radarr-standup/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: radarr-standup-nfs-storage
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup-nfs-storage
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
volumeName: radarr-standup-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: radarr-standup/charts/radarr-standup/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: radarr-standup
labels:
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-standup
app.kubernetes.io/service: radarr-standup
helm.sh/chart: radarr-standup-4.4.0
namespace: radarr-standup
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 7878
protocol: TCP
name: http
- port: 9793
targetPort: 9793
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/name: radarr-standup
---
# Source: radarr-standup/charts/radarr-standup/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: radarr-standup
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-standup
helm.sh/chart: radarr-standup-4.4.0
namespace: radarr-standup
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: radarr-standup
app.kubernetes.io/instance: radarr-standup
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/name: radarr-standup
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/radarr:6.0.4@sha256:06ac318ecb95a34c7b229568dcb4271f02cb5007bb189a0dd67a2032864187ca
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- args:
- radarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9793"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: radarr-standup-config
- name: media
persistentVolumeClaim:
claimName: radarr-standup-nfs-storage
---
# Source: radarr-standup/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: radarr5-standup-postgresql-17-cluster
namespace: radarr-standup
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-standup-postgresql-17
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-standup-postgresql-17-external-backup"
serverName: "radarr5-standup-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "radarr5-standup-postgresql-17-garage-local-backup"
serverName: "radarr5-standup-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-standup-postgresql-17-recovery"
serverName: radarr5-standup-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
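# Data (10Gi) and WAL (2Gi) live on separate PVCs backed by the node-local "local-path" storage class; pod anti-affinity below spreads the three instances across nodes.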
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: radarr5-standup-postgresql-17-backup-1
externalClusters:
- name: radarr5-standup-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-standup-postgresql-17-recovery"
serverName: radarr5-standup-postgresql-17-backup-1
---
# Source: radarr-standup/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-standup-config-backup-secret
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup-config-backup-secret
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/radarr5-standup/radarr5-standup-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: radarr-standup/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-standup-postgresql-17-cluster-backup-secret
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: radarr-standup/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-standup-postgresql-17-cluster-backup-secret-garage
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: radarr-standup/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-radarr-standup
namespace: radarr-standup
labels:
app.kubernetes.io/name: http-route-radarr-standup
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- radarr-standup.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: radarr-standup
port: 80
weight: 100
---
# Source: radarr-standup/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-standup-postgresql-17-external-backup"
namespace: radarr-standup
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-standup-postgresql-17
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/radarr5-standup/radarr5-standup-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: radarr-standup-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-standup-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: radarr-standup/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-standup-postgresql-17-garage-local-backup"
namespace: radarr-standup
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-standup-postgresql-17
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr-standup/radarr5-standup-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: radarr-standup-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-standup-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: radarr-standup-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: radarr-standup/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-standup-postgresql-17-recovery"
namespace: radarr-standup
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-standup-postgresql-17
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr5-standup/radarr5-standup-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: radarr-standup-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-standup-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: radarr-standup/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr5-standup-postgresql-17-alert-rules
namespace: radarr-standup
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-standup-postgresql-17
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/radarr5-standup-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary instance.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
instances. The replaced instance may need some time to catch up with the cluster primary instance.
This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-standup"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-standup"}) < 1
for: 5m
labels:
severity: critical
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-standup"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-standup"}) < 2
for: 5m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="radarr-standup",pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="radarr-standup",pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster has instances in the same zone.
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
---
# Source: radarr-standup/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr-standup
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
groups:
- name: radarr-standup
rules:
- alert: ExportarrAbsent
annotations:
description: Radarr Stand Up Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*radarr-standup.*"} == 1)
for: 5m
labels:
severity: critical
- alert: RadarrStandUpDown
annotations:
description: Radarr Stand Up service is down.
summary: Radarr Stand Up is down.
expr: |
radarr_standup_system_status{job=~".*radarr-standup.*"} == 0
for: 5m
labels:
severity: critical
---
# Source: radarr-standup/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: radarr-standup-config-backup-source
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup-config-backup-source
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
sourcePVC: radarr-standup-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: radarr-standup-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: radarr-standup/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-standup-postgresql-17-daily-backup-scheduled-backup"
namespace: radarr-standup
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-standup-postgresql-17
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-standup-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-standup-postgresql-17-external-backup"
---
# Source: radarr-standup/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-standup-postgresql-17-live-backup-scheduled-backup"
namespace: radarr-standup
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-standup-postgresql-17
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-standup-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-standup-postgresql-17-garage-local-backup"
---
# Source: radarr-standup/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: radarr-standup
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
selector:
matchLabels:
app.kubernetes.io/name: radarr-standup
app.kubernetes.io/instance: radarr-standup
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics

View File

@@ -0,0 +1,930 @@
---
# Source: radarr/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: radarr-nfs-storage
namespace: radarr
labels:
app.kubernetes.io/name: radarr-nfs-storage
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: radarr/charts/radarr/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: radarr-config
labels:
app.kubernetes.io/instance: radarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr
helm.sh/chart: radarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: radarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "20Gi"
storageClassName: "ceph-block"
---
# Source: radarr/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: radarr-nfs-storage
namespace: radarr
labels:
app.kubernetes.io/name: radarr-nfs-storage
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
volumeName: radarr-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: radarr/charts/radarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: radarr
labels:
app.kubernetes.io/instance: radarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr
app.kubernetes.io/service: radarr
helm.sh/chart: radarr-4.4.0
namespace: radarr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 7878
protocol: TCP
name: http
- port: 9793
targetPort: 9793
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr
app.kubernetes.io/name: radarr
---
# Source: radarr/charts/radarr/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: radarr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr
helm.sh/chart: radarr-4.4.0
namespace: radarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: radarr
app.kubernetes.io/instance: radarr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr
app.kubernetes.io/name: radarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 1000
runAsUser: 1000
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/radarr:6.0.4@sha256:06ac318ecb95a34c7b229568dcb4271f02cb5007bb189a0dd67a2032864187ca
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- args:
- radarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9793"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: radarr-config
- name: media
persistentVolumeClaim:
claimName: radarr-nfs-storage
---
# Source: radarr/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: radarr5-postgresql-17-cluster
namespace: radarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-postgresql-17
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-postgresql-17-external-backup"
serverName: "radarr5-postgresql-17-backup-2"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "radarr5-postgresql-17-garage-local-backup"
serverName: "radarr5-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-postgresql-17-recovery"
serverName: radarr5-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 200m
memory: 1Gi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: radarr5-postgresql-17-backup-1
externalClusters:
- name: radarr5-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-postgresql-17-recovery"
serverName: radarr5-postgresql-17-backup-1
---
# Source: radarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-config-backup-secret
namespace: radarr
labels:
app.kubernetes.io/name: radarr-config-backup-secret
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/radarr5/radarr5-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: radarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-postgresql-17-cluster-backup-secret
namespace: radarr
labels:
app.kubernetes.io/name: radarr-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: radarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-postgresql-17-cluster-backup-secret-garage
namespace: radarr
labels:
app.kubernetes.io/name: radarr-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: radarr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-radarr
namespace: radarr
labels:
app.kubernetes.io/name: http-route-radarr
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- radarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: radarr
port: 80
weight: 100
---
# Source: radarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-postgresql-17-external-backup"
namespace: radarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-postgresql-17
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/radarr5/radarr5-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: radarr-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: radarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-postgresql-17-garage-local-backup"
namespace: radarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-postgresql-17
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr/radarr5-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: radarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: radarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: radarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-postgresql-17-recovery"
namespace: radarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-postgresql-17
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr5/radarr5-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: radarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: radarr/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr5-postgresql-17-alert-rules
namespace: radarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-postgresql-17
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/radarr5-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr"}) < 1
for: 5m
labels:
severity: critical
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr"}) < 2
for: 5m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster is experiencing high replication lag.
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="radarr",pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running low on disk space.
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="radarr",pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster has instances in the same zone.
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
---
# Source: radarr/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr
namespace: radarr
labels:
app.kubernetes.io/name: radarr
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
groups:
- name: radarr
rules:
- alert: ExportarrAbsent
annotations:
description: Radarr Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*radarr.*"} == 1)
for: 5m
labels:
severity: critical
- alert: RadarrDown
annotations:
description: Radarr service is down.
summary: Radarr is down.
expr: |
radarr_system_status{job=~".*radarr.*"} == 0
for: 5m
labels:
severity: critical
---
# Source: radarr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: radarr-config-backup-source
namespace: radarr
labels:
app.kubernetes.io/name: radarr-config-backup-source
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
sourcePVC: radarr-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: radarr-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: radarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-postgresql-17-daily-backup-scheduled-backup"
namespace: radarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-postgresql-17
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-postgresql-17-external-backup"
---
# Source: radarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-postgresql-17-live-backup-scheduled-backup"
namespace: radarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-postgresql-17
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-postgresql-17-garage-local-backup"
---
# Source: radarr/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: radarr
namespace: radarr
labels:
app.kubernetes.io/name: radarr
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
selector:
matchLabels:
app.kubernetes.io/name: radarr
app.kubernetes.io/instance: radarr
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics

View File

@@ -4,7 +4,7 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: searxng-browser-data
name: searxng-api-data
labels:
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
@@ -23,7 +23,7 @@ spec:
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: searxng-api-data
name: searxng-browser-data
labels:
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
@@ -89,71 +89,6 @@ spec:
# Source: searxng/charts/searxng/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: searxng-browser
labels:
app.kubernetes.io/controller: browser
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: searxng
helm.sh/chart: searxng-4.4.0
namespace: searxng
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: browser
app.kubernetes.io/name: searxng
app.kubernetes.io/instance: searxng
template:
metadata:
labels:
app.kubernetes.io/controller: browser
app.kubernetes.io/instance: searxng
app.kubernetes.io/name: searxng
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: SEARXNG_BASE_URL
value: https://searxng.alexlebens.net/
- name: SEARXNG_QUERY_URL
value: https://searxng.alexlebens.net/search?q=<query>
- name: SEARXNG_HOSTNAME
value: searxng.alexlebens.net
- name: SEARXNG_REDIS_URL
value: redis://redis-replication-searxng-master.searxng:6379/0
- name: UWSGI_WORKERS
value: "4"
- name: UWSGI_THREADS
value: "4"
image: searxng/searxng:latest@sha256:faa7118f9167c2c1e09a3fbb9bd87eee0905d76456d297e62e815646afc97037
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /etc/searxng
name: browser-data
volumes:
- name: browser-data
persistentVolumeClaim:
claimName: searxng-browser-data
---
# Source: searxng/charts/searxng/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: searxng-api
labels:
@@ -235,6 +170,71 @@ spec:
secret:
secretName: searxng-api-config-secret
---
# Source: searxng/charts/searxng/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: searxng-browser
labels:
app.kubernetes.io/controller: browser
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: searxng
helm.sh/chart: searxng-4.4.0
namespace: searxng
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: browser
app.kubernetes.io/name: searxng
app.kubernetes.io/instance: searxng
template:
metadata:
labels:
app.kubernetes.io/controller: browser
app.kubernetes.io/instance: searxng
app.kubernetes.io/name: searxng
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: SEARXNG_BASE_URL
value: https://searxng.alexlebens.net/
- name: SEARXNG_QUERY_URL
value: https://searxng.alexlebens.net/search?q=<query>
- name: SEARXNG_HOSTNAME
value: searxng.alexlebens.net
- name: SEARXNG_REDIS_URL
value: redis://redis-replication-searxng-master.searxng:6379/0
- name: UWSGI_WORKERS
value: "4"
- name: UWSGI_THREADS
value: "4"
image: searxng/searxng:latest@sha256:faa7118f9167c2c1e09a3fbb9bd87eee0905d76456d297e62e815646afc97037
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /etc/searxng
name: browser-data
volumes:
- name: browser-data
persistentVolumeClaim:
claimName: searxng-browser-data
---
# Source: searxng/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret

View File

@@ -0,0 +1,928 @@
---
# Source: sonarr-4k/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: sonarr-4k-nfs-storage
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k-nfs-storage
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: sonarr-4k/charts/sonarr-4k/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: sonarr-4k-config
labels:
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr-4k
helm.sh/chart: sonarr-4k-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: sonarr-4k
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "20Gi"
storageClassName: "ceph-block"
---
# Source: sonarr-4k/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: sonarr-4k-nfs-storage
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k-nfs-storage
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
volumeName: sonarr-4k-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: sonarr-4k/charts/sonarr-4k/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: sonarr-4k
labels:
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr-4k
app.kubernetes.io/service: sonarr-4k
helm.sh/chart: sonarr-4k-4.4.0
namespace: sonarr-4k
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8989
protocol: TCP
name: http
- port: 9794
targetPort: 9794
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/name: sonarr-4k
---
# Source: sonarr-4k/charts/sonarr-4k/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sonarr-4k
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr-4k
helm.sh/chart: sonarr-4k-4.4.0
namespace: sonarr-4k
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: sonarr-4k
app.kubernetes.io/instance: sonarr-4k
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/name: sonarr-4k
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/sonarr:4.0.16@sha256:60e5edcac39172294ad22d55d1b08c2c0a9fe658cad2f2c4d742ae017d7874de
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- args:
- sonarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9794"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: sonarr-4k-config
- name: media
persistentVolumeClaim:
claimName: sonarr-4k-nfs-storage
---
# Source: sonarr-4k/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: sonarr4-4k-postgresql-17-cluster
namespace: sonarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-4k-postgresql-17
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "sonarr4-4k-postgresql-17-external-backup"
serverName: "sonarr4-4k-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "sonarr4-4k-postgresql-17-garage-local-backup"
serverName: "sonarr4-4k-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-4k-postgresql-17-recovery"
serverName: sonarr4-4k-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 512Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: sonarr4-4k-postgresql-17-backup-1
externalClusters:
- name: sonarr4-4k-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "sonarr4-4k-postgresql-17-recovery"
serverName: sonarr4-4k-postgresql-17-backup-1
---
# Source: sonarr-4k/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-4k-config-backup-secret
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k-config-backup-secret
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/sonarr4-4k/sonarr4-4k-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: sonarr-4k/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-4k-postgresql-17-cluster-backup-secret
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: sonarr-4k/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: sonarr-4k/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-sonarr-4k
namespace: sonarr-4k
labels:
app.kubernetes.io/name: http-route-sonarr-4k
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- sonarr-4k.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: sonarr-4k
port: 80
weight: 100
---
# Source: sonarr-4k/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-4k-postgresql-17-external-backup"
namespace: sonarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-4k-postgresql-17
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/sonarr4-4k/sonarr4-4k-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: sonarr-4k-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-4k-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: sonarr-4k/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-4k-postgresql-17-garage-local-backup"
namespace: sonarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-4k-postgresql-17
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/sonarr-4k/sonarr4-4k-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: sonarr-4k/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-4k-postgresql-17-recovery"
namespace: sonarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-4k-postgresql-17
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/sonarr-4k/sonarr4-4k-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: sonarr-4k/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: sonarr4-4k-postgresql-17-alert-rules
namespace: sonarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-4k-postgresql-17
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/sonarr4-4k-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr-4k"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr-4k"}) < 1
for: 5m
labels:
severity: critical
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr-4k"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr-4k"}) < 2
for: 5m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster is experiencing high replication lag.
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="sonarr-4k",pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="sonarr-4k",pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
            summary: CNPG Cluster has a high number of transactions since the frozen XID.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
            summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
            summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
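# A quick sanity check on the ratio-based rules above, with illustrative numbers
# (not values observed on this cluster):
#
#   cnpg_backends_total (per pod)                      = 96
#   cnpg_pg_settings_setting{name="max_connections"}   = 100
#   96 / 100 * 100 = 96   -> above the 95 threshold, CNPGClusterHighConnectionsCritical fires after 5m
#
#   kubelet_volume_stats_available_bytes = 0.8 GiB, kubelet_volume_stats_capacity_bytes = 10 GiB
#   1 - 0.8 / 10 = 0.92   -> above the 0.9 threshold, CNPGClusterLowDiskSpaceCritical fires after 5m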
---
# Source: sonarr-4k/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: sonarr-4k
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
groups:
- name: sonarr-4k
rules:
- alert: ExportarrAbsent
annotations:
description: Sonarr 4K Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*sonarr-4k.*"} == 1)
for: 5m
labels:
severity: critical
- alert: Sonarr4KDown
annotations:
description: Sonarr 4K service is down.
summary: Sonarr 4K is down.
expr: |
sonarr_4k_system_status{job=~".*sonarr-4k.*"} == 0
for: 5m
labels:
severity: critical
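# Note on the ExportarrAbsent expression: absent(up{job=~".*sonarr-4k.*"} == 1)
# is empty while at least one matching target reports up == 1, and becomes a
# single series with value 1 once no such target exists, so the alert fires only
# after every matching scrape target has been missing or down for 5 minutes:
#
#   some target with up == 1 exists  -> absent(...) returns nothing -> no alert
#   no matching target is up         -> absent(...) = 1             -> alert after 5m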
---
# Source: sonarr-4k/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: sonarr-4k-config-backup-source
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k-config-backup-source
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
sourcePVC: sonarr-4k-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: sonarr-4k-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
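# Restores are not rendered in this manifest set. A minimal, hypothetical
# ReplicationDestination for pulling the latest restic snapshot back into a new
# PVC could look roughly like this (name, trigger and capacity are illustrative):
#
#   apiVersion: volsync.backube/v1alpha1
#   kind: ReplicationDestination
#   metadata:
#     name: sonarr-4k-config-restore          # hypothetical, not part of this chart
#     namespace: sonarr-4k
#   spec:
#     trigger:
#       manual: restore-once                  # one-shot restore
#     restic:
#       repository: sonarr-4k-config-backup-secret
#       copyMethod: Snapshot
#       capacity: 20Gi                        # size to match the source config PVC
#       accessModes: ["ReadWriteOnce"]
#       storageClassName: ceph-block
#       volumeSnapshotClassName: ceph-blockpool-snapshot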
---
# Source: sonarr-4k/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "sonarr4-4k-postgresql-17-daily-backup-scheduled-backup"
namespace: sonarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-4k-postgresql-17
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: sonarr4-4k-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-4k-postgresql-17-external-backup"
---
# Source: sonarr-4k/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "sonarr4-4k-postgresql-17-live-backup-scheduled-backup"
namespace: sonarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-4k-postgresql-17
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: sonarr4-4k-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-4k-postgresql-17-garage-local-backup"
---
# Source: sonarr-4k/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: sonarr-4k
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
selector:
matchLabels:
app.kubernetes.io/name: sonarr-4k
app.kubernetes.io/instance: sonarr-4k
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics

View File

@@ -0,0 +1,928 @@
---
# Source: sonarr-anime/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: sonarr-anime-nfs-storage
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime-nfs-storage
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: sonarr-anime/charts/sonarr-anime/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: sonarr-anime-config
labels:
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr-anime
helm.sh/chart: sonarr-anime-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: sonarr-anime
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "20Gi"
storageClassName: "ceph-block"
---
# Source: sonarr-anime/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: sonarr-anime-nfs-storage
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime-nfs-storage
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
volumeName: sonarr-anime-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: sonarr-anime/charts/sonarr-anime/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: sonarr-anime
labels:
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr-anime
app.kubernetes.io/service: sonarr-anime
helm.sh/chart: sonarr-anime-4.4.0
namespace: sonarr-anime
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8989
protocol: TCP
name: http
- port: 9794
targetPort: 9794
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/name: sonarr-anime
---
# Source: sonarr-anime/charts/sonarr-anime/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sonarr-anime
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr-anime
helm.sh/chart: sonarr-anime-4.4.0
namespace: sonarr-anime
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: sonarr-anime
app.kubernetes.io/instance: sonarr-anime
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/name: sonarr-anime
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/sonarr:4.0.16@sha256:60e5edcac39172294ad22d55d1b08c2c0a9fe658cad2f2c4d742ae017d7874de
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- args:
- sonarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9794"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: sonarr-anime-config
- name: media
persistentVolumeClaim:
claimName: sonarr-anime-nfs-storage
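# Metrics wiring for this release: the exportarr sidecar ("metrics" container)
# reads the Sonarr API key from /config/config.xml, serves Prometheus metrics on
# port 9794, the Service exposes that port under the name "metrics", and the
# ServiceMonitor further down scrapes it every 3m. A quick ad-hoc check
# (illustrative commands, not part of the manifests):
#
#   kubectl -n sonarr-anime port-forward deploy/sonarr-anime 9794:9794
#   curl -s localhost:9794/metrics | grep '^sonarr_'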
---
# Source: sonarr-anime/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: sonarr4-anime-postgresql-17-cluster
namespace: sonarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-anime-postgresql-17
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "sonarr4-anime-postgresql-17-external-backup"
serverName: "sonarr4-anime-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "sonarr4-anime-postgresql-17-garage-local-backup"
serverName: "sonarr4-anime-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-anime-postgresql-17-recovery"
serverName: sonarr4-anime-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 512Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: sonarr4-anime-postgresql-17-backup-1
externalClusters:
- name: sonarr4-anime-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "sonarr4-anime-postgresql-17-recovery"
serverName: sonarr4-anime-postgresql-17-backup-1
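# Reading this Cluster spec: both barman-cloud plugin entries reference the same
# serverName, but only the Garage ObjectStore ("...-garage-local-backup") acts as
# the WAL archiver; each store also receives its own ScheduledBackup further down
# (daily to the DigitalOcean store, "live" to Garage). The bootstrap.recovery
# block means a fresh cluster is restored from the "...-recovery" ObjectStore
# instead of running initdb. A point-in-time restore would, hypothetically, add a
# recovery target here (field names per the CloudNativePG API, timestamp illustrative):
#
#   bootstrap:
#     recovery:
#       source: sonarr4-anime-postgresql-17-backup-1
#       recoveryTarget:
#         targetTime: "2025-12-01 00:00:00+00"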
---
# Source: sonarr-anime/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-anime-config-backup-secret
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime-config-backup-secret
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/sonarr4-anime/sonarr4-anime-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
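# With mergePolicy: Merge and the v2 template engine, the operator renders
# RESTIC_REPOSITORY from the looked-up BUCKET_ENDPOINT and merges it with the
# other pulled keys, so the resulting Secret (named after this ExternalSecret)
# ends up with roughly this shape, values redacted:
#
#   kind: Secret
#   metadata:
#     name: sonarr-anime-config-backup-secret
#   stringData:
#     RESTIC_REPOSITORY: <S3_BUCKET_ENDPOINT>/sonarr4-anime/sonarr4-anime-config
#     RESTIC_PASSWORD: <redacted>
#     AWS_DEFAULT_REGION: <redacted>
#     AWS_ACCESS_KEY_ID: <redacted>
#     AWS_SECRET_ACCESS_KEY: <redacted>
#     BUCKET_ENDPOINT: <redacted>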
---
# Source: sonarr-anime/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-anime-postgresql-17-cluster-backup-secret
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: sonarr-anime/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: sonarr-anime/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-sonarr-anime
namespace: sonarr-anime
labels:
app.kubernetes.io/name: http-route-sonarr-anime
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- sonarr-anime.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: sonarr-anime
port: 80
weight: 100
---
# Source: sonarr-anime/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-anime-postgresql-17-external-backup"
namespace: sonarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-anime-postgresql-17
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/sonarr4-anime/sonarr4-anime-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: sonarr-anime-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-anime-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: sonarr-anime/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-anime-postgresql-17-garage-local-backup"
namespace: sonarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-anime-postgresql-17
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/sonarr-anime/sonarr4-anime-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: sonarr-anime/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-anime-postgresql-17-recovery"
namespace: sonarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-anime-postgresql-17
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/sonarr-anime/sonarr4-anime-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: sonarr-anime/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: sonarr4-anime-postgresql-17-alert-rules
namespace: sonarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-anime-postgresql-17
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/sonarr4-anime-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr-anime"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr-anime"}) < 1
for: 5m
labels:
severity: critical
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
            summary: CNPG Cluster has less than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
              need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with less than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr-anime"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr-anime"}) < 2
for: 5m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="sonarr-anime",pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="sonarr-anime",pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
            summary: CNPG Cluster has a high number of transactions since the frozen XID.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
            summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
            summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
---
# Source: sonarr-anime/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: sonarr-anime
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
groups:
- name: sonarr-anime
rules:
- alert: ExportarrAbsent
annotations:
description: Sonarr Anime Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*sonarr-anime.*"} == 1)
for: 5m
labels:
severity: critical
- alert: SonarrAnimeDown
annotations:
description: Sonarr Anime service is down.
summary: Sonarr Anime is down.
expr: |
sonarr_anime_system_status{job=~".*sonarr-anime.*"} == 0
for: 5m
labels:
severity: critical
---
# Source: sonarr-anime/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: sonarr-anime-config-backup-source
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime-config-backup-source
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
sourcePVC: sonarr-anime-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: sonarr-anime-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: sonarr-anime/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "sonarr4-anime-postgresql-17-daily-backup-scheduled-backup"
namespace: sonarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-anime-postgresql-17
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: sonarr4-anime-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-anime-postgresql-17-external-backup"
---
# Source: sonarr-anime/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "sonarr4-anime-postgresql-17-live-backup-scheduled-backup"
namespace: sonarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-anime-postgresql-17
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: sonarr4-anime-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-anime-postgresql-17-garage-local-backup"
---
# Source: sonarr-anime/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: sonarr-anime
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
selector:
matchLabels:
app.kubernetes.io/name: sonarr-anime
app.kubernetes.io/instance: sonarr-anime
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics

View File

@@ -0,0 +1,928 @@
---
# Source: sonarr/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: sonarr-nfs-storage
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr-nfs-storage
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: sonarr/charts/sonarr/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: sonarr-config
labels:
app.kubernetes.io/instance: sonarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr
helm.sh/chart: sonarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: sonarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "20Gi"
storageClassName: "ceph-block"
---
# Source: sonarr/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: sonarr-nfs-storage
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr-nfs-storage
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
volumeName: sonarr-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: sonarr/charts/sonarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: sonarr
labels:
app.kubernetes.io/instance: sonarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr
app.kubernetes.io/service: sonarr
helm.sh/chart: sonarr-4.4.0
namespace: sonarr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8989
protocol: TCP
name: http
- port: 9794
targetPort: 9794
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr
app.kubernetes.io/name: sonarr
---
# Source: sonarr/charts/sonarr/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sonarr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr
helm.sh/chart: sonarr-4.4.0
namespace: sonarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: sonarr
app.kubernetes.io/instance: sonarr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr
app.kubernetes.io/name: sonarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/sonarr:4.0.16@sha256:60e5edcac39172294ad22d55d1b08c2c0a9fe658cad2f2c4d742ae017d7874de
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- args:
- sonarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9794"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: sonarr-config
- name: media
persistentVolumeClaim:
claimName: sonarr-nfs-storage
---
# Source: sonarr/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: sonarr4-postgresql-17-cluster
namespace: sonarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-postgresql-17
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "sonarr4-postgresql-17-external-backup"
serverName: "sonarr4-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "sonarr4-postgresql-17-garage-local-backup"
serverName: "sonarr4-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-postgresql-17-recovery"
serverName: sonarr4-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 200m
memory: 1Gi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: sonarr4-postgresql-17-backup-1
externalClusters:
- name: sonarr4-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "sonarr4-postgresql-17-recovery"
serverName: sonarr4-postgresql-17-backup-1
---
# Source: sonarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-config-backup-secret
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr-config-backup-secret
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/sonarr4/sonarr4-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: sonarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-postgresql-17-cluster-backup-secret
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: sonarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-postgresql-17-cluster-backup-secret-garage
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: sonarr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-sonarr
namespace: sonarr
labels:
app.kubernetes.io/name: http-route-sonarr
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- sonarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: sonarr
port: 80
weight: 100
---
# Source: sonarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-postgresql-17-external-backup"
namespace: sonarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-postgresql-17
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/sonarr4/sonarr4-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: sonarr-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: sonarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-postgresql-17-garage-local-backup"
namespace: sonarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-postgresql-17
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/sonarr/sonarr4-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: sonarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: sonarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: sonarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-postgresql-17-recovery"
namespace: sonarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-postgresql-17
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/sonarr/sonarr4-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: sonarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: sonarr/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: sonarr4-postgresql-17-alert-rules
namespace: sonarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-postgresql-17
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/sonarr4-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr"}) < 1
for: 5m
labels:
severity: critical
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
            summary: CNPG Cluster has less than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
              need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with less than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr"}) < 2
for: 5m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="sonarr",pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="sonarr",pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from the frozen XID
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster instances are in the same zone.
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
---
# Source: sonarr/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: sonarr
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
groups:
- name: sonarr
rules:
- alert: ExportarrAbsent
annotations:
description: Sonarr Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*sonarr.*"} == 1)
for: 5m
labels:
severity: critical
- alert: SonarrDown
annotations:
description: Sonarr service is down.
summary: Sonarr is down.
expr: |
sonarr_system_status{job=~".*sonarr.*"} == 0
for: 5m
labels:
severity: critical
---
# Source: sonarr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: sonarr-config-backup-source
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr-config-backup-source
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
sourcePVC: sonarr-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: sonarr-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: sonarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "sonarr4-postgresql-17-daily-backup-scheduled-backup"
namespace: sonarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-postgresql-17
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: sonarr4-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-postgresql-17-external-backup"
---
# Source: sonarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "sonarr4-postgresql-17-live-backup-scheduled-backup"
namespace: sonarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-postgresql-17
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: sonarr4-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-postgresql-17-garage-local-backup"
---
# Source: sonarr/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: sonarr
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
selector:
matchLabels:
app.kubernetes.io/name: sonarr
app.kubernetes.io/instance: sonarr
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics


@@ -0,0 +1,441 @@
---
# Source: tubearchivist/templates/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: tubearchivist
labels:
app.kubernetes.io/name: tubearchivist
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
pod-security.kubernetes.io/audit: privileged
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/warn: privileged
---
# Source: tubearchivist/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: tubearchivist-nfs-storage
namespace: tubearchivist
labels:
app.kubernetes.io/name: tubearchivist-nfs-storage
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/YouTube
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: tubearchivist/charts/tubearchivist/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: tubearchivist
labels:
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tubearchivist
helm.sh/chart: tubearchivist-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: tubearchivist
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "40Gi"
storageClassName: "ceph-block"
---
# Source: tubearchivist/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: tubearchivist-nfs-storage
namespace: tubearchivist
labels:
app.kubernetes.io/name: tubearchivist-nfs-storage
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
volumeName: tubearchivist-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: tubearchivist/charts/tubearchivist/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: tubearchivist
labels:
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tubearchivist
app.kubernetes.io/service: tubearchivist
helm.sh/chart: tubearchivist-4.4.0
namespace: tubearchivist
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 24000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/name: tubearchivist
---
# Source: tubearchivist/charts/tubearchivist/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: tubearchivist
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tubearchivist
helm.sh/chart: tubearchivist-4.4.0
namespace: tubearchivist
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: tubearchivist
app.kubernetes.io/instance: tubearchivist
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/name: tubearchivist
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: VPN_SERVICE_PROVIDER
value: protonvpn
- name: VPN_TYPE
value: wireguard
- name: WIREGUARD_PRIVATE_KEY
valueFrom:
secretKeyRef:
key: private-key
name: tubearchivist-wireguard-conf
- name: VPN_PORT_FORWARDING
value: "on"
- name: PORT_FORWARD_ONLY
value: "on"
- name: FIREWALL_OUTBOUND_SUBNETS
value: 10.0.0.0/8
- name: FIREWALL_INPUT_PORTS
value: 80,8000,24000
- name: DOT
value: "false"
- name: DNS_KEEP_NAMESERVER
value: "true"
- name: DNS_PLAINTEXT_ADDRESS
value: 10.96.0.10
image: ghcr.io/qdm12/gluetun:v3.40.3@sha256:ef4a44819a60469682c7b5e69183e6401171891feaa60186652d292c59e41b30
imagePullPolicy: IfNotPresent
name: gluetun
resources:
limits:
devic.es/tun: "1"
requests:
cpu: 10m
devic.es/tun: "1"
memory: 128Mi
securityContext:
capabilities:
add:
- NET_ADMIN
- SYS_MODULE
privileged: true
- env:
- name: TZ
value: US/Central
- name: HOST_UID
value: "1000"
- name: HOST_GID
value: "1000"
- name: ES_URL
value: https://elasticsearch-tubearchivist-es-http.tubearchivist:9200
- name: ES_DISABLE_VERIFY_SSL
value: "true"
- name: REDIS_CON
value: redis://redis-replication-tubearchivist-master.tubearchivist:6379
- name: TA_HOST
value: https://tubearchivist.alexlebens.net http://tubearchivist.tubearchivist:80/
- name: TA_PORT
value: "24000"
- name: TA_USERNAME
value: admin
envFrom:
- secretRef:
name: tubearchivist-config-secret
image: bbilly1/tubearchivist:v0.5.8
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 1Gi
volumeMounts:
- mountPath: /cache
name: data
- mountPath: /youtube
name: youtube
volumes:
- name: data
persistentVolumeClaim:
claimName: tubearchivist
- name: youtube
persistentVolumeClaim:
claimName: tubearchivist-nfs-storage
---
# Source: tubearchivist/templates/elasticsearch.yaml
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
name: elasticsearch-tubearchivist
namespace: tubearchivist
labels:
app.kubernetes.io/name: elasticsearch-tubearchivist
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
version: 8.18.0
auth:
fileRealm:
- secretName: tubearchivist-elasticsearch-secret
nodeSets:
- name: default
count: 1
config:
node.store.allow_mmap: false
path.repo: /usr/share/elasticsearch/data/snapshot
podTemplate:
spec:
volumes:
- name: tubearchivist-snapshot-nfs-storage
nfs:
path: /volume2/Storage/TubeArchivist
server: synologybond.alexlebens.net
containers:
- name: elasticsearch
volumeMounts:
- name: tubearchivist-snapshot-nfs-storage
mountPath: /usr/share/elasticsearch/data/snapshot
volumeClaimTemplates:
- metadata:
name: elasticsearch-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: ceph-block
---
# Source: tubearchivist/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: tubearchivist-config-secret
namespace: tubearchivist
labels:
app.kubernetes.io/name: tubearchivist-config-secret
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ELASTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/tubearchivist/env
metadataPolicy: None
property: ELASTIC_PASSWORD
- secretKey: TA_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/tubearchivist/env
metadataPolicy: None
property: TA_PASSWORD
---
# Source: tubearchivist/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: tubearchivist-elasticsearch-secret
namespace: tubearchivist
labels:
app.kubernetes.io/name: tubearchivist-elasticsearch-secret
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: username
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/tubearchivist/elasticsearch
metadataPolicy: None
property: username
- secretKey: password
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/tubearchivist/elasticsearch
metadataPolicy: None
property: password
- secretKey: roles
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/tubearchivist/elasticsearch
metadataPolicy: None
property: roles
---
# Source: tubearchivist/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: tubearchivist-wireguard-conf
namespace: tubearchivist
labels:
app.kubernetes.io/name: tubearchivist-wireguard-conf
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: private-key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /protonvpn/conf/cl01tl
metadataPolicy: None
property: private-key
---
# Source: tubearchivist/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-tubearchivist
namespace: tubearchivist
labels:
app.kubernetes.io/name: http-route-tubearchivist
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- tubearchivist.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: tubearchivist
port: 80
weight: 100
---
# Source: tubearchivist/templates/redis-replication.yaml
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: redis-replication-tubearchivist
namespace: tubearchivist
labels:
app.kubernetes.io/name: redis-replication-tubearchivist
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
clusterSize: 3
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
kubernetesConfig:
image: quay.io/opstree/redis:v8.0.3
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 50m
memory: 128Mi
storage:
volumeClaimTemplate:
spec:
storageClassName: ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
redisExporter:
enabled: true
image: quay.io/opstree/redis-exporter:v1.48.0
---
# Source: tubearchivist/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: redis-replication-tubearchivist
namespace: tubearchivist
labels:
app.kubernetes.io/name: redis-replication-tubearchivist
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
redis-operator: "true"
env: production
spec:
selector:
matchLabels:
redis_setup_type: replication
endpoints:
- port: redis-exporter
interval: 30s
scrapeTimeout: 10s


@@ -0,0 +1,846 @@
---
# Source: vaultwarden/charts/vaultwarden/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: vaultwarden-data
labels:
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: vaultwarden
helm.sh/chart: vaultwarden-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: vaultwarden
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: vaultwarden/charts/vaultwarden/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: vaultwarden
labels:
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: vaultwarden
app.kubernetes.io/service: vaultwarden
helm.sh/chart: vaultwarden-4.4.0
namespace: vaultwarden
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/name: vaultwarden
---
# Source: vaultwarden/charts/cloudflared/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: vaultwarden-cloudflared
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cloudflared
app.kubernetes.io/version: 2025.10.0
helm.sh/chart: cloudflared-1.23.0
namespace: vaultwarden
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: cloudflared
app.kubernetes.io/instance: vaultwarden
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/name: cloudflared
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- tunnel
- --protocol
- http2
- --no-autoupdate
- run
- --token
- $(CF_MANAGED_TUNNEL_TOKEN)
env:
- name: CF_MANAGED_TUNNEL_TOKEN
valueFrom:
secretKeyRef:
key: cf-tunnel-token
name: vaultwarden-cloudflared-secret
image: cloudflare/cloudflared:2025.11.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: vaultwarden/charts/vaultwarden/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: vaultwarden
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: vaultwarden
helm.sh/chart: vaultwarden-4.4.0
namespace: vaultwarden
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: vaultwarden
app.kubernetes.io/instance: vaultwarden
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/name: vaultwarden
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: DOMAIN
value: https://passwords.alexlebens.dev
- name: SIGNUPS_ALLOWED
value: "false"
- name: INVITATIONS_ALLOWED
value: "false"
- name: DATABASE_URL
valueFrom:
secretKeyRef:
key: uri
name: vaultwarden-postgresql-17-cluster-app
image: vaultwarden/server:1.34.3
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /data
name: config
volumes:
- name: config
persistentVolumeClaim:
claimName: vaultwarden-data
---
# Source: vaultwarden/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: vaultwarden-postgresql-17-cluster
namespace: vaultwarden
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: vaultwarden-postgresql-17
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "vaultwarden-postgresql-17-external-backup"
serverName: "vaultwarden-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "vaultwarden-postgresql-17-garage-local-backup"
serverName: "vaultwarden-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "vaultwarden-postgresql-17-recovery"
serverName: vaultwarden-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: vaultwarden-postgresql-17-backup-1
externalClusters:
- name: vaultwarden-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "vaultwarden-postgresql-17-recovery"
serverName: vaultwarden-postgresql-17-backup-1
---
# Source: vaultwarden/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: vaultwarden-cloudflared-secret
namespace: vaultwarden
labels:
app.kubernetes.io/name: vaultwarden-cloudflared-secret
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/vaultwarden
metadataPolicy: None
property: token
---
# Source: vaultwarden/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: vaultwarden-data-backup-secret
namespace: vaultwarden
labels:
app.kubernetes.io/name: vaultwarden-data-backup-secret
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/vaultwarden/vaultwarden-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: vaultwarden/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: vaultwarden-postgresql-17-cluster-backup-secret
namespace: vaultwarden
labels:
app.kubernetes.io/name: vaultwarden-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: vaultwarden/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: vaultwarden-postgresql-17-cluster-backup-secret-garage
namespace: vaultwarden
labels:
app.kubernetes.io/name: vaultwarden-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: vaultwarden/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "vaultwarden-postgresql-17-external-backup"
namespace: vaultwarden
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: vaultwarden-postgresql-17
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/vaultwarden/vaultwarden-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: vaultwarden-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: vaultwarden-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: vaultwarden/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "vaultwarden-postgresql-17-garage-local-backup"
namespace: vaultwarden
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: vaultwarden-postgresql-17
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/vaultwarden/vaultwarden-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: vaultwarden-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: vaultwarden-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: vaultwarden-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: vaultwarden/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "vaultwarden-postgresql-17-recovery"
namespace: vaultwarden
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: vaultwarden-postgresql-17
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/vaultwarden/vaultwarden-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: vaultwarden-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: vaultwarden-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: vaultwarden/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: vaultwarden-postgresql-17-alert-rules
namespace: vaultwarden
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: vaultwarden-postgresql-17
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/vaultwarden-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="vaultwarden"} - cnpg_pg_replication_is_wal_receiver_up{namespace="vaultwarden"}) < 1
for: 5m
labels:
severity: critical
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="vaultwarden"} - cnpg_pg_replication_is_wal_receiver_up{namespace="vaultwarden"}) < 2
for: 5m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="vaultwarden", pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="vaultwarden", pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="vaultwarden", pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="vaultwarden", pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="vaultwarden",pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="vaultwarden", pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="vaultwarden",pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from the frozen XID
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster instances are in the same zone.
description: |-
CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="vaultwarden", pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
---
# Source: vaultwarden/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: vaultwarden-data-backup-source
namespace: vaultwarden
labels:
app.kubernetes.io/name: vaultwarden-data-backup-source
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
spec:
sourcePVC: vaultwarden-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: vaultwarden-data-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: vaultwarden/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "vaultwarden-postgresql-17-daily-backup-scheduled-backup"
namespace: vaultwarden
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: vaultwarden-postgresql-17
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: vaultwarden-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "vaultwarden-postgresql-17-external-backup"
---
# Source: vaultwarden/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "vaultwarden-postgresql-17-live-backup-scheduled-backup"
namespace: vaultwarden
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: vaultwarden-postgresql-17
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: vaultwarden-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "vaultwarden-postgresql-17-garage-local-backup"


@@ -0,0 +1,816 @@
---
# Source: yamtrack/charts/yamtrack/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: yamtrack
labels:
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: yamtrack
app.kubernetes.io/service: yamtrack
helm.sh/chart: yamtrack-4.4.0
namespace: yamtrack
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/name: yamtrack
---
# Source: yamtrack/charts/yamtrack/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: yamtrack
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: yamtrack
helm.sh/chart: yamtrack-4.4.0
namespace: yamtrack
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: yamtrack
app.kubernetes.io/instance: yamtrack
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/name: yamtrack
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: URLS
value: https://yamtrack.alexlebens.net
- name: REGISTRATION
value: "false"
- name: SOCIAL_PROVIDERS
value: allauth.socialaccount.providers.openid_connect
- name: SOCIALACCOUNT_PROVIDERS
valueFrom:
secretKeyRef:
key: SOCIALACCOUNT_PROVIDERS
name: yamtrack-oidc-secret
- name: SECRET
valueFrom:
secretKeyRef:
key: SECRET
name: yamtrack-config-secret
- name: REDIS_URL
value: redis://redis-replication-yamtrack-master.yamtrack:6379
- name: DB_USER
valueFrom:
secretKeyRef:
key: username
name: yamtrack-postgresql-17-cluster-app
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: yamtrack-postgresql-17-cluster-app
- name: DB_NAME
valueFrom:
secretKeyRef:
key: dbname
name: yamtrack-postgresql-17-cluster-app
- name: DB_HOST
valueFrom:
secretKeyRef:
key: host
name: yamtrack-postgresql-17-cluster-app
- name: DB_PORT
valueFrom:
secretKeyRef:
key: port
name: yamtrack-postgresql-17-cluster-app
image: ghcr.io/fuzzygrim/yamtrack:0.24.7
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
---
# Source: yamtrack/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: yamtrack-postgresql-17-cluster
namespace: yamtrack
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: yamtrack-postgresql-17
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "yamtrack-postgresql-17-external-backup"
serverName: "yamtrack-postgresql-17-backup-2"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "yamtrack-postgresql-17-garage-local-backup"
serverName: "yamtrack-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "yamtrack-postgresql-17-recovery"
serverName: yamtrack-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: yamtrack-postgresql-17-backup-1
externalClusters:
- name: yamtrack-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "yamtrack-postgresql-17-recovery"
serverName: yamtrack-postgresql-17-backup-1
---
# Source: yamtrack/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: yamtrack-config-secret
namespace: yamtrack
labels:
app.kubernetes.io/name: yamtrack-config-secret
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: SECRET
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/yamtrack/config
metadataPolicy: None
property: SECRET
---
# Source: yamtrack/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: yamtrack-oidc-secret
namespace: yamtrack
labels:
app.kubernetes.io/name: yamtrack-oidc-secret
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: SOCIALACCOUNT_PROVIDERS
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/yamtrack
metadataPolicy: None
property: SOCIALACCOUNT_PROVIDERS
---
# Source: yamtrack/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: yamtrack-postgresql-17-cluster-backup-secret
namespace: yamtrack
labels:
app.kubernetes.io/name: yamtrack-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: yamtrack/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: yamtrack-postgresql-17-cluster-backup-secret-garage
namespace: yamtrack
labels:
app.kubernetes.io/name: yamtrack-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: yamtrack/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-yamtrack
namespace: yamtrack
labels:
app.kubernetes.io/name: http-route-yamtrack
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- yamtrack.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: yamtrack
port: 80
weight: 100
---
# Source: yamtrack/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "yamtrack-postgresql-17-external-backup"
namespace: yamtrack
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: yamtrack-postgresql-17
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/yamtrack/yamtrack-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: yamtrack-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: yamtrack-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: yamtrack/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "yamtrack-postgresql-17-garage-local-backup"
namespace: yamtrack
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: yamtrack-postgresql-17
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/yamtrack/yamtrack-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: yamtrack-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: yamtrack-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: yamtrack-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: yamtrack/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "yamtrack-postgresql-17-recovery"
namespace: yamtrack
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: yamtrack-postgresql-17
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/yamtrack/yamtrack-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: yamtrack-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: yamtrack-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: yamtrack/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: yamtrack-postgresql-17-alert-rules
namespace: yamtrack
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: yamtrack-postgresql-17
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/yamtrack-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
              Pod {{ $labels.pod }}
              has a backend that has been waiting for longer than 5 minutes.
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="yamtrack"} - cnpg_pg_replication_is_wal_receiver_up{namespace="yamtrack"}) < 1
for: 5m
labels:
severity: critical
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster less than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
              This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
              need some time to catch up with the cluster primary instance.
              This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="yamtrack"} - cnpg_pg_replication_is_wal_receiver_up{namespace="yamtrack"}) < 2
for: 5m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
            summary: CNPG Instance is critically close to the maximum number of connections!
description: |-
CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="yamtrack", pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="yamtrack", pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="yamtrack", pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="yamtrack", pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="yamtrack",pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="yamtrack", pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
            summary: CNPG Cluster has a query taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="yamtrack",pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
            summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
            summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
            summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="yamtrack", pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
---
# Source: yamtrack/templates/redis-replication.yaml
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: redis-replication-yamtrack
namespace: yamtrack
labels:
app.kubernetes.io/name: redis-replication-yamtrack
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
spec:
clusterSize: 3
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
kubernetesConfig:
image: quay.io/opstree/redis:v8.0.3
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 50m
memory: 128Mi
storage:
volumeClaimTemplate:
spec:
storageClassName: ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
redisExporter:
enabled: true
image: quay.io/opstree/redis-exporter:v1.48.0
---
# Source: yamtrack/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "yamtrack-postgresql-17-daily-backup-scheduled-backup"
namespace: yamtrack
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: yamtrack-postgresql-17
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: yamtrack-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "yamtrack-postgresql-17-external-backup"
---
# Source: yamtrack/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "yamtrack-postgresql-17-live-backup-scheduled-backup"
namespace: yamtrack
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: yamtrack-postgresql-17
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: yamtrack-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "yamtrack-postgresql-17-garage-local-backup"
---
# Source: yamtrack/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: redis-replication-yamtrack
namespace: yamtrack
labels:
app.kubernetes.io/name: redis-replication-yamtrack
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
redis-operator: "true"
env: production
spec:
selector:
matchLabels:
redis_setup_type: replication
endpoints:
- port: redis-exporter
interval: 30s
scrapeTimeout: 10s