Automated Manifest Update: 2025-12-01 #2155

Merged
alexlebens merged 1 commit from auto/update-manifests-1764548222 into manifests on 2025-12-01 00:18:34 +00:00
46 changed files with 26816 additions and 0 deletions
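The files below are rendered Helm output committed to the manifests branch by the automation. A minimal sketch of how a single application's manifest can be regenerated and checked locally, assuming a chart layout matching the # Source: comments (for example an umbrella chart at ./actual) and standard Helm and kubectl tooling:

$ # Render the umbrella chart into one multi-document manifest (chart path is an assumption)
$ helm template actual ./actual --namespace actual > actual.yaml
$ # Server-side dry run to confirm every rendered document is schema-valid against the cluster
$ kubectl apply --dry-run=server -f actual.yaml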

View File

@@ -0,0 +1,227 @@
---
# Source: actual/charts/actual/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: actual-data
labels:
app.kubernetes.io/instance: actual
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: actual
helm.sh/chart: actual-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: actual
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "2Gi"
storageClassName: "ceph-block"
---
# Source: actual/charts/actual/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: actual
labels:
app.kubernetes.io/instance: actual
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: actual
app.kubernetes.io/service: actual
helm.sh/chart: actual-4.4.0
namespace: actual
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 5006
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: actual
app.kubernetes.io/name: actual
---
# Source: actual/charts/actual/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: actual
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: actual
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: actual
helm.sh/chart: actual-4.4.0
namespace: actual
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: actual
app.kubernetes.io/instance: actual
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: actual
app.kubernetes.io/name: actual
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
image: ghcr.io/actualbudget/actual:25.11.0
imagePullPolicy: IfNotPresent
livenessProbe:
exec:
command:
- /usr/bin/env
- bash
- -c
- node src/scripts/health-check.js
failureThreshold: 5
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /data
name: data
volumes:
- name: data
persistentVolumeClaim:
claimName: actual-data
---
# Source: actual/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: actual-data-backup-secret
namespace: actual
labels:
app.kubernetes.io/name: actual-data-backup-secret
app.kubernetes.io/instance: actual
app.kubernetes.io/part-of: actual
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/actual/actual-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: actual/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-actual
namespace: actual
labels:
app.kubernetes.io/name: http-route-actual
app.kubernetes.io/instance: actual
app.kubernetes.io/part-of: actual
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- actual.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: actual
port: 80
weight: 100
---
# Source: actual/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: actual-data-backup-source
namespace: actual
labels:
app.kubernetes.io/name: actual-data-backup-source
app.kubernetes.io/instance: actual
app.kubernetes.io/part-of: actual
spec:
sourcePVC: actual-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: actual-data-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
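The ExternalSecret above assembles the restic credentials that the ReplicationSource references by name, so the nightly backup only works once both have reconciled. A hedged way to spot-check this after the 04:00 run, assuming VolSync's usual status fields such as lastSyncTime:

$ # Confirm the secret was materialized from Vault
$ kubectl -n actual get externalsecret actual-data-backup-secret
$ # Inspect the last successful backup time reported by VolSync (field name assumed from VolSync's status schema)
$ kubectl -n actual get replicationsource actual-data-backup-source -o jsonpath='{.status.lastSyncTime}'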

View File

@@ -0,0 +1,471 @@
---
# Source: audiobookshelf/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: audiobookshelf-nfs-storage
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-nfs-storage
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: audiobookshelf/charts/audiobookshelf/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: audiobookshelf-config
labels:
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: audiobookshelf
helm.sh/chart: audiobookshelf-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: audiobookshelf
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "2Gi"
storageClassName: "ceph-block"
---
# Source: audiobookshelf/charts/audiobookshelf/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: audiobookshelf-metadata
labels:
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: audiobookshelf
helm.sh/chart: audiobookshelf-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: audiobookshelf
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: audiobookshelf/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: audiobookshelf-nfs-storage-backup
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-nfs-storage-backup
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
volumeMode: Filesystem
storageClassName: nfs-client
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
# Source: audiobookshelf/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: audiobookshelf-nfs-storage
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-nfs-storage
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
volumeName: audiobookshelf-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: audiobookshelf/charts/audiobookshelf/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: audiobookshelf
labels:
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: audiobookshelf
app.kubernetes.io/service: audiobookshelf
helm.sh/chart: audiobookshelf-4.4.0
namespace: audiobookshelf
spec:
type: ClusterIP
ports:
- port: 8000
targetPort: 8000
protocol: TCP
name: apprise
- port: 80
targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/name: audiobookshelf
---
# Source: audiobookshelf/charts/audiobookshelf/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: audiobookshelf
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: audiobookshelf
helm.sh/chart: audiobookshelf-4.4.0
namespace: audiobookshelf
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: audiobookshelf
app.kubernetes.io/instance: audiobookshelf
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/name: audiobookshelf
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PGID
value: "1000"
- name: PUID
value: "1000"
- name: APPRISE_STORAGE_MODE
value: memory
- name: APPRISE_STATEFUL_MODE
value: disabled
- name: APPRISE_WORKER_COUNT
value: "1"
- name: APPRISE_STATELESS_URLS
valueFrom:
secretKeyRef:
key: ntfy-url
name: audiobookshelf-apprise-config
image: caronc/apprise:1.2.6
imagePullPolicy: IfNotPresent
name: apprise-api
resources:
requests:
cpu: 10m
memory: 128Mi
- env:
- name: TZ
value: US/Central
image: ghcr.io/advplyr/audiobookshelf:2.30.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /mnt/store/
name: audiobooks
- mountPath: /metadata/backups
name: backup
- mountPath: /config
name: config
- mountPath: /metadata
name: metadata
volumes:
- name: audiobooks
persistentVolumeClaim:
claimName: audiobookshelf-nfs-storage
- name: backup
persistentVolumeClaim:
claimName: audiobookshelf-nfs-storage-backup
- name: config
persistentVolumeClaim:
claimName: audiobookshelf-config
- name: metadata
persistentVolumeClaim:
claimName: audiobookshelf-metadata
---
# Source: audiobookshelf/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: audiobookshelf-apprise-config
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-apprise-config
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ntfy-url
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/audiobookshelf/apprise
metadataPolicy: None
property: ntfy-url
---
# Source: audiobookshelf/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: audiobookshelf-config-backup-secret
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-config-backup-secret
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/audiobookshelf/audiobookshelf-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: audiobookshelf/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: audiobookshelf-metadata-backup-secret
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-metadata-backup-secret
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/audiobookshelf/audiobookshelf-metadata"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: audiobookshelf/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-audiobookshelf
namespace: audiobookshelf
labels:
app.kubernetes.io/name: http-route-audiobookshelf
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- audiobookshelf.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: audiobookshelf
port: 80
weight: 100
---
# Source: audiobookshelf/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: audiobookshelf-config-backup-source
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-config-backup-source
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
sourcePVC: audiobookshelf-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: audiobookshelf-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: audiobookshelf/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: audiobookshelf-metadata-backup-source
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-metadata-backup-source
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
sourcePVC: audiobookshelf-metadata
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: audiobookshelf-metadata-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: audiobookshelf/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: audiobookshelf-apprise
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-apprise
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
endpoints:
- port: apprise
interval: 30s
scrapeTimeout: 15s
path: /metrics
selector:
matchLabels:
app.kubernetes.io/name: audiobookshelf
app.kubernetes.io/instance: audiobookshelf
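The ServiceMonitor above scrapes the endpoint port named apprise, which has to line up with the port name and selector labels on the audiobookshelf Service earlier in this file. A quick sanity check of that wiring, using only standard kubectl queries:

$ # The selector labels used by the ServiceMonitor should match exactly one Service
$ kubectl -n audiobookshelf get svc -l app.kubernetes.io/name=audiobookshelf,app.kubernetes.io/instance=audiobookshelf
$ # The Service must expose a port literally named "apprise" for the endpoint to resolve
$ kubectl -n audiobookshelf get svc audiobookshelf -o jsonpath='{.spec.ports[*].name}'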

View File

@@ -0,0 +1,278 @@
---
# Source: bazarr/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: bazarr-nfs-storage
namespace: bazarr
labels:
app.kubernetes.io/name: bazarr-nfs-storage
app.kubernetes.io/instance: bazarr
app.kubernetes.io/part-of: bazarr
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: bazarr/charts/bazarr/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: bazarr-config
labels:
app.kubernetes.io/instance: bazarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: bazarr
helm.sh/chart: bazarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: bazarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: bazarr/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: bazarr-nfs-storage
namespace: bazarr
labels:
app.kubernetes.io/name: bazarr-nfs-storage
app.kubernetes.io/instance: bazarr
app.kubernetes.io/part-of: bazarr
spec:
volumeName: bazarr-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: bazarr/charts/bazarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: bazarr
labels:
app.kubernetes.io/instance: bazarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: bazarr
app.kubernetes.io/service: bazarr
helm.sh/chart: bazarr-4.4.0
namespace: bazarr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 6767
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: bazarr
app.kubernetes.io/name: bazarr
---
# Source: bazarr/charts/bazarr/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: bazarr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: bazarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: bazarr
helm.sh/chart: bazarr-4.4.0
namespace: bazarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: bazarr
app.kubernetes.io/instance: bazarr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: bazarr
app.kubernetes.io/name: bazarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 1000
runAsUser: 1000
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/bazarr:1.5.3@sha256:2be164c02c0bb311b6c32e57d3d0ddc2813d524e89ab51a3408c1bf6fafecda5
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
volumes:
- name: config
persistentVolumeClaim:
claimName: bazarr-config
- name: media
persistentVolumeClaim:
claimName: bazarr-nfs-storage
---
# Source: bazarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: bazarr-config-backup-secret
namespace: bazarr
labels:
app.kubernetes.io/name: bazarr-config-backup-secret
app.kubernetes.io/instance: bazarr
app.kubernetes.io/part-of: bazarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/bazarr/bazarr-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: bazarr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-bazarr
namespace: bazarr
labels:
app.kubernetes.io/name: http-route-bazarr
app.kubernetes.io/instance: bazarr
app.kubernetes.io/part-of: bazarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- bazarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: bazarr
port: 80
weight: 100
---
# Source: bazarr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: bazarr-config-backup-source
namespace: bazarr
labels:
app.kubernetes.io/name: bazarr-config-backup-source
app.kubernetes.io/instance: bazarr
app.kubernetes.io/part-of: bazarr
spec:
sourcePVC: bazarr-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: bazarr-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot

View File

@@ -0,0 +1,946 @@
---
# Source: booklore/templates/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: booklore
annotations:
volsync.backube/privileged-movers: "true"
labels:
app.kubernetes.io/name: booklore
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
---
# Source: booklore/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: booklore-books-nfs-storage
namespace: booklore
labels:
app.kubernetes.io/name: booklore-books-nfs-storage
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Books
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: booklore/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: booklore-books-import-nfs-storage
namespace: booklore
labels:
app.kubernetes.io/name: booklore-books-import-nfs-storage
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Books Import
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: booklore/charts/booklore/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: booklore-config
labels:
app.kubernetes.io/instance: booklore
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: booklore
helm.sh/chart: booklore-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: booklore
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: booklore/charts/booklore/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: booklore-data
labels:
app.kubernetes.io/instance: booklore
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: booklore
helm.sh/chart: booklore-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: booklore
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: booklore/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: booklore-books-nfs-storage
namespace: booklore
labels:
app.kubernetes.io/name: booklore-books-nfs-storage
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
volumeName: booklore-books-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: booklore/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: booklore-books-import-nfs-storage
namespace: booklore
labels:
app.kubernetes.io/name: booklore-books-import-nfs-storage
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
volumeName: booklore-books-import-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: booklore/charts/booklore/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: booklore
labels:
app.kubernetes.io/instance: booklore
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: booklore
app.kubernetes.io/service: booklore
helm.sh/chart: booklore-4.4.0
namespace: booklore
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 6060
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: booklore
app.kubernetes.io/name: booklore
---
# Source: booklore/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: garage-ps10rp
namespace: booklore
labels:
app.kubernetes.io/name: garage-ps10rp
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
annotations:
tailscale.com/tailnet-fqdn: garage-ps10rp.boreal-beaufort.ts.net
spec:
externalName: placeholder
type: ExternalName
---
# Source: booklore/charts/booklore/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: booklore
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: booklore
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: booklore
helm.sh/chart: booklore-4.4.0
namespace: booklore
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: booklore
app.kubernetes.io/instance: booklore
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: booklore
app.kubernetes.io/name: booklore
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: America/Chicago
- name: DATABASE_URL
value: jdbc:mariadb://booklore-mariadb-cluster-primary.booklore:3306/booklore
- name: DATABASE_USERNAME
value: booklore
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: booklore-database-secret
- name: BOOKLORE_PORT
value: "6060"
- name: SWAGGER_ENABLED
value: "false"
image: ghcr.io/booklore-app/booklore:v1.12.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 50m
memory: 128Mi
volumeMounts:
- mountPath: /bookdrop
name: books-import
- mountPath: /app/data
name: config
- mountPath: /data
name: data
- mountPath: /bookdrop/ingest
name: ingest
volumes:
- emptyDir: {}
name: books-import
- name: config
persistentVolumeClaim:
claimName: booklore-config
- name: data
persistentVolumeClaim:
claimName: booklore-data
- name: ingest
persistentVolumeClaim:
claimName: booklore-books-import-nfs-storage
---
# Source: booklore/charts/mariadb-cluster/templates/database.yaml
apiVersion: k8s.mariadb.com/v1alpha1
kind: Database
metadata:
name: booklore-mariadb-cluster-booklore
namespace: booklore
labels:
helm.sh/chart: mariadb-cluster-25.10.2
app.kubernetes.io/name: mariadb-cluster
app.kubernetes.io/instance: booklore
app.kubernetes.io/version: "0.0.0"
app.kubernetes.io/managed-by: Helm
spec:
mariaDbRef:
name: booklore-mariadb-cluster
namespace: booklore
characterSet: utf8
cleanupPolicy: Delete
collate: utf8_general_ci
name: booklore
requeueInterval: 10h
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-database-secret
namespace: booklore
labels:
app.kubernetes.io/name: booklore-database-secret
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: password
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/booklore/database
metadataPolicy: None
property: password
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-data-replication-secret
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-replication-secret
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: psk.txt
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/booklore/replication
metadataPolicy: None
property: psk.txt
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-config-backup-secret
namespace: booklore
labels:
app.kubernetes.io/name: booklore-config-backup-secret
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/booklore/booklore-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_SECRET_ACCESS_KEY
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-data-backup-secret-local
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-backup-secret-local
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/booklore/booklore-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-local
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-local
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-data-backup-secret-remote
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-backup-secret-remote
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/booklore/booklore-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-remote
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-remote
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-data-backup-secret-external
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-backup-secret-external
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/booklore/booklore-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_SECRET_ACCESS_KEY
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-mariadb-cluster-backup-secret-external
namespace: booklore
labels:
app.kubernetes.io/name: booklore-mariadb-cluster-backup-secret-external
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: access
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/mariadb-backups
metadataPolicy: None
property: access
- secretKey: secret
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/mariadb-backups
metadataPolicy: None
property: secret
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-mariadb-cluster-backup-secret-garage
namespace: booklore
labels:
app.kubernetes.io/name: booklore-mariadb-cluster-backup-secret-garage
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: access
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/mariadb-backups
metadataPolicy: None
property: access
- secretKey: secret
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/mariadb-backups
metadataPolicy: None
property: secret
---
# Source: booklore/charts/mariadb-cluster/templates/grant.yaml
apiVersion: k8s.mariadb.com/v1alpha1
kind: Grant
metadata:
name: booklore-mariadb-cluster-booklore
namespace: booklore
labels:
helm.sh/chart: mariadb-cluster-25.10.2
app.kubernetes.io/name: mariadb-cluster
app.kubernetes.io/instance: booklore
app.kubernetes.io/version: "0.0.0"
app.kubernetes.io/managed-by: Helm
spec:
mariaDbRef:
name: booklore-mariadb-cluster
namespace: booklore
cleanupPolicy: Delete
database: booklore
grantOption: true
host: '%'
privileges:
- ALL PRIVILEGES
requeueInterval: 10h
retryInterval: 30s
table: '*'
username: booklore
---
# Source: booklore/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-booklore
namespace: booklore
labels:
app.kubernetes.io/name: http-route-booklore
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- booklore.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: booklore
port: 80
weight: 100
---
# Source: booklore/charts/mariadb-cluster/templates/mariadb.yaml
apiVersion: k8s.mariadb.com/v1alpha1
kind: MariaDB
metadata:
name: booklore-mariadb-cluster
namespace: booklore
labels:
helm.sh/chart: mariadb-cluster-25.10.2
app.kubernetes.io/name: mariadb-cluster
app.kubernetes.io/instance: booklore
app.kubernetes.io/version: "0.0.0"
app.kubernetes.io/managed-by: Helm
spec:
galera:
enabled: true
replicas: 3
rootPasswordSecretKeyRef:
generate: false
key: password
name: booklore-database-secret
storage:
size: 5Gi
---
# Source: booklore/charts/mariadb-cluster/templates/physicalbackup.yaml
apiVersion: k8s.mariadb.com/v1alpha1
kind: PhysicalBackup
metadata:
name: booklore-mariadb-cluster-backup-external
namespace: booklore
labels:
helm.sh/chart: mariadb-cluster-25.10.2
app.kubernetes.io/name: mariadb-cluster
app.kubernetes.io/instance: booklore
app.kubernetes.io/version: "0.0.0"
app.kubernetes.io/managed-by: Helm
spec:
mariaDbRef:
name: booklore-mariadb-cluster
namespace: booklore
compression: gzip
maxRetention: 720h
schedule:
cron: 0 0 * * 0
immediate: true
suspend: false
storage:
s3:
accessKeyIdSecretKeyRef:
key: access
name: booklore-mariadb-cluster-backup-secret-external
bucket: mariadb-backups-b230a2f5aecf080a4b372c08
endpoint: nyc3.digitaloceanspaces.com
prefix: cl01tl/booklore
region: us-east-1
secretAccessKeySecretKeyRef:
key: secret
name: booklore-mariadb-cluster-backup-secret-external
tls:
enabled: true
---
# Source: booklore/charts/mariadb-cluster/templates/physicalbackup.yaml
apiVersion: k8s.mariadb.com/v1alpha1
kind: PhysicalBackup
metadata:
name: booklore-mariadb-cluster-backup-garage
namespace: booklore
labels:
helm.sh/chart: mariadb-cluster-25.10.2
app.kubernetes.io/name: mariadb-cluster
app.kubernetes.io/instance: booklore
app.kubernetes.io/version: "0.0.0"
app.kubernetes.io/managed-by: Helm
spec:
mariaDbRef:
name: booklore-mariadb-cluster
namespace: booklore
compression: gzip
maxRetention: 360h
schedule:
cron: 0 0 * * *
immediate: true
suspend: false
storage:
s3:
accessKeyIdSecretKeyRef:
key: access
name: booklore-mariadb-cluster-backup-secret-garage
bucket: mariadb-backups
endpoint: garage-main.garage:3900
prefix: cl01tl/booklore
region: us-east-1
secretAccessKeySecretKeyRef:
key: secret
name: booklore-mariadb-cluster-backup-secret-garage
---
# Source: booklore/templates/replication-destination.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationDestination
metadata:
name: booklore-data-replication-destination
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-replication-destination
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
rsyncTLS:
copyMethod: Direct
accessModes: ["ReadWriteMany"]
destinationPVC: booklore-books-nfs-storage
keySecret: booklore-data-replication-secret
---
# Source: booklore/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-data-replication-source
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-replication-source
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
sourcePVC: booklore-data
trigger:
schedule: "0 0 * * *"
rsyncTLS:
keySecret: booklore-data-replication-secret
address: volsync-rsync-tls-dst-booklore-data-replication-destination
copyMethod: Snapshot
---
# Source: booklore/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-config-backup-source
namespace: booklore
labels:
app.kubernetes.io/name: booklore-config-backup-source
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
sourcePVC: booklore-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: booklore-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi
---
# Source: booklore/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-data-backup-source-local
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-backup-source-local
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
sourcePVC: booklore-data
trigger:
schedule: 0 2 * * *
restic:
pruneIntervalDays: 7
repository: booklore-data-backup-secret-local
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi
---
# Source: booklore/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-data-backup-source-remote
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-backup-source-remote
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
sourcePVC: booklore-data
trigger:
schedule: 0 3 * * *
restic:
pruneIntervalDays: 7
repository: booklore-data-backup-secret-remote
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi
---
# Source: booklore/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-data-backup-source-external
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-backup-source-external
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
sourcePVC: booklore-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: booklore-data-backup-secret-external
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi
---
# Source: booklore/charts/mariadb-cluster/templates/user.yaml
apiVersion: k8s.mariadb.com/v1alpha1
kind: User
metadata:
name: booklore-mariadb-cluster-booklore
namespace: booklore
labels:
helm.sh/chart: mariadb-cluster-25.10.2
app.kubernetes.io/name: mariadb-cluster
app.kubernetes.io/instance: booklore
app.kubernetes.io/version: "0.0.0"
app.kubernetes.io/managed-by: Helm
spec:
mariaDbRef:
name: booklore-mariadb-cluster
namespace: booklore
cleanupPolicy: Delete
host: '%'
name: booklore
passwordSecretKeyRef:
key: password
name: booklore-database-secret
requeueInterval: 10h
retryInterval: 30s
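Unlike the restic-based sources, the booklore-data pair above uses rsync-TLS: the ReplicationSource pushes to the address volsync-rsync-tls-dst-booklore-data-replication-destination, which the ReplicationDestination is expected to expose as an in-namespace Service. A hedged check that the two halves have connected, assuming VolSync creates that Service for the destination mover:

$ # The destination should publish the rsync-tls endpoint the source dials
$ kubectl -n booklore get svc volsync-rsync-tls-dst-booklore-data-replication-destination
$ # Both CRs report sync progress in their status once the 00:00 schedule has fired
$ kubectl -n booklore get replicationsource booklore-data-replication-source
$ kubectl -n booklore get replicationdestination booklore-data-replication-destination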

View File

@@ -0,0 +1,251 @@
---
# Source: code-server/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: code-server-nfs-storage
namespace: code-server
labels:
app.kubernetes.io/name: code-server-nfs-storage
app.kubernetes.io/instance: code-server
app.kubernetes.io/part-of: code-server
spec:
volumeMode: Filesystem
storageClassName: nfs-client
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
# Source: code-server/charts/code-server/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: code-server
labels:
app.kubernetes.io/instance: code-server
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: code-server
app.kubernetes.io/service: code-server
helm.sh/chart: code-server-4.4.0
namespace: code-server
spec:
type: ClusterIP
ports:
- port: 8443
targetPort: 8443
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: code-server
app.kubernetes.io/name: code-server
---
# Source: code-server/charts/cloudflared/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: code-server-cloudflared
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: code-server
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cloudflared
app.kubernetes.io/version: 2025.10.0
helm.sh/chart: cloudflared-1.23.0
namespace: code-server
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: cloudflared
app.kubernetes.io/instance: code-server
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: code-server
app.kubernetes.io/name: cloudflared
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- tunnel
- --protocol
- http2
- --no-autoupdate
- run
- --token
- $(CF_MANAGED_TUNNEL_TOKEN)
env:
- name: CF_MANAGED_TUNNEL_TOKEN
valueFrom:
secretKeyRef:
key: cf-tunnel-token
name: code-server-cloudflared-secret
image: cloudflare/cloudflared:2025.11.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: code-server/charts/code-server/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: code-server
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: code-server
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: code-server
helm.sh/chart: code-server-4.4.0
namespace: code-server
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: code-server
app.kubernetes.io/instance: code-server
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: code-server
app.kubernetes.io/name: code-server
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: DEFAULT_WORKSPACE
value: /config
envFrom:
- secretRef:
name: codeserver-password-secret
image: ghcr.io/linuxserver/code-server:4.106.2@sha256:a98afdbcb59559f11e5e8df284062e55da1076b2e470e13db4aae133ea82bad0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config
name: config
volumes:
- name: config
persistentVolumeClaim:
claimName: code-server-nfs-storage
---
# Source: code-server/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: codeserver-password-secret
namespace: code-server
labels:
app.kubernetes.io/name: codeserver-password-secret
app.kubernetes.io/instance: code-server
app.kubernetes.io/part-of: code-server
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/code-server/auth
metadataPolicy: None
property: PASSWORD
- secretKey: SUDO_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/code-server/auth
metadataPolicy: None
property: SUDO_PASSWORD
---
# Source: code-server/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: code-server-cloudflared-secret
namespace: code-server
labels:
app.kubernetes.io/name: code-server-cloudflared-secret
app.kubernetes.io/instance: code-server
app.kubernetes.io/part-of: code-server
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/codeserver
metadataPolicy: None
property: token
---
# Source: code-server/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-code-server
namespace: code-server
labels:
app.kubernetes.io/name: http-route-code-server
app.kubernetes.io/instance: code-server
app.kubernetes.io/part-of: code-server
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- code-server.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: code-server
port: 8443
weight: 100
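The code-server container reads PASSWORD and SUDO_PASSWORD via envFrom from the codeserver-password-secret that External Secrets populates out of Vault, so a failed sync blocks the pod from starting. A minimal check, using only standard kubectl calls:

$ # The ExternalSecret should report a Ready/SecretSynced condition once the ClusterSecretStore lookup succeeds
$ kubectl -n code-server get externalsecret codeserver-password-secret
$ # The resulting Kubernetes Secret must exist before the Deployment can start cleanly
$ kubectl -n code-server get secret codeserver-password-secret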

File diff suppressed because it is too large.

View File

@@ -0,0 +1,258 @@
---
# Source: element-web/charts/element-web/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: element-web
labels:
helm.sh/chart: element-web-1.4.24
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: element-web
app.kubernetes.io/version: "1.12.4"
app.kubernetes.io/managed-by: Helm
---
# Source: element-web/charts/element-web/templates/configuration-nginx.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: element-web-nginx
labels:
helm.sh/chart: element-web-1.4.24
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: element-web
app.kubernetes.io/version: "1.12.4"
app.kubernetes.io/managed-by: Helm
data:
default.conf: |
server {
listen 8080;
listen [::]:8080;
server_name localhost;
root /usr/share/nginx/html;
index index.html;
add_header X-Frame-Options SAMEORIGIN;
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection "1; mode=block";
add_header Content-Security-Policy "frame-ancestors 'self'";
# Set no-cache for the index.html only so that browsers always check for a new copy of Element Web.
location = /index.html {
add_header Cache-Control "no-cache";
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
}
---
# Source: element-web/charts/element-web/templates/configuration.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: element-web
labels:
helm.sh/chart: element-web-1.4.24
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: element-web
app.kubernetes.io/version: "1.12.4"
app.kubernetes.io/managed-by: Helm
data:
config.json: |
{"brand":"Alex Lebens","branding":{"auth_header_logo_url":"https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/logo-new-round.png","welcome_background_url":"https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/background.jpg"},"default_country_code":"US","default_server_config":{"m.homeserver":{"base_url":"https://matrix.alexlebens.dev","server_name":"alexlebens.dev"},"m.identity_server":{"base_url":"https://alexlebens.dev"}},"default_theme":"dark","disable_3pid_login":true,"sso_redirect_options":{"immediate":true}}
---
# Source: element-web/charts/element-web/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: element-web
labels:
helm.sh/chart: element-web-1.4.24
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: element-web
app.kubernetes.io/version: "1.12.4"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 80
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: element-web
---
# Source: element-web/charts/cloudflared/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: element-web-cloudflared
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: element-web
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cloudflared
app.kubernetes.io/version: 2025.10.0
helm.sh/chart: cloudflared-1.23.0
namespace: element-web
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: cloudflared
app.kubernetes.io/instance: element-web
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: element-web
app.kubernetes.io/name: cloudflared
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- tunnel
- --protocol
- http2
- --no-autoupdate
- run
- --token
- $(CF_MANAGED_TUNNEL_TOKEN)
env:
- name: CF_MANAGED_TUNNEL_TOKEN
valueFrom:
secretKeyRef:
key: cf-tunnel-token
name: element-web-cloudflared-secret
image: cloudflare/cloudflared:2025.11.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: element-web/charts/element-web/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: element-web
labels:
helm.sh/chart: element-web-1.4.24
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: element-web
app.kubernetes.io/version: "1.12.4"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: element-web
template:
metadata:
annotations:
checksum/config: e4e49fadd0eaedd59d5adab594fb3e159fcaaecf883c31012f72a55c7785e1c4
checksum/config-nginx: 0d6dce57e41259f77d072cd0381296fb272ba1c62d8817d5fd742da9ccce5aa1
labels:
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: element-web
spec:
serviceAccountName: element-web
securityContext:
{}
containers:
- name: element-web
securityContext:
{}
image: "vectorim/element-web:v1.12.4"
imagePullPolicy: IfNotPresent
env:
- name: ELEMENT_WEB_PORT
value: '8080'
ports:
- name: http
containerPort: 8080
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /app/config.json
name: config
subPath: config.json
- mountPath: /etc/nginx/conf.d/config.json
name: config-nginx
subPath: config.json
volumes:
- name: config
configMap:
name: element-web
- name: config-nginx
configMap:
name: element-web-nginx
---
# Source: element-web/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: element-web-cloudflared-secret
namespace: element-web
labels:
app.kubernetes.io/name: element-web-cloudflared-secret
app.kubernetes.io/instance: element-web
app.kubernetes.io/part-of: element-web
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/element
metadataPolicy: None
property: token
---
# Source: element-web/charts/element-web/templates/tests/test-connection.yaml
apiVersion: v1
kind: Pod
metadata:
name: "element-web-test-connection"
labels:
helm.sh/chart: element-web-1.4.24
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: element-web
app.kubernetes.io/version: "1.12.4"
app.kubernetes.io/managed-by: Helm
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['element-web:80']
restartPolicy: Never
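The chart ships a helm-test hook Pod (above) that simply wgets the element-web Service on port 80. If the release name and namespace match the labels here, the test can presumably be run after a deploy with:

$ # Runs the test-connection Pod and reports pass/fail (release and namespace names are assumptions)
$ helm test element-web -n element-web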

View File

@@ -0,0 +1,360 @@
---
# Source: ephemera/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: ephemera-import-nfs-storage
namespace: ephemera
labels:
app.kubernetes.io/name: ephemera-import-nfs-storage
app.kubernetes.io/instance: ephemera
app.kubernetes.io/part-of: ephemera
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Books Import
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: ephemera/charts/ephemera/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: ephemera
labels:
app.kubernetes.io/instance: ephemera
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: ephemera
helm.sh/chart: ephemera-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: ephemera
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: ephemera/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: ephemera-import-nfs-storage
namespace: ephemera
labels:
app.kubernetes.io/name: ephemera-import-nfs-storage
app.kubernetes.io/instance: ephemera
app.kubernetes.io/part-of: ephemera
spec:
volumeName: ephemera-import-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: ephemera/charts/ephemera/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: ephemera
labels:
app.kubernetes.io/instance: ephemera
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: ephemera
app.kubernetes.io/service: ephemera
helm.sh/chart: ephemera-4.4.0
namespace: ephemera
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8286
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: ephemera
app.kubernetes.io/name: ephemera
---
# Source: ephemera/charts/ephemera/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: ephemera
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: ephemera
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: ephemera
helm.sh/chart: ephemera-4.4.0
namespace: ephemera
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: ephemera
app.kubernetes.io/instance: ephemera
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: ephemera
app.kubernetes.io/name: ephemera
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: APPRISE_STORAGE_MODE
value: memory
- name: APPRISE_STATEFUL_MODE
value: disabled
- name: APPRISE_WORKER_COUNT
value: "1"
- name: APPRISE_STATELESS_URLS
valueFrom:
secretKeyRef:
key: ntfy-url
name: ephemera-apprise-config
image: caronc/apprise:1.2.6
imagePullPolicy: IfNotPresent
name: apprise-api
resources:
requests:
cpu: 10m
memory: 128Mi
- env:
- name: LOG_LEVEL
value: info
- name: LOG_HTML
value: "false"
- name: CAPTCHA_SOLVER
value: none
- name: TZ
value: America/Chicago
image: ghcr.io/flaresolverr/flaresolverr:v3.4.5
imagePullPolicy: IfNotPresent
name: flaresolverr
resources:
requests:
cpu: 10m
memory: 128Mi
- env:
- name: AA_BASE_URL
value: https://annas-archive.org
- name: FLARESOLVERR_URL
value: http://127.0.0.1:8191
- name: LG_BASE_URL
value: https://gen.com
- name: PUID
value: "0"
- name: PGID
value: "0"
image: ghcr.io/orwellianepilogue/ephemera:1.3.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 50m
memory: 128Mi
volumeMounts:
- mountPath: /app/downloads
name: cache
- mountPath: /app/data
name: config
- mountPath: /app/ingest
name: ingest
volumes:
- emptyDir: {}
name: cache
- name: config
persistentVolumeClaim:
claimName: ephemera
- name: ingest
persistentVolumeClaim:
claimName: ephemera-import-nfs-storage
---
# Source: ephemera/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: ephemera-key-secret
namespace: ephemera
labels:
app.kubernetes.io/name: ephemera-key-secret
app.kubernetes.io/instance: ephemera
app.kubernetes.io/part-of: ephemera
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/ephemera/config
metadataPolicy: None
property: key
---
# Source: ephemera/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: ephemera-apprise-config
namespace: ephemera
labels:
app.kubernetes.io/name: ephemera-apprise-config
app.kubernetes.io/instance: ephemera
app.kubernetes.io/part-of: ephemera
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ntfy-url
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/ephemera/config
metadataPolicy: None
property: ntfy-url
---
# Source: ephemera/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: ephemera-config-backup-secret
namespace: ephemera
labels:
app.kubernetes.io/name: ephemera-config-backup-secret
app.kubernetes.io/instance: ephemera
app.kubernetes.io/part-of: ephemera
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/ephemera/ephemera-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: ephemera/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-ephemera
namespace: ephemera
labels:
app.kubernetes.io/name: http-route-ephemera
app.kubernetes.io/instance: ephemera
app.kubernetes.io/part-of: ephemera
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- ephemera.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: ephemera
port: 80
weight: 100
---
# Source: ephemera/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: ephemera-config-backup-source
namespace: ephemera
labels:
app.kubernetes.io/name: ephemera-config-backup-source
app.kubernetes.io/instance: ephemera
app.kubernetes.io/part-of: ephemera
spec:
sourcePVC: ephemera-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: ephemera-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi

File diff suppressed because it is too large

View File

@@ -0,0 +1,283 @@
---
# Source: home-assistant/charts/home-assistant/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: home-assistant-config
labels:
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: home-assistant
helm.sh/chart: home-assistant-4.4.0
namespace: home-assistant
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: home-assistant/charts/home-assistant/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: home-assistant-code-server
labels:
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: home-assistant
app.kubernetes.io/service: home-assistant-code-server
helm.sh/chart: home-assistant-4.4.0
namespace: home-assistant
spec:
type: ClusterIP
ports:
- port: 8443
targetPort: 8443
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/name: home-assistant
---
# Source: home-assistant/charts/home-assistant/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: home-assistant-main
labels:
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: home-assistant
app.kubernetes.io/service: home-assistant-main
helm.sh/chart: home-assistant-4.4.0
namespace: home-assistant
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8123
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/name: home-assistant
---
# Source: home-assistant/charts/home-assistant/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: home-assistant
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: home-assistant
helm.sh/chart: home-assistant-4.4.0
namespace: home-assistant
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: home-assistant
app.kubernetes.io/instance: home-assistant
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/name: home-assistant
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: DEFAULT_WORKSPACE
value: /config
envFrom:
- secretRef:
name: home-assistant-code-server-password-secret
image: ghcr.io/linuxserver/code-server:4.106.2@sha256:a98afdbcb59559f11e5e8df284062e55da1076b2e470e13db4aae133ea82bad0
imagePullPolicy: IfNotPresent
name: code-server
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config/home-assistant
name: config
- env:
- name: TZ
value: US/Central
image: ghcr.io/home-assistant/home-assistant:2025.11.3
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 50m
memory: 512Mi
volumeMounts:
- mountPath: /config
name: config
volumes:
- name: config
persistentVolumeClaim:
claimName: home-assistant-config
---
# Source: home-assistant/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: home-assistant-code-server-password-secret
namespace: home-assistant
labels:
app.kubernetes.io/name: home-assistant-code-server-password-secret
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/part-of: home-assistant
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/home-assistant/code-server/auth
metadataPolicy: None
property: PASSWORD
- secretKey: SUDO_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/home-assistant/code-server/auth
metadataPolicy: None
property: SUDO_PASSWORD
---
# Source: home-assistant/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: home-assistant-token-secret
namespace: home-assistant
labels:
app.kubernetes.io/name: home-assistant-token-secret
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/part-of: home-assistant
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: bearer-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/home-assistant/auth
metadataPolicy: None
property: bearer-token
---
# Source: home-assistant/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-home-assistant
namespace: home-assistant
labels:
app.kubernetes.io/name: http-route-home-assistant
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/part-of: home-assistant
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- home-assistant.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: home-assistant-main
port: 80
weight: 100
---
# Source: home-assistant/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-home-assistant-code-server
namespace: home-assistant
labels:
app.kubernetes.io/name: http-route-home-assistant-code-server
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/part-of: home-assistant
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- home-assistant-code-server.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: home-assistant-code-server
port: 8443
weight: 100
---
# Source: home-assistant/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: home-assistant
namespace: home-assistant
labels:
app.kubernetes.io/name: home-assistant
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/part-of: home-assistant
spec:
selector:
matchLabels:
app.kubernetes.io/name: home-assistant
app.kubernetes.io/service: home-assistant-main
app.kubernetes.io/instance: home-assistant
endpoints:
- port: http
interval: 3m
scrapeTimeout: 1m
path: /api/prometheus
bearerTokenSecret:
name: home-assistant-token-secret
key: bearer-token

View File

@@ -0,0 +1,307 @@
---
# Source: homepage/charts/homepage/templates/common.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
name: homepage-dev
labels:
app.kubernetes.io/instance: homepage-dev
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: homepage
helm.sh/chart: homepage-4.4.0
namespace: homepage-dev
data:
bookmarks.yaml: ""
docker.yaml: ""
kubernetes.yaml: ""
services.yaml: |
- Applications:
- Auth:
icon: sh-authentik.webp
description: Authentik
href: https://auth.alexlebens.dev
siteMonitor: https://auth.alexlebens.dev
statusStyle: dot
- Gitea:
icon: sh-gitea.webp
description: Gitea
href: https://gitea.alexlebens.dev
siteMonitor: https://gitea.alexlebens.dev
statusStyle: dot
- Code:
icon: sh-visual-studio-code.webp
description: VS Code
href: https://codeserver.alexlebens.dev
siteMonitor: https://codeserver.alexlebens.dev
statusStyle: dot
- Site:
icon: https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/logo-new-round.png
description: Profile Website
href: https://www.alexlebens.dev
siteMonitor: https://www.alexlebens.dev
statusStyle: dot
- Content Management:
icon: directus.png
description: Directus
href: https://directus.alexlebens.dev
siteMonitor: https://directus.alexlebens.dev
statusStyle: dot
- Social Media Management:
icon: sh-postiz.webp
description: Postiz
href: https://postiz.alexlebens.dev
siteMonitor: https://postiz.alexlebens.dev
statusStyle: dot
- Chat:
icon: sh-element.webp
description: Matrix
href: https://chat.alexlebens.dev
siteMonitor: https://chat.alexlebens.dev
statusStyle: dot
- Wiki:
icon: sh-outline.webp
description: Outline
href: https://wiki.alexlebens.dev
siteMonitor: https://wiki.alexlebens.dev
statusStyle: dot
- Passwords:
icon: sh-vaultwarden-light.webp
description: Vaultwarden
href: https://passwords.alexlebens.dev
siteMonitor: https://passwords.alexlebens.dev
statusStyle: dot
- Bookmarks:
icon: sh-karakeep-light.webp
description: Karakeep
href: https://karakeep.alexlebens.dev
siteMonitor: https://karakeep.alexlebens.dev
statusStyle: dot
- RSS:
icon: sh-freshrss.webp
description: FreshRSS
href: https://rss.alexlebens.dev
siteMonitor: https://rss.alexlebens.dev
statusStyle: dot
settings.yaml: |
favicon: https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/logo-new-round.svg
headerStyle: clean
hideVersion: true
color: zinc
background:
image: https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/background.jpg
brightness: 50
theme: dark
disableCollapse: true
widgets.yaml: |
- logo:
icon: https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/logo-new-round.png
- datetime:
text_size: xl
format:
dateStyle: long
timeStyle: short
hour12: false
- openmeteo:
label: St. Paul
latitude: 44.954445
longitude: -93.091301
timezone: America/Chicago
units: metric
cache: 5
format:
maximumFractionDigits: 0
---
# Source: homepage/charts/homepage/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: homepage-dev
labels:
app.kubernetes.io/instance: homepage-dev
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: homepage
app.kubernetes.io/service: homepage-dev
helm.sh/chart: homepage-4.4.0
namespace: homepage-dev
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 3000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: homepage-dev
app.kubernetes.io/name: homepage
---
# Source: homepage/charts/cloudflared/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: homepage-dev-cloudflared
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: homepage-dev
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cloudflared
app.kubernetes.io/version: 2025.10.0
helm.sh/chart: cloudflared-1.23.0
namespace: homepage-dev
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: cloudflared
app.kubernetes.io/instance: homepage-dev
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: homepage-dev
app.kubernetes.io/name: cloudflared
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- tunnel
- --protocol
- http2
- --no-autoupdate
- run
- --token
- $(CF_MANAGED_TUNNEL_TOKEN)
env:
- name: CF_MANAGED_TUNNEL_TOKEN
valueFrom:
secretKeyRef:
key: cf-tunnel-token
name: homepage-dev-cloudflared-secret
image: cloudflare/cloudflared:2025.11.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: homepage/charts/homepage/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: homepage-dev
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: homepage-dev
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: homepage
helm.sh/chart: homepage-4.4.0
annotations:
reloader.stakater.com/auto: "true"
namespace: homepage-dev
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: homepage
app.kubernetes.io/instance: homepage-dev
template:
metadata:
annotations:
checksum/configMaps: d1306b9af923c5b3f02566a43c7a141c7168ebf8a74e5ff1a2d5d8082001c1a1
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: homepage-dev
app.kubernetes.io/name: homepage
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: HOMEPAGE_ALLOWED_HOSTS
value: home.alexlebens.dev
image: ghcr.io/gethomepage/homepage:v1.7.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /app/config/bookmarks.yaml
mountPropagation: None
name: config
readOnly: true
subPath: bookmarks.yaml
- mountPath: /app/config/docker.yaml
mountPropagation: None
name: config
readOnly: true
subPath: docker.yaml
- mountPath: /app/config/kubernetes.yaml
mountPropagation: None
name: config
readOnly: true
subPath: kubernetes.yaml
- mountPath: /app/config/services.yaml
mountPropagation: None
name: config
readOnly: true
subPath: services.yaml
- mountPath: /app/config/settings.yaml
mountPropagation: None
name: config
readOnly: true
subPath: settings.yaml
- mountPath: /app/config/widgets.yaml
mountPropagation: None
name: config
readOnly: true
subPath: widgets.yaml
volumes:
- configMap:
name: homepage-dev
name: config
---
# Source: homepage/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: homepage-dev-cloudflared-secret
namespace: homepage-dev
labels:
app.kubernetes.io/name: homepage-dev-cloudflared-secret
app.kubernetes.io/instance: homepage-dev
app.kubernetes.io/part-of: homepage-dev
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/homepage-dev
metadataPolicy: None
property: token

File diff suppressed because it is too large

View File

@@ -0,0 +1,129 @@
---
# Source: huntarr/charts/huntarr/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: huntarr-config
labels:
app.kubernetes.io/instance: huntarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: huntarr
helm.sh/chart: huntarr-4.4.0
namespace: huntarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: huntarr/charts/huntarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: huntarr
labels:
app.kubernetes.io/instance: huntarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: huntarr
app.kubernetes.io/service: huntarr
helm.sh/chart: huntarr-4.4.0
namespace: huntarr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 9705
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: huntarr
app.kubernetes.io/name: huntarr
---
# Source: huntarr/charts/huntarr/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: huntarr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: huntarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: huntarr
helm.sh/chart: huntarr-4.4.0
namespace: huntarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: huntarr
app.kubernetes.io/instance: huntarr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: huntarr
app.kubernetes.io/name: huntarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
image: ghcr.io/plexguide/huntarr:8.2.10
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
volumes:
- name: config
persistentVolumeClaim:
claimName: huntarr-config
---
# Source: huntarr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-huntarr
namespace: huntarr
labels:
app.kubernetes.io/name: http-route-huntarr
app.kubernetes.io/instance: huntarr
app.kubernetes.io/part-of: huntarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- huntarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: huntarr
port: 80
weight: 100

File diff suppressed because it is too large

View File

@@ -0,0 +1,326 @@
---
# Source: jellyfin/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: jellyfin-nfs-storage
namespace: jellyfin
labels:
app.kubernetes.io/name: jellyfin-nfs-storage
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/part-of: jellyfin
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: jellyfin/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: jellyfin-youtube-nfs-storage
namespace: jellyfin
labels:
app.kubernetes.io/name: jellyfin-youtube-nfs-storage
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/part-of: jellyfin
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadOnlyMany
nfs:
path: /volume2/Storage/YouTube
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: jellyfin/charts/jellyfin/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: jellyfin-config
labels:
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: jellyfin
helm.sh/chart: jellyfin-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: jellyfin
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "100Gi"
storageClassName: "ceph-block"
---
# Source: jellyfin/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: jellyfin-nfs-storage
namespace: jellyfin
labels:
app.kubernetes.io/name: jellyfin-nfs-storage
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/part-of: jellyfin
spec:
volumeName: jellyfin-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: jellyfin/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: jellyfin-youtube-nfs-storage
namespace: jellyfin
labels:
app.kubernetes.io/name: jellyfin-youtube-nfs-storage
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/part-of: jellyfin
spec:
volumeName: jellyfin-youtube-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadOnlyMany
resources:
requests:
storage: 1Gi
---
# Source: jellyfin/charts/jellyfin/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: jellyfin
labels:
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: jellyfin
app.kubernetes.io/service: jellyfin
helm.sh/chart: jellyfin-4.4.0
namespace: jellyfin
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8096
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/name: jellyfin
---
# Source: jellyfin/charts/jellyfin/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: jellyfin
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: jellyfin
helm.sh/chart: jellyfin-4.4.0
namespace: jellyfin
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: jellyfin
app.kubernetes.io/instance: jellyfin
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/name: jellyfin
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: JELLYFIN_hostwebclient
value: "true"
- name: JELLYFIN_PublishedServerUrl
value: https://jellyfin.alexlebens.net/
image: ghcr.io/jellyfin/jellyfin:10.11.3
imagePullPolicy: IfNotPresent
name: main
resources:
limits:
gpu.intel.com/i915: 1
requests:
cpu: 1
gpu.intel.com/i915: 1
memory: 2Gi
volumeMounts:
- mountPath: /cache
name: cache
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- mountPath: /mnt/youtube
name: youtube
readOnly: true
volumes:
- emptyDir: {}
name: cache
- name: config
persistentVolumeClaim:
claimName: jellyfin-config
- name: media
persistentVolumeClaim:
claimName: jellyfin-nfs-storage
- name: youtube
persistentVolumeClaim:
claimName: jellyfin-youtube-nfs-storage
---
# Source: jellyfin/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: jellyfin-config-backup-secret
namespace: jellyfin
labels:
app.kubernetes.io/name: jellyfin-config-backup-secret
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/part-of: jellyfin
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/jellyfin/jellyfin-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: jellyfin/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-jellyfin
namespace: jellyfin
labels:
app.kubernetes.io/name: http-route-jellyfin
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/part-of: jellyfin
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- jellyfin.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: jellyfin
port: 80
weight: 100
---
# Source: jellyfin/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: jellyfin-config-backup-source
namespace: jellyfin
labels:
app.kubernetes.io/name: jellyfin-config-backup-source
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/part-of: jellyfin
spec:
sourcePVC: jellyfin-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: jellyfin-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi

View File

@@ -0,0 +1,861 @@
---
# Source: jellystat/charts/jellystat/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: jellystat-data
labels:
app.kubernetes.io/instance: jellystat
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: jellystat
helm.sh/chart: jellystat-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: jellystat
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: jellystat/charts/jellystat/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: jellystat
labels:
app.kubernetes.io/instance: jellystat
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: jellystat
app.kubernetes.io/service: jellystat
helm.sh/chart: jellystat-4.4.0
namespace: jellystat
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 3000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: jellystat
app.kubernetes.io/name: jellystat
---
# Source: jellystat/charts/jellystat/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: jellystat
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: jellystat
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: jellystat
helm.sh/chart: jellystat-4.4.0
namespace: jellystat
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: jellystat
app.kubernetes.io/instance: jellystat
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: jellystat
app.kubernetes.io/name: jellystat
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: JWT_SECRET
valueFrom:
secretKeyRef:
key: secret-key
name: jellystat-secret
- name: JS_USER
valueFrom:
secretKeyRef:
key: user
name: jellystat-secret
- name: JS_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: jellystat-secret
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
key: username
name: jellystat-postgresql-17-cluster-app
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: jellystat-postgresql-17-cluster-app
- name: POSTGRES_DB
valueFrom:
secretKeyRef:
key: dbname
name: jellystat-postgresql-17-cluster-app
- name: POSTGRES_IP
valueFrom:
secretKeyRef:
key: host
name: jellystat-postgresql-17-cluster-app
- name: POSTGRES_PORT
valueFrom:
secretKeyRef:
key: port
name: jellystat-postgresql-17-cluster-app
image: cyfershepard/jellystat:1.1.6
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /app/backend/backup-data
name: data
volumes:
- name: data
persistentVolumeClaim:
claimName: jellystat-data
---
# Source: jellystat/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: jellystat-postgresql-17-cluster
namespace: jellystat
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: jellystat-postgresql-17
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "jellystat-postgresql-17-external-backup"
serverName: "jellystat-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "jellystat-postgresql-17-garage-local-backup"
serverName: "jellystat-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "jellystat-postgresql-17-recovery"
serverName: jellystat-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: jellystat-postgresql-17-backup-1
externalClusters:
- name: jellystat-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "jellystat-postgresql-17-recovery"
serverName: jellystat-postgresql-17-backup-1
---
# Source: jellystat/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: jellystat-secret
namespace: jellystat
labels:
app.kubernetes.io/name: jellystat-secret
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: secret-key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/jellystat/auth
metadataPolicy: None
property: secret-key
- secretKey: user
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/jellystat/auth
metadataPolicy: None
property: user
- secretKey: password
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/jellystat/auth
metadataPolicy: None
property: password
---
# Source: jellystat/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: jellystat-data-backup-secret
namespace: jellystat
labels:
app.kubernetes.io/name: jellystat-data-backup-secret
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/jellystat/jellystat-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: jellystat/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: jellystat-postgresql-17-cluster-backup-secret
namespace: jellystat
labels:
app.kubernetes.io/name: jellystat-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: jellystat/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: jellystat-postgresql-17-cluster-backup-secret-garage
namespace: jellystat
labels:
app.kubernetes.io/name: jellystat-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: jellystat/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-jellystat
namespace: jellystat
labels:
app.kubernetes.io/name: http-route-jellystat
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- jellystat.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: jellystat
port: 80
weight: 100
---
# Source: jellystat/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "jellystat-postgresql-17-external-backup"
namespace: jellystat
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: jellystat-postgresql-17
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/jellystat/jellystat-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: jellystat-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: jellystat-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: jellystat/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "jellystat-postgresql-17-garage-local-backup"
namespace: jellystat
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: jellystat-postgresql-17
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/jellystat/jellystat-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: jellystat-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: jellystat-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: jellystat-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: jellystat/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "jellystat-postgresql-17-recovery"
namespace: jellystat
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: jellystat-postgresql-17
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/jellystat/jellystat-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: jellystat-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: jellystat-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: jellystat/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: jellystat-postgresql-17-alert-rules
namespace: jellystat
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: jellystat-postgresql-17
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/jellystat-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary instance.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
instances. The replaced instance may need some time to catch up with the cluster primary instance.
This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="jellystat"} - cnpg_pg_replication_is_wal_receiver_up{namespace="jellystat"}) < 1
for: 5m
labels:
severity: critical
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster less than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="jellystat"} - cnpg_pg_replication_is_wal_receiver_up{namespace="jellystat"}) < 2
for: 5m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="jellystat",pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="jellystat",pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster has instances in the same zone.
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
---
# Source: jellystat/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: jellystat-data-backup-source
namespace: jellystat
labels:
app.kubernetes.io/name: jellystat-data-backup-source
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
spec:
sourcePVC: jellystat-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: jellystat-data-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: jellystat/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "jellystat-postgresql-17-daily-backup-scheduled-backup"
namespace: jellystat
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: jellystat-postgresql-17
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: jellystat-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "jellystat-postgresql-17-external-backup"
---
# Source: jellystat/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "jellystat-postgresql-17-live-backup-scheduled-backup"
namespace: jellystat
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: jellystat-postgresql-17
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: jellystat-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "jellystat-postgresql-17-garage-local-backup"

View File

@@ -0,0 +1,711 @@
---
# Source: karakeep/charts/meilisearch/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: karakeep-meilisearch
labels:
helm.sh/chart: meilisearch-0.17.1
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
app.kubernetes.io/version: "v1.18.0"
app.kubernetes.io/component: search-engine
app.kubernetes.io/part-of: meilisearch
app.kubernetes.io/managed-by: Helm
---
# Source: karakeep/charts/meilisearch/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: karakeep-meilisearch-environment
labels:
helm.sh/chart: meilisearch-0.17.1
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
app.kubernetes.io/version: "v1.18.0"
app.kubernetes.io/component: search-engine
app.kubernetes.io/part-of: meilisearch
app.kubernetes.io/managed-by: Helm
data:
MEILI_ENV: "production"
MEILI_EXPERIMENTAL_DUMPLESS_UPGRADE: "true"
MEILI_NO_ANALYTICS: "true"
MEILI_EXPERIMENTAL_ENABLE_METRICS: "true"
---
# Source: karakeep/charts/karakeep/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: karakeep
labels:
app.kubernetes.io/instance: karakeep
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: karakeep
helm.sh/chart: karakeep-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: karakeep
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: karakeep/charts/meilisearch/templates/pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: karakeep-meilisearch
labels:
helm.sh/chart: meilisearch-0.17.1
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
app.kubernetes.io/version: "v1.18.0"
app.kubernetes.io/component: search-engine
app.kubernetes.io/part-of: meilisearch
app.kubernetes.io/managed-by: Helm
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: karakeep/charts/karakeep/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: karakeep
labels:
app.kubernetes.io/instance: karakeep
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: karakeep
app.kubernetes.io/service: karakeep
helm.sh/chart: karakeep-4.4.0
namespace: karakeep
spec:
type: ClusterIP
ports:
- port: 9222
targetPort: 9222
protocol: TCP
name: chrome
- port: 3000
targetPort: 3000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: karakeep
app.kubernetes.io/name: karakeep
---
# Source: karakeep/charts/meilisearch/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: karakeep-meilisearch
labels:
helm.sh/chart: meilisearch-0.17.1
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
app.kubernetes.io/version: "v1.18.0"
app.kubernetes.io/component: search-engine
app.kubernetes.io/part-of: meilisearch
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 7700
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
---
# Source: karakeep/charts/cloudflared/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: karakeep-cloudflared
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: karakeep
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cloudflared
app.kubernetes.io/version: 2025.10.0
helm.sh/chart: cloudflared-1.23.0
namespace: karakeep
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: cloudflared
app.kubernetes.io/instance: karakeep
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: karakeep
app.kubernetes.io/name: cloudflared
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- tunnel
- --protocol
- http2
- --no-autoupdate
- run
- --token
- $(CF_MANAGED_TUNNEL_TOKEN)
env:
- name: CF_MANAGED_TUNNEL_TOKEN
valueFrom:
secretKeyRef:
key: cf-tunnel-token
name: karakeep-cloudflared-secret
image: cloudflare/cloudflared:2025.11.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: karakeep/charts/karakeep/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: karakeep
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: karakeep
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: karakeep
helm.sh/chart: karakeep-4.4.0
namespace: karakeep
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: karakeep
app.kubernetes.io/instance: karakeep
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: karakeep
app.kubernetes.io/name: karakeep
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- --no-sandbox
- --disable-gpu
- --disable-dev-shm-usage
- --remote-debugging-address=0.0.0.0
- --remote-debugging-port=9222
- --hide-scrollbars
image: gcr.io/zenika-hub/alpine-chrome:124
imagePullPolicy: IfNotPresent
name: chrome
resources:
requests:
cpu: 10m
memory: 128Mi
- env:
- name: DATA_DIR
value: /data
- name: DB_WAL_MODE
value: "true"
- name: NEXTAUTH_URL
value: https://karakeep.alexlebens.dev/
- name: NEXTAUTH_SECRET
valueFrom:
secretKeyRef:
key: key
name: karakeep-key-secret
- name: PROMETHEUS_AUTH_TOKEN
valueFrom:
secretKeyRef:
key: prometheus-token
name: karakeep-key-secret
- name: ASSET_STORE_S3_ENDPOINT
value: http://rook-ceph-rgw-ceph-objectstore.rook-ceph.svc:80
- name: ASSET_STORE_S3_REGION
value: us-east-1
- name: ASSET_STORE_S3_BUCKET
valueFrom:
configMapKeyRef:
key: BUCKET_NAME
name: ceph-bucket-karakeep
- name: ASSET_STORE_S3_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
name: ceph-bucket-karakeep
- name: ASSET_STORE_S3_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: AWS_SECRET_ACCESS_KEY
name: ceph-bucket-karakeep
- name: ASSET_STORE_S3_FORCE_PATH_STYLE
value: "true"
- name: MEILI_ADDR
value: http://karakeep-meilisearch.karakeep:7700
- name: MEILI_MASTER_KEY
valueFrom:
secretKeyRef:
key: MEILI_MASTER_KEY
name: karakeep-meilisearch-master-key-secret
- name: BROWSER_WEB_URL
value: http://karakeep.karakeep:9222
- name: DISABLE_SIGNUPS
value: "false"
- name: OAUTH_PROVIDER_NAME
value: Authentik
- name: OAUTH_WELLKNOWN_URL
value: https://auth.alexlebens.dev/application/o/karakeep/.well-known/openid-configuration
- name: OAUTH_SCOPE
value: openid email profile
- name: OAUTH_CLIENT_ID
valueFrom:
secretKeyRef:
key: AUTHENTIK_CLIENT_ID
name: karakeep-oidc-secret
- name: OAUTH_CLIENT_SECRET
valueFrom:
secretKeyRef:
key: AUTHENTIK_CLIENT_SECRET
name: karakeep-oidc-secret
- name: OLLAMA_BASE_URL
value: http://ollama-server-3.ollama:11434
- name: OLLAMA_KEEP_ALIVE
value: 5m
- name: INFERENCE_TEXT_MODEL
value: gemma3:4b
- name: INFERENCE_IMAGE_MODEL
value: granite3.2-vision:2b
- name: EMBEDDING_TEXT_MODEL
value: mxbai-embed-large
- name: INFERENCE_JOB_TIMEOUT_SEC
value: "720"
image: ghcr.io/karakeep-app/karakeep:0.28.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /data
name: data
volumes:
- name: data
persistentVolumeClaim:
claimName: karakeep
---
# Source: karakeep/charts/meilisearch/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: karakeep-meilisearch
labels:
helm.sh/chart: meilisearch-0.17.1
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
app.kubernetes.io/version: "v1.18.0"
app.kubernetes.io/component: search-engine
app.kubernetes.io/part-of: meilisearch
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
serviceName: karakeep-meilisearch
selector:
matchLabels:
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
template:
metadata:
labels:
helm.sh/chart: meilisearch-0.17.1
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
app.kubernetes.io/version: "v1.18.0"
app.kubernetes.io/component: search-engine
app.kubernetes.io/part-of: meilisearch
app.kubernetes.io/managed-by: Helm
annotations:
checksum/config: e3114e6f2910e1678611b9df77ee9eb63744c6e143f716dd8aa5f015391a2ef3
spec:
serviceAccountName: karakeep-meilisearch
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
volumes:
- name: tmp
emptyDir: {}
- name: data
persistentVolumeClaim:
claimName: karakeep-meilisearch
containers:
- name: meilisearch
image: "getmeili/meilisearch:v1.18.0"
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
volumeMounts:
- name: tmp
mountPath: /tmp
- name: data
mountPath: /meili_data
envFrom:
- configMapRef:
name: karakeep-meilisearch-environment
- secretRef:
name: karakeep-meilisearch-master-key-secret
ports:
- name: http
containerPort: 7700
protocol: TCP
startupProbe:
httpGet:
path: /health
port: http
periodSeconds: 1
initialDelaySeconds: 1
failureThreshold: 60
timeoutSeconds: 1
livenessProbe:
httpGet:
path: /health
port: http
periodSeconds: 10
initialDelaySeconds: 0
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /health
port: http
periodSeconds: 10
initialDelaySeconds: 0
timeoutSeconds: 10
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: karakeep/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: karakeep-key-secret
namespace: karakeep
labels:
app.kubernetes.io/name: karakeep-key-secret
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/karakeep/key
metadataPolicy: None
property: key
- secretKey: prometheus-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/karakeep/key
metadataPolicy: None
property: prometheus-token
---
# Source: karakeep/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: karakeep-oidc-secret
namespace: karakeep
labels:
app.kubernetes.io/name: karakeep-oidc-secret
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: AUTHENTIK_CLIENT_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/karakeep
metadataPolicy: None
property: client
- secretKey: AUTHENTIK_CLIENT_SECRET
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/karakeep
metadataPolicy: None
property: secret
---
# Source: karakeep/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: karakeep-meilisearch-master-key-secret
namespace: karakeep
labels:
app.kubernetes.io/name: karakeep-meilisearch-master-key-secret
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: MEILI_MASTER_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/karakeep/meilisearch
metadataPolicy: None
property: MEILI_MASTER_KEY
---
# Source: karakeep/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: karakeep-cloudflared-secret
namespace: karakeep
labels:
app.kubernetes.io/name: karakeep-cloudflared-secret
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/karakeep
metadataPolicy: None
property: token
---
# Source: karakeep/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: karakeep-data-backup-secret
namespace: karakeep
labels:
app.kubernetes.io/name: karakeep-data-backup-secret
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/karakeep/karakeep-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: karakeep/templates/object-bucket-claim.yaml
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
name: ceph-bucket-karakeep
labels:
app.kubernetes.io/name: ceph-bucket-karakeep
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
generateBucketName: bucket-karakeep
storageClassName: ceph-bucket
---
# Source: karakeep/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: karakeep-data-backup-source
namespace: karakeep
labels:
app.kubernetes.io/name: karakeep-data-backup-source
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
sourcePVC: karakeep-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: karakeep-data-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: karakeep/charts/meilisearch/templates/serviceMonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: karakeep-meilisearch
namespace: karakeep
labels:
helm.sh/chart: meilisearch-0.17.1
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
app.kubernetes.io/version: "v1.18.0"
app.kubernetes.io/component: search-engine
app.kubernetes.io/part-of: meilisearch
app.kubernetes.io/managed-by: Helm
spec:
jobLabel: karakeep
namespaceSelector:
matchNames:
- karakeep
selector:
matchLabels:
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
endpoints:
- port: http
path: /metrics
interval: 1m
scrapeTimeout: 10s
bearerTokenSecret:
name: karakeep-meilisearch-master-key-secret
key: MEILI_MASTER_KEY
---
# Source: karakeep/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: karakeep
namespace: karakeep
labels:
app.kubernetes.io/name: karakeep
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
endpoints:
- port: http
interval: 30s
scrapeTimeout: 15s
path: /api/metrics
authorization:
credentials:
key: prometheus-token
name: karakeep-key-secret
selector:
matchLabels:
app.kubernetes.io/name: karakeep
app.kubernetes.io/instance: karakeep
---
# Source: karakeep/charts/meilisearch/templates/tests/test-connection.yaml
apiVersion: v1
kind: Pod
metadata:
name: karakeep-meilisearch-test-connection
labels:
app.kubernetes.io/name: meilisearch
helm.sh/chart: meilisearch-0.17.1
app.kubernetes.io/instance: karakeep
app.kubernetes.io/managed-by: Helm
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['karakeep-meilisearch:7700']
restartPolicy: Never

View File

@@ -0,0 +1,157 @@
---
# Source: kiwix/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: kiwix-nfs-storage
namespace: kiwix
labels:
app.kubernetes.io/name: kiwix-nfs-storage
app.kubernetes.io/instance: kiwix
app.kubernetes.io/part-of: kiwix
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Kiwix
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: kiwix/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: kiwix-nfs-storage
namespace: kiwix
labels:
app.kubernetes.io/name: kiwix-nfs-storage
app.kubernetes.io/instance: kiwix
app.kubernetes.io/part-of: kiwix
spec:
volumeName: kiwix-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: kiwix/charts/kiwix/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: kiwix
labels:
app.kubernetes.io/instance: kiwix
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kiwix
app.kubernetes.io/service: kiwix
helm.sh/chart: kiwix-4.4.0
namespace: kiwix
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8080
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: kiwix
app.kubernetes.io/name: kiwix
---
# Source: kiwix/charts/kiwix/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kiwix
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: kiwix
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kiwix
helm.sh/chart: kiwix-4.4.0
namespace: kiwix
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: kiwix
app.kubernetes.io/instance: kiwix
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: kiwix
app.kubernetes.io/name: kiwix
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- '*.zim'
env:
- name: PORT
value: "8080"
image: ghcr.io/kiwix/kiwix-serve:3.8.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 50m
memory: 512Mi
volumeMounts:
- mountPath: /data
name: media
readOnly: true
volumes:
- name: media
persistentVolumeClaim:
claimName: kiwix-nfs-storage
---
# Source: kiwix/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-kiwix
namespace: kiwix
labels:
app.kubernetes.io/name: http-route-kiwix
app.kubernetes.io/instance: kiwix
app.kubernetes.io/part-of: kiwix
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- kiwix.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: kiwix
port: 80
weight: 100

View File

@@ -0,0 +1,129 @@
---
# Source: libation/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: libation-nfs-storage
namespace: libation
labels:
app.kubernetes.io/name: libation-nfs-storage
app.kubernetes.io/instance: libation
app.kubernetes.io/part-of: libation
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Audiobooks/
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: libation/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: libation-config
namespace: libation
labels:
app.kubernetes.io/name: libation-config
app.kubernetes.io/instance: libation
app.kubernetes.io/part-of: libation
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
volumeMode: Filesystem
---
# Source: libation/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: libation-nfs-storage
namespace: libation
labels:
app.kubernetes.io/name: libation-nfs-storage
app.kubernetes.io/instance: libation
app.kubernetes.io/part-of: libation
spec:
volumeName: libation-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: libation/charts/libation/templates/common.yaml
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: libation
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: libation
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: libation
helm.sh/chart: libation-4.4.0
namespace: libation
spec:
suspend: false
concurrencyPolicy: Forbid
startingDeadlineSeconds: 90
timeZone: US/Central
schedule: "30 4 * * *"
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
parallelism: 1
backoffLimit: 3
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: libation
app.kubernetes.io/name: libation
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
restartPolicy: Never
containers:
- env:
- name: SLEEP_TIME
value: "-1"
- name: LIBATION_BOOKS_DIR
value: /data
image: rmcrackan/libation:12.7.4
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /data
name: data
volumes:
- name: config
persistentVolumeClaim:
claimName: libation-config
- name: data
persistentVolumeClaim:
claimName: libation-nfs-storage

View File

@@ -0,0 +1,928 @@
---
# Source: lidarr/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: lidarr-nfs-storage
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-nfs-storage
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: lidarr/charts/lidarr/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: lidarr-config
labels:
app.kubernetes.io/instance: lidarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: lidarr
helm.sh/chart: lidarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: lidarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: lidarr/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: lidarr-nfs-storage
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-nfs-storage
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
volumeName: lidarr-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: lidarr/charts/lidarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: lidarr
labels:
app.kubernetes.io/instance: lidarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: lidarr
app.kubernetes.io/service: lidarr
helm.sh/chart: lidarr-4.4.0
namespace: lidarr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8686
protocol: TCP
name: http
- port: 9792
targetPort: 9792
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: lidarr
app.kubernetes.io/name: lidarr
---
# Source: lidarr/charts/lidarr/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: lidarr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: lidarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: lidarr
helm.sh/chart: lidarr-4.4.0
namespace: lidarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: lidarr
app.kubernetes.io/instance: lidarr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: lidarr
app.kubernetes.io/name: lidarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 1000
runAsUser: 1000
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/lidarr:2.14.5@sha256:5e1235d00b5d1c1f60ca0d472e554a6611aef41aa7b5b6d88260214bf4809af0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- args:
- lidarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9792"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: lidarr-config
- name: media
persistentVolumeClaim:
claimName: lidarr-nfs-storage
---
# Source: lidarr/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: lidarr2-postgresql-17-cluster
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "lidarr2-postgresql-17-external-backup"
serverName: "lidarr2-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "lidarr2-postgresql-17-garage-local-backup"
serverName: "lidarr2-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "lidarr2-postgresql-17-recovery"
serverName: lidarr2-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 200m
memory: 1Gi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: lidarr2-postgresql-17-backup-1
externalClusters:
- name: lidarr2-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "lidarr2-postgresql-17-recovery"
serverName: lidarr2-postgresql-17-backup-1
---
# Source: lidarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: lidarr-config-backup-secret
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-config-backup-secret
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/lidarr2/lidarr2-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: lidarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: lidarr-postgresql-17-cluster-backup-secret
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: lidarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: lidarr-postgresql-17-cluster-backup-secret-garage
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: lidarr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-lidarr
namespace: lidarr
labels:
app.kubernetes.io/name: http-route-lidarr
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- lidarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: lidarr
port: 80
weight: 100
---
# Source: lidarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "lidarr2-postgresql-17-external-backup"
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/lidarr2/lidarr2-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: lidarr-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: lidarr-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: lidarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "lidarr2-postgresql-17-garage-local-backup"
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/lidarr/lidarr2-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: lidarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: lidarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: lidarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: lidarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "lidarr2-postgresql-17-recovery"
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/lidarr/lidarr2-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: lidarr-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: lidarr-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: lidarr/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: lidarr2-postgresql-17-alert-rules
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/lidarr2-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
              risk of data loss and downtime if the primary instance fails.
              The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
              will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
              This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
              instances. The replaced instance may need some time to catch up with the cluster primary instance.
              This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
              case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="lidarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="lidarr"}) < 1
for: 5m
labels:
severity: critical
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
            summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
              need some time to catch up with the cluster primary instance.
              This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="lidarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="lidarr"}) < 2
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
            summary: CNPG Instance is critically close to the maximum number of connections!
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="lidarr",pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
            summary: CNPG Cluster has a query taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="lidarr",pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
            summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
            description: |-
              Over 300,000,000 transactions from the frozen XID
              on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
            summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
            summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
---
# Source: lidarr/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: lidarr
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
groups:
- name: lidarr
rules:
- alert: ExportarrAbsent
annotations:
description: Lidarr Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*lidarr.*"} == 1)
for: 5m
labels:
severity: critical
- alert: LidarrDown
annotations:
description: Lidarr service is down.
summary: Lidarr is down.
expr: |
lidarr_system_status{job=~".*lidarr.*"} == 0
for: 5m
labels:
severity: critical
---
# Source: lidarr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: lidarr-config-backup-source
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-config-backup-source
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
sourcePVC: lidarr-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: lidarr-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: lidarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "lidarr2-postgresql-17-daily-backup-scheduled-backup"
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: lidarr2-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "lidarr2-postgresql-17-external-backup"
---
# Source: lidarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "lidarr2-postgresql-17-live-backup-scheduled-backup"
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: lidarr2-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "lidarr2-postgresql-17-garage-local-backup"
---
# Source: lidarr/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: lidarr
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
selector:
matchLabels:
app.kubernetes.io/name: lidarr
app.kubernetes.io/instance: lidarr
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics

View File

@@ -0,0 +1,221 @@
---
# Source: lidatube/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: lidatube-nfs-storage
namespace: lidatube
labels:
app.kubernetes.io/name: lidatube-nfs-storage
app.kubernetes.io/instance: lidatube
app.kubernetes.io/part-of: lidatube
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Music
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: lidatube/charts/lidatube/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: lidatube-config
labels:
app.kubernetes.io/instance: lidatube
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: lidatube
helm.sh/chart: lidatube-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: lidatube
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: lidatube/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: lidatube-nfs-storage
namespace: lidatube
labels:
app.kubernetes.io/name: lidatube-nfs-storage
app.kubernetes.io/instance: lidatube
app.kubernetes.io/part-of: lidatube
spec:
volumeName: lidatube-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: lidatube/charts/lidatube/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: lidatube
labels:
app.kubernetes.io/instance: lidatube
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: lidatube
app.kubernetes.io/service: lidatube
helm.sh/chart: lidatube-4.4.0
namespace: lidatube
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 5000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: lidatube
app.kubernetes.io/name: lidatube
---
# Source: lidatube/charts/lidatube/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: lidatube
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: lidatube
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: lidatube
helm.sh/chart: lidatube-4.4.0
namespace: lidatube
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: lidatube
app.kubernetes.io/instance: lidatube
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: lidatube
app.kubernetes.io/name: lidatube
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: lidarr_address
value: http://lidarr.lidarr:80
- name: lidarr_api_key
valueFrom:
secretKeyRef:
key: lidarr_api_key
name: lidatube-secret
- name: sleep_interval
value: "360"
- name: sync_schedule
value: "4"
- name: attempt_lidarr_import
value: "true"
image: thewicklowwolf/lidatube:0.2.41
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /lidatube/config
name: config
- mountPath: /lidatube/downloads
name: music
volumes:
- name: config
persistentVolumeClaim:
claimName: lidatube-config
- name: music
persistentVolumeClaim:
claimName: lidatube-nfs-storage
---
# Source: lidatube/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: lidatube-secret
namespace: lidatube
labels:
app.kubernetes.io/name: lidatube-secret
app.kubernetes.io/instance: lidatube
app.kubernetes.io/part-of: lidatube
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: lidarr_api_key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/lidarr2/key
metadataPolicy: None
property: key
---
# Source: lidatube/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-lidatube
namespace: lidatube
labels:
app.kubernetes.io/name: http-route-lidatube
app.kubernetes.io/instance: lidatube
app.kubernetes.io/part-of: lidatube
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- lidatube.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: lidatube
port: 80
weight: 100

View File

@@ -0,0 +1,180 @@
---
# Source: listenarr/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: listenarr-nfs-storage
namespace: listenarr
labels:
app.kubernetes.io/name: listenarr-nfs-storage
app.kubernetes.io/instance: listenarr
app.kubernetes.io/part-of: listenarr
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Audiobooks
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: listenarr/charts/listenarr/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: listenarr
labels:
app.kubernetes.io/instance: listenarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: listenarr
helm.sh/chart: listenarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: listenarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: listenarr/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: listenarr-nfs-storage
namespace: listenarr
labels:
app.kubernetes.io/name: listenarr-nfs-storage
app.kubernetes.io/instance: listenarr
app.kubernetes.io/part-of: listenarr
spec:
volumeName: listenarr-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: listenarr/charts/listenarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: listenarr
labels:
app.kubernetes.io/instance: listenarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: listenarr
app.kubernetes.io/service: listenarr
helm.sh/chart: listenarr-4.4.0
namespace: listenarr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 5000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: listenarr
app.kubernetes.io/name: listenarr
---
# Source: listenarr/charts/listenarr/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: listenarr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: listenarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: listenarr
helm.sh/chart: listenarr-4.4.0
namespace: listenarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: listenarr
app.kubernetes.io/instance: listenarr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: listenarr
app.kubernetes.io/name: listenarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: LISTENARR_PUBLIC_URL
value: https://listenarr.alexlebens.net
image: therobbiedavis/listenarr:canary-0.2.35
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 50m
memory: 128Mi
volumeMounts:
- mountPath: /app/config
name: config
- mountPath: /data
name: media
volumes:
- name: config
persistentVolumeClaim:
claimName: listenarr
- name: media
persistentVolumeClaim:
claimName: listenarr-nfs-storage
---
# Source: listenarr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-listenarr
namespace: listenarr
labels:
app.kubernetes.io/name: http-route-listenarr
app.kubernetes.io/instance: listenarr
app.kubernetes.io/part-of: listenarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- listenarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: listenarr
port: 80
weight: 100

View File

@@ -0,0 +1,100 @@
---
# Source: omni-tools/charts/omni-tools/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: omni-tools
labels:
app.kubernetes.io/instance: omni-tools
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: omni-tools
app.kubernetes.io/service: omni-tools
helm.sh/chart: omni-tools-4.4.0
namespace: omni-tools
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: omni-tools
app.kubernetes.io/name: omni-tools
---
# Source: omni-tools/charts/omni-tools/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: omni-tools
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: omni-tools
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: omni-tools
helm.sh/chart: omni-tools-4.4.0
namespace: omni-tools
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: omni-tools
app.kubernetes.io/instance: omni-tools
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: omni-tools
app.kubernetes.io/name: omni-tools
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- image: iib0011/omni-tools:0.6.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 50m
memory: 512Mi
---
# Source: omni-tools/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-omni-tools
namespace: omni-tools
labels:
app.kubernetes.io/name: http-route-omni-tools
app.kubernetes.io/instance: omni-tools
app.kubernetes.io/part-of: omni-tools
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- omni-tools.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: omni-tools
port: 80
weight: 100

View File

@@ -0,0 +1,988 @@
---
# Source: outline/charts/outline/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: outline
labels:
app.kubernetes.io/instance: outline
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: outline
app.kubernetes.io/service: outline
helm.sh/chart: outline-4.4.0
namespace: outline
spec:
type: ClusterIP
ports:
- port: 3000
targetPort: 3000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: outline
app.kubernetes.io/name: outline
---
# Source: outline/charts/cloudflared-outline/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: outline-cloudflared-outline
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: outline
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cloudflared-outline
app.kubernetes.io/version: 2025.10.0
helm.sh/chart: cloudflared-outline-1.23.0
namespace: outline
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: cloudflared-outline
app.kubernetes.io/instance: outline
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: outline
app.kubernetes.io/name: cloudflared-outline
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- tunnel
- --protocol
- http2
- --no-autoupdate
- run
- --token
- $(CF_MANAGED_TUNNEL_TOKEN)
env:
- name: CF_MANAGED_TUNNEL_TOKEN
valueFrom:
secretKeyRef:
key: cf-tunnel-token
name: outline-cloudflared-secret
image: cloudflare/cloudflared:2025.11.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: outline/charts/outline/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: outline
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: outline
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: outline
helm.sh/chart: outline-4.4.0
namespace: outline
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: outline
app.kubernetes.io/instance: outline
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: outline
app.kubernetes.io/name: outline
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: NODE_ENV
value: production
- name: URL
value: https://wiki.alexlebens.dev
- name: PORT
value: "3000"
- name: SECRET_KEY
valueFrom:
secretKeyRef:
key: secret-key
name: outline-key-secret
- name: UTILS_SECRET
valueFrom:
secretKeyRef:
key: utils-key
name: outline-key-secret
- name: POSTGRES_USERNAME
valueFrom:
secretKeyRef:
key: username
name: outline-postgresql-17-cluster-app
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: outline-postgresql-17-cluster-app
- name: POSTGRES_DATABASE_NAME
valueFrom:
secretKeyRef:
key: dbname
name: outline-postgresql-17-cluster-app
- name: POSTGRES_DATABASE_HOST
valueFrom:
secretKeyRef:
key: host
name: outline-postgresql-17-cluster-app
- name: POSTGRES_DATABASE_PORT
valueFrom:
secretKeyRef:
key: port
name: outline-postgresql-17-cluster-app
- name: DATABASE_URL
value: postgres://$(POSTGRES_USERNAME):$(POSTGRES_PASSWORD)@$(POSTGRES_DATABASE_HOST):$(POSTGRES_DATABASE_PORT)/$(POSTGRES_DATABASE_NAME)
- name: DATABASE_URL_TEST
value: postgres://$(POSTGRES_USERNAME):$(POSTGRES_PASSWORD)@$(POSTGRES_DATABASE_HOST):$(POSTGRES_DATABASE_PORT)/$(POSTGRES_DATABASE_NAME)-test
- name: DATABASE_CONNECTION_POOL_MIN
value: "2"
- name: DATABASE_CONNECTION_POOL_MAX
value: "20"
- name: PGSSLMODE
value: disable
- name: REDIS_URL
value: redis://redis-replication-outline-master.outline:6379
- name: FILE_STORAGE
value: s3
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
name: ceph-bucket-outline
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: AWS_SECRET_ACCESS_KEY
name: ceph-bucket-outline
- name: AWS_REGION
value: us-east-1
- name: AWS_S3_UPLOAD_BUCKET_NAME
valueFrom:
configMapKeyRef:
key: BUCKET_NAME
name: ceph-bucket-outline
- name: AWS_S3_UPLOAD_BUCKET_URL
value: https://objects.alexlebens.dev
- name: AWS_S3_FORCE_PATH_STYLE
value: "true"
- name: AWS_S3_ACL
value: private
- name: FILE_STORAGE_UPLOAD_MAX_SIZE
value: "26214400"
- name: FORCE_HTTPS
value: "false"
- name: ENABLE_UPDATES
value: "false"
- name: WEB_CONCURRENCY
value: "1"
- name: FILE_STORAGE_IMPORT_MAX_SIZE
value: "5.12e+06"
- name: LOG_LEVEL
value: info
- name: DEFAULT_LANGUAGE
value: en_US
- name: RATE_LIMITER_ENABLED
value: "false"
- name: DEVELOPMENT_UNSAFE_INLINE_CSP
value: "false"
- name: OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
key: client
name: outline-oidc-secret
- name: OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
key: secret
name: outline-oidc-secret
- name: OIDC_AUTH_URI
value: https://auth.alexlebens.dev/application/o/authorize/
- name: OIDC_TOKEN_URI
value: https://auth.alexlebens.dev/application/o/token/
- name: OIDC_USERINFO_URI
value: https://auth.alexlebens.dev/application/o/userinfo/
- name: OIDC_USERNAME_CLAIM
value: email
- name: OIDC_DISPLAY_NAME
value: Authentik
- name: OIDC_SCOPES
value: openid profile email
image: outlinewiki/outline:1.1.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 512Mi
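# Note: DATABASE_URL and DATABASE_URL_TEST above are assembled with Kubernetes dependent
# environment variable expansion: $(VAR) references are substituted from variables defined
# earlier in the same env list, so the POSTGRES_* entries must stay above the URLs that consume
# them; an unresolvable reference is left as the literal string "$(VAR)". Minimal illustration
# of the mechanism (values are placeholders, not from this manifest):
#
#   env:
#     - name: DB_HOST
#       value: example-host
#     - name: DB_URL
#       value: postgres://$(DB_HOST):5432/app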
---
# Source: outline/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: outline-postgresql-17-cluster
namespace: outline
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: outline-postgresql-17
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "outline-postgresql-17-external-backup"
serverName: "outline-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "outline-postgresql-17-garage-local-backup"
serverName: "outline-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "outline-postgresql-17-recovery"
serverName: outline-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: outline-postgresql-17-backup-1
externalClusters:
- name: outline-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "outline-postgresql-17-recovery"
serverName: outline-postgresql-17-backup-1
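# Note: this Cluster bootstraps by restoring from the barman-cloud object store declared under
# externalClusters (outline-postgresql-17-backup-1) rather than by initdb, while the two plugin
# entries above re-enable ongoing base backups and WAL archiving to the external and Garage
# object stores once the restored cluster is running. The recovery ObjectStore referenced here
# is defined further down in this file.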
---
# Source: outline/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: outline-key-secret
namespace: outline
labels:
app.kubernetes.io/name: outline-key-secret
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: secret-key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/outline/key
metadataPolicy: None
property: secret-key
- secretKey: utils-key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/outline/key
metadataPolicy: None
property: utils-key
---
# Source: outline/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: outline-oidc-secret
namespace: outline
labels:
app.kubernetes.io/name: outline-oidc-secret
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: client
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/outline
metadataPolicy: None
property: client
- secretKey: secret
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/outline
metadataPolicy: None
property: secret
---
# Source: outline/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: outline-cloudflared-secret
namespace: outline
labels:
app.kubernetes.io/name: outline-cloudflared-secret
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/outline
metadataPolicy: None
property: token
---
# Source: outline/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: outline-postgresql-17-cluster-backup-secret
namespace: outline
labels:
app.kubernetes.io/name: outline-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: outline/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: outline-postgresql-17-cluster-backup-secret-garage
namespace: outline
labels:
app.kubernetes.io/name: outline-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: outline/templates/object-bucket-claim.yaml
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
name: ceph-bucket-outline
labels:
app.kubernetes.io/name: ceph-bucket-outline
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
spec:
generateBucketName: bucket-outline
storageClassName: ceph-bucket
additionalConfig:
bucketPolicy: |
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor",
"Effect": "Allow",
"Action": [
"s3:GetObjectAcl",
"s3:DeleteObject",
"s3:PutObject",
"s3:GetObject",
"s3:PutObjectAcl"
],
"Resource": "arn:aws:s3:::bucket-outline-630c57e0-d475-4d78-926c-c1c082291d73/*"
}
]
}
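# Note: for a Rook/Ceph ObjectBucketClaim, the provisioner creates a ConfigMap and a Secret that
# share the claim's name (ceph-bucket-outline), carrying BUCKET_NAME/BUCKET_HOST and the
# AWS-style access keys, which is what the outline Deployment above consumes via
# configMapKeyRef/secretKeyRef. The ARN in the bucketPolicy pins the generated bucket name, so
# the policy stays tied to this specific bucket instance.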
---
# Source: outline/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "outline-postgresql-17-external-backup"
namespace: outline
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: outline-postgresql-17
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/outline/outline-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: outline-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: outline-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: outline/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "outline-postgresql-17-garage-local-backup"
namespace: outline
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: outline-postgresql-17
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/outline/outline-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: outline-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: outline-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: outline-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: outline/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "outline-postgresql-17-recovery"
namespace: outline
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: outline-postgresql-17
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/outline/outline-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: outline-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: outline-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: outline/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: outline-postgresql-17-alert-rules
namespace: outline
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: outline-postgresql-17
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/outline-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
          summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="outline"} - cnpg_pg_replication_is_wal_receiver_up{namespace="outline"}) < 1
for: 5m
labels:
severity: critical
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
          summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch-up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with less than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="outline"} - cnpg_pg_replication_is_wal_receiver_up{namespace="outline"}) < 2
for: 5m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="outline",pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="outline",pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
          summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
          summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
          summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
---
# Source: outline/templates/redis-replication.yaml
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: redis-replication-outline
namespace: outline
labels:
app.kubernetes.io/name: redis-replication-outline
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
spec:
clusterSize: 3
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
kubernetesConfig:
image: quay.io/opstree/redis:v8.0.3
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 50m
memory: 128Mi
storage:
volumeClaimTemplate:
spec:
storageClassName: ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
redisExporter:
enabled: true
image: quay.io/opstree/redis-exporter:v1.48.0
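# Note: the REDIS_URL used by the outline Deployment (redis-replication-outline-master.outline:6379)
# assumes the redis operator exposes the current primary of this RedisReplication through a
# "<name>-master" Service in the same namespace; that Service is operator-managed and does not
# appear in this rendered output.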
---
# Source: outline/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "outline-postgresql-17-daily-backup-scheduled-backup"
namespace: outline
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: outline-postgresql-17
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: outline-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "outline-postgresql-17-external-backup"
---
# Source: outline/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "outline-postgresql-17-live-backup-scheduled-backup"
namespace: outline
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: outline-postgresql-17
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: outline-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "outline-postgresql-17-garage-local-backup"
---
# Source: outline/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: redis-replication-outline
namespace: outline
labels:
app.kubernetes.io/name: redis-replication-outline
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
redis-operator: "true"
env: production
spec:
selector:
matchLabels:
redis_setup_type: replication
endpoints:
- port: redis-exporter
interval: 30s
scrapeTimeout: 10s

View File

@@ -0,0 +1,215 @@
---
# Source: overseerr/charts/app-template/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: overseerr-main
labels:
app.kubernetes.io/instance: overseerr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: overseerr
helm.sh/chart: app-template-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: overseerr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: overseerr/charts/app-template/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: overseerr
labels:
app.kubernetes.io/instance: overseerr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: overseerr
app.kubernetes.io/service: overseerr
helm.sh/chart: app-template-4.4.0
namespace: overseerr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 5055
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: overseerr
app.kubernetes.io/name: overseerr
---
# Source: overseerr/charts/app-template/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: overseerr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: overseerr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: overseerr
helm.sh/chart: app-template-4.4.0
namespace: overseerr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: overseerr
app.kubernetes.io/instance: overseerr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: overseerr
app.kubernetes.io/name: overseerr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
image: ghcr.io/sct/overseerr:1.34.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 512Mi
volumeMounts:
- mountPath: /app/config
name: main
volumes:
- name: main
persistentVolumeClaim:
claimName: overseerr-main
---
# Source: overseerr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: overseerr-main-backup-secret
namespace: overseerr
labels:
app.kubernetes.io/name: overseerr-main-backup-secret
app.kubernetes.io/instance: overseerr
app.kubernetes.io/part-of: overseerr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/overseerr/overseerr-main"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
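# Note: target.template with mergePolicy: Merge lets the rendered RESTIC_REPOSITORY value be
# templated from the BUCKET_ENDPOINT key that this same ExternalSecret pulls from Vault, while
# the remaining data entries (RESTIC_PASSWORD, AWS_* credentials) are copied through unchanged.
# The resulting Secret therefore carries everything the VolSync restic mover below expects.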
---
# Source: overseerr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-overseerr
namespace: overseerr
labels:
app.kubernetes.io/name: http-route-overseerr
app.kubernetes.io/instance: overseerr
app.kubernetes.io/part-of: overseerr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- overseerr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: overseerr
port: 80
weight: 100
---
# Source: overseerr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: overseerr-main-backup-source
namespace: overseerr
labels:
app.kubernetes.io/name: overseerr-main-backup-source
app.kubernetes.io/instance: overseerr
app.kubernetes.io/part-of: overseerr
spec:
sourcePVC: overseerr-main
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: overseerr-main-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
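# Note: this ReplicationSource backs up the overseerr-main PVC daily at 04:00, using
# copyMethod: Snapshot with the ceph-blockpool-snapshot VolumeSnapshotClass so the backup is
# taken from a point-in-time clone rather than the live volume. "repository" names the Secret
# rendered by the ExternalSecret above, and the retain block plus the 7-day prune interval
# bound how many restic snapshots are kept.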

View File

@@ -0,0 +1,773 @@
---
# Source: photoview/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: photoview-nfs-storage
namespace: photoview
labels:
app.kubernetes.io/name: photoview-nfs-storage
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Pictures
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: photoview/charts/photoview/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: photoview-cache
labels:
app.kubernetes.io/instance: photoview
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: photoview
helm.sh/chart: photoview-4.4.0
namespace: photoview
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: photoview/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: photoview-nfs-storage
namespace: photoview
labels:
app.kubernetes.io/name: photoview-nfs-storage
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
spec:
volumeName: photoview-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: photoview/charts/photoview/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: photoview
labels:
app.kubernetes.io/instance: photoview
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: photoview
app.kubernetes.io/service: photoview
helm.sh/chart: photoview-4.4.0
namespace: photoview
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: photoview
app.kubernetes.io/name: photoview
---
# Source: photoview/charts/photoview/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: photoview
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: photoview
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: photoview
helm.sh/chart: photoview-4.4.0
namespace: photoview
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: photoview
app.kubernetes.io/instance: photoview
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: photoview
app.kubernetes.io/name: photoview
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
initContainers:
- command:
- /bin/sh
- -ec
- |
/bin/chown -R 999:999 /app/cache
image: busybox:1.37.0
imagePullPolicy: IfNotPresent
name: init-chmod-data
resources:
requests:
cpu: 100m
memory: 128Mi
securityContext:
runAsUser: 0
volumeMounts:
- mountPath: /app/cache
name: cache
containers:
- env:
- name: PHOTOVIEW_DATABASE_DRIVER
value: postgres
- name: PHOTOVIEW_POSTGRES_URL
valueFrom:
secretKeyRef:
key: uri
name: photoview-postgresql-17-cluster-app
- name: PHOTOVIEW_MEDIA_CACHE
value: /app/cache
image: photoview/photoview:2.4.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 512Mi
volumeMounts:
- mountPath: /app/cache
name: cache
- mountPath: /photos
name: media
readOnly: true
volumes:
- name: cache
persistentVolumeClaim:
claimName: photoview-cache
- name: media
persistentVolumeClaim:
claimName: photoview-nfs-storage
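# Note: the init container chowns /app/cache to 999:999 before the main container starts, which
# assumes the photoview image runs its server as UID/GID 999 and needs write access to the
# freshly provisioned ceph-block cache PVC; the NFS media volume is mounted read-only, so it is
# left untouched.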
---
# Source: photoview/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: photoview-postgresql-17-cluster
namespace: photoview
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: photoview-postgresql-17
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "photoview-postgresql-17-external-backup"
serverName: "photoview-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "photoview-postgresql-17-garage-local-backup"
serverName: "photoview-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "photoview-postgresql-17-recovery"
serverName: photoview-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: photoview-postgresql-17-backup-1
externalClusters:
- name: photoview-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "photoview-postgresql-17-recovery"
serverName: photoview-postgresql-17-backup-1
---
# Source: photoview/templates/external-secrets.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: photoview-postgresql-17-cluster-backup-secret
namespace: photoview
labels:
app.kubernetes.io/name: photoview-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: photoview/templates/external-secrets.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: photoview-postgresql-17-cluster-backup-secret-garage
namespace: photoview
labels:
app.kubernetes.io/name: photoview-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: photoview/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-photoview
namespace: photoview
labels:
app.kubernetes.io/name: http-route-photoview
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- photoview.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: photoview
port: 80
weight: 100
---
# Source: photoview/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "photoview-postgresql-17-external-backup"
namespace: photoview
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: photoview-postgresql-17
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/photoview/photoview-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: photoview-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: photoview-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: photoview/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "photoview-postgresql-17-garage-local-backup"
namespace: photoview
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: photoview-postgresql-17
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/photoview/photoview-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: photoview-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: photoview-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: photoview-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: photoview/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "photoview-postgresql-17-recovery"
namespace: photoview
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: photoview-postgresql-17
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/photoview/photoview-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: photoview-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: photoview-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: photoview/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: photoview-postgresql-17-alert-rules
namespace: photoview
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: photoview-postgresql-17
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/photoview-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
          summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="photoview"} - cnpg_pg_replication_is_wal_receiver_up{namespace="photoview"}) < 1
for: 5m
labels:
severity: critical
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
          summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch-up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with less than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="photoview"} - cnpg_pg_replication_is_wal_receiver_up{namespace="photoview"}) < 2
for: 5m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="photoview",pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="photoview",pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
            summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
            summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
            summary: CNPG Cluster has instances in the same zone.
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
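# Note: the alert expressions in the group above are rendered by the postgres-17-cluster chart.
# As a suggestion (not part of the chart output), the generated PromQL can be linted offline by
# copying spec.groups into a plain rule file and running Prometheus' bundled checker, e.g.:
#   promtool check rules photoview-postgresql-17-alert-rules.yaml
# The file name is illustrative; promtool ships with the Prometheus distribution.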
---
# Source: photoview/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "photoview-postgresql-17-daily-backup-scheduled-backup"
namespace: photoview
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: photoview-postgresql-17
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: photoview-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "photoview-postgresql-17-external-backup"
---
# Source: photoview/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "photoview-postgresql-17-live-backup-scheduled-backup"
namespace: photoview
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: photoview-postgresql-17
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: photoview-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "photoview-postgresql-17-garage-local-backup"
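# Note: CNPG ScheduledBackup schedules use a six-field cron expression where the first field is
# seconds, so "0 0 0 * * *" above runs daily at 00:00:00. Purely as an illustration, a daily
# backup at 03:30:00 would be written as:
#   schedule: "0 30 3 * * *"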

View File

@@ -0,0 +1,190 @@
---
# Source: plex/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: plex-nfs-storage
namespace: plex
labels:
app.kubernetes.io/name: plex-nfs-storage
app.kubernetes.io/instance: plex
app.kubernetes.io/part-of: plex
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: plex/charts/plex/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: plex-config
labels:
app.kubernetes.io/instance: plex
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: plex
helm.sh/chart: plex-4.4.0
namespace: plex
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "100Gi"
storageClassName: "ceph-block"
---
# Source: plex/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: plex-nfs-storage
namespace: plex
labels:
app.kubernetes.io/name: plex-nfs-storage
app.kubernetes.io/instance: plex
app.kubernetes.io/part-of: plex
spec:
volumeName: plex-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: plex/charts/plex/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: plex
labels:
app.kubernetes.io/instance: plex
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: plex
app.kubernetes.io/service: plex
helm.sh/chart: plex-4.4.0
namespace: plex
spec:
type: LoadBalancer
ports:
- port: 32400
targetPort: 32400
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: plex
app.kubernetes.io/name: plex
---
# Source: plex/charts/plex/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: plex
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: plex
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: plex
helm.sh/chart: plex-4.4.0
namespace: plex
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: plex
app.kubernetes.io/instance: plex
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: plex
app.kubernetes.io/name: plex
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: VERSION
value: docker
- name: PLEX_CLAIM
value: claim-XmGK2o9x54PbCzQaqj-J
image: ghcr.io/linuxserver/plex:1.42.2@sha256:ab81c7313fb5dc4d1f9562e5bbd5e5877a8a3c5ca6b9f9fff3437b5096a2b123
imagePullPolicy: IfNotPresent
name: main
resources:
limits:
gpu.intel.com/i915: 1
requests:
cpu: 10m
gpu.intel.com/i915: 1
memory: 512Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
readOnly: true
- mountPath: /transcode
name: transcode
volumes:
- name: config
persistentVolumeClaim:
claimName: plex-config
- name: media
persistentVolumeClaim:
claimName: plex-nfs-storage
- emptyDir: {}
name: transcode
---
# Source: plex/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-plex
namespace: plex
labels:
app.kubernetes.io/name: http-route-plex
app.kubernetes.io/instance: plex
app.kubernetes.io/part-of: plex
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- plex.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: plex
port: 32400
weight: 100
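# Note: whether traefik-gateway accepted this route is reported in the Gateway API status of the
# HTTPRoute. One way to check it after the manifest is applied (illustrative command):
#   kubectl -n plex get httproute http-route-plex -o jsonpath='{.status.parents[*].conditions}'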

File diff suppressed because it is too large

View File

@@ -0,0 +1,235 @@
---
# Source: prowlarr/charts/prowlarr/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: prowlarr-config
labels:
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: prowlarr
helm.sh/chart: prowlarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: prowlarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: prowlarr/charts/prowlarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: prowlarr
labels:
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: prowlarr
app.kubernetes.io/service: prowlarr
helm.sh/chart: prowlarr-4.4.0
namespace: prowlarr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 9696
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/name: prowlarr
---
# Source: prowlarr/charts/prowlarr/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: prowlarr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: prowlarr
helm.sh/chart: prowlarr-4.4.0
namespace: prowlarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: prowlarr
app.kubernetes.io/instance: prowlarr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/name: prowlarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 568
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 568
runAsUser: 568
supplementalGroups:
- 44
- 100
- 109
- 65539
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
image: ghcr.io/linuxserver/prowlarr:2.3.0@sha256:475853535de3de8441b87c1457c30f2e695f4831228b12b6b7274e9da409d874
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
volumes:
- name: config
persistentVolumeClaim:
claimName: prowlarr-config
---
# Source: prowlarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: prowlarr-config-backup-secret
namespace: prowlarr
labels:
app.kubernetes.io/name: prowlarr-config-backup-secret
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/part-of: prowlarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/prowlarr/prowlarr-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: prowlarr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-prowlarr
namespace: prowlarr
labels:
app.kubernetes.io/name: http-route-prowlarr
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/part-of: prowlarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- prowlarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: prowlarr
port: 80
weight: 100
---
# Source: prowlarr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: prowlarr-config-backup-source
namespace: prowlarr
labels:
app.kubernetes.io/name: prowlarr-config-backup-source
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/part-of: prowlarr
spec:
sourcePVC: prowlarr-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: prowlarr-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 568
runAsGroup: 568
fsGroup: 568
fsGroupChangePolicy: OnRootMismatch
supplementalGroups:
- 44
- 100
- 109
- 65539
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
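# Note: VolSync records the outcome of each scheduled run in the ReplicationSource status. To see
# when the last restic backup of prowlarr-config completed (illustrative command):
#   kubectl -n prowlarr get replicationsource prowlarr-config-backup-source -o jsonpath='{.status.lastSyncTime}'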

View File

@@ -0,0 +1,930 @@
---
# Source: radarr-4k/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: radarr-4k-nfs-storage
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k-nfs-storage
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: radarr-4k/charts/radarr-4k/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: radarr-4k-config
labels:
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-4k
helm.sh/chart: radarr-4k-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: radarr-4k
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "20Gi"
storageClassName: "ceph-block"
---
# Source: radarr-4k/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: radarr-4k-nfs-storage
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k-nfs-storage
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
volumeName: radarr-4k-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: radarr-4k/charts/radarr-4k/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: radarr-4k
labels:
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-4k
app.kubernetes.io/service: radarr-4k
helm.sh/chart: radarr-4k-4.4.0
namespace: radarr-4k
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 7878
protocol: TCP
name: http
- port: 9793
targetPort: 9793
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/name: radarr-4k
---
# Source: radarr-4k/charts/radarr-4k/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: radarr-4k
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-4k
helm.sh/chart: radarr-4k-4.4.0
namespace: radarr-4k
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: radarr-4k
app.kubernetes.io/instance: radarr-4k
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/name: radarr-4k
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 1000
runAsUser: 1000
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/radarr:6.0.4@sha256:06ac318ecb95a34c7b229568dcb4271f02cb5007bb189a0dd67a2032864187ca
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- args:
- radarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9793"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: radarr-4k-config
- name: media
persistentVolumeClaim:
claimName: radarr-4k-nfs-storage
---
# Source: radarr-4k/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: radarr5-4k-postgresql-17-cluster
namespace: radarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-4k-postgresql-17
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-4k-postgresql-17-external-backup"
serverName: "radarr5-4k-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "radarr5-4k-postgresql-17-garage-local-backup"
serverName: "radarr5-4k-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-4k-postgresql-17-recovery"
serverName: radarr5-4k-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 200m
memory: 1Gi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: radarr5-4k-postgresql-17-backup-1
externalClusters:
- name: radarr5-4k-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-4k-postgresql-17-recovery"
serverName: radarr5-4k-postgresql-17-backup-1
---
# Source: radarr-4k/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-4k-config-backup-secret
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k-config-backup-secret
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/radarr5-4k/radarr5-4k-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: radarr-4k/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-4k-postgresql-17-cluster-backup-secret
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: radarr-4k/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-4k-postgresql-17-cluster-backup-secret-garage
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: radarr-4k/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-radarr-4k
namespace: radarr-4k
labels:
app.kubernetes.io/name: http-route-radarr-4k
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- radarr-4k.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: radarr-4k
port: 80
weight: 100
---
# Source: radarr-4k/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-4k-postgresql-17-external-backup"
namespace: radarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-4k-postgresql-17
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/radarr5-4k/radarr5-4k-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: radarr-4k-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-4k-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: radarr-4k/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-4k-postgresql-17-garage-local-backup"
namespace: radarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-4k-postgresql-17
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr-4k/radarr5-4k-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: radarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: radarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: radarr-4k/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-4k-postgresql-17-recovery"
namespace: radarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-4k-postgresql-17
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr5-4k/radarr5-4k-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: radarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: radarr-4k/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr5-4k-postgresql-17-alert-rules
namespace: radarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-4k-postgresql-17
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/radarr5-4k-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
              risk of data loss and downtime if the primary instance fails.
              The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
              will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
              This can happen during a normal failover or automated minor version upgrades in a cluster with 2 or fewer
              instances. The replaced instance may need some time to catch up with the cluster primary instance.
              This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
              case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-4k"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-4k"}) < 1
for: 5m
labels:
severity: critical
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster less than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
              This can happen during a normal failover or automated minor version upgrades. The replaced instance may
              need some time to catch up with the cluster primary instance.
              This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-4k"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-4k"}) < 2
for: 5m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="radarr-4k",pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="radarr-4k",pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
            summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
            summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
            summary: CNPG Cluster has instances in the same zone.
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
---
# Source: radarr-4k/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr-4k
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
groups:
- name: radarr-4k
rules:
- alert: ExportarrAbsent
annotations:
description: Radarr 4K Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*radarr-4k.*"} == 1)
for: 5m
labels:
severity: critical
- alert: Radarr4kDown
annotations:
description: Radarr 4K service is down.
summary: Radarr 4K is down.
expr: |
radarr_4k_system_status{job=~".*radarr-4k.*"} == 0
for: 5m
labels:
severity: critical
---
# Source: radarr-4k/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: radarr-4k-config-backup-source
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k-config-backup-source
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
sourcePVC: radarr-4k-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: radarr-4k-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: radarr-4k/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-4k-postgresql-17-daily-backup-scheduled-backup"
namespace: radarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-4k-postgresql-17
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-4k-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-4k-postgresql-17-external-backup"
---
# Source: radarr-4k/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-4k-postgresql-17-live-backup-scheduled-backup"
namespace: radarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-4k-postgresql-17
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-4k-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-4k-postgresql-17-garage-local-backup"
---
# Source: radarr-4k/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: radarr-4k
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
selector:
matchLabels:
app.kubernetes.io/name: radarr-4k
app.kubernetes.io/instance: radarr-4k
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics
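# Note: this ServiceMonitor scrapes the Service port named "metrics" (9793, the exportarr sidecar),
# so scraping only works while the radarr-4k Service keeps that port name. The pairing can be
# confirmed with an illustrative command such as:
#   kubectl -n radarr-4k get svc radarr-4k -o jsonpath='{.spec.ports[?(@.name=="metrics")]}'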

View File

@@ -0,0 +1,928 @@
---
# Source: radarr-anime/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: radarr-anime-nfs-storage
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime-nfs-storage
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: radarr-anime/charts/radarr-anime/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: radarr-anime-config
labels:
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-anime
helm.sh/chart: radarr-anime-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: radarr-anime
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "20Gi"
storageClassName: "ceph-block"
---
# Source: radarr-anime/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: radarr-anime-nfs-storage
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime-nfs-storage
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
volumeName: radarr-anime-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: radarr-anime/charts/radarr-anime/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: radarr-anime
labels:
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-anime
app.kubernetes.io/service: radarr-anime
helm.sh/chart: radarr-anime-4.4.0
namespace: radarr-anime
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 7878
protocol: TCP
name: http
- port: 9793
targetPort: 9793
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/name: radarr-anime
---
# Source: radarr-anime/charts/radarr-anime/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: radarr-anime
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-anime
helm.sh/chart: radarr-anime-4.4.0
namespace: radarr-anime
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: radarr-anime
app.kubernetes.io/instance: radarr-anime
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/name: radarr-anime
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/radarr:6.0.4@sha256:06ac318ecb95a34c7b229568dcb4271f02cb5007bb189a0dd67a2032864187ca
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- args:
- radarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9793"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: radarr-anime-config
- name: media
persistentVolumeClaim:
claimName: radarr-anime-nfs-storage
---
# Source: radarr-anime/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: radarr5-anime-postgresql-17-cluster
namespace: radarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-anime-postgresql-17
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-anime-postgresql-17-external-backup"
serverName: "radarr5-anime-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "radarr5-anime-postgresql-17-garage-local-backup"
serverName: "radarr5-anime-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-anime-postgresql-17-recovery"
serverName: radarr5-anime-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: radarr5-anime-postgresql-17-backup-1
externalClusters:
- name: radarr5-anime-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-anime-postgresql-17-recovery"
serverName: radarr5-anime-postgresql-17-backup-1
---
# Source: radarr-anime/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-anime-config-backup-secret
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime-config-backup-secret
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/radarr5-anime/radarr5-anime-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: radarr-anime/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-anime-postgresql-17-cluster-backup-secret
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: radarr-anime/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-anime-postgresql-17-cluster-backup-secret-garage
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: radarr-anime/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-radarr-anime
namespace: radarr-anime
labels:
app.kubernetes.io/name: http-route-radarr-anime
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- radarr-anime.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: radarr-anime
port: 80
weight: 100
---
# Source: radarr-anime/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-anime-postgresql-17-external-backup"
namespace: radarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-anime-postgresql-17
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/radarr5-anime/radarr5-anime-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: radarr-anime-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-anime-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: radarr-anime/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-anime-postgresql-17-garage-local-backup"
namespace: radarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-anime-postgresql-17
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr-anime/radarr5-anime-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: radarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: radarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: radarr-anime/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-anime-postgresql-17-recovery"
namespace: radarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-anime-postgresql-17
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr5-anime/radarr5-anime-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: radarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: radarr-anime/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr5-anime-postgresql-17-alert-rules
namespace: radarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-anime-postgresql-17
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/radarr5-anime-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
              risk of data loss and downtime if the primary instance fails.
              The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
              will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
              This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
              instances. The replaced instance may need some time to catch up with the cluster primary instance.
              This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
              case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-anime"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-anime"}) < 1
for: 5m
labels:
severity: critical
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
            summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
              need some time to catch up with the cluster primary instance.
              This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-anime"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-anime"}) < 2
for: 5m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="radarr-anime",pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="radarr-anime",pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
            summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
            summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
            summary: CNPG Cluster instances are in the same availability zone.
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
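# Note on the connection-usage rules above: the expression divides the per-pod backend count by
# that pod's max_connections setting and scales it to a percentage, so the critical rule fires
# when a pod stays above 95% for 5 minutes. A worked example with assumed numbers, for
# illustration only:
#
#   sum by (pod) (cnpg_backends_total{...})                          -> 96 backends
#   max by (pod) (cnpg_pg_settings_setting{name="max_connections"})  -> 100
#   96 / 100 * 100 = 96%  >  95%, so CNPGClusterHighConnectionsCritical fires.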
---
# Source: radarr-anime/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr-anime
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
groups:
- name: radarr-anime
rules:
- alert: ExportarrAbsent
annotations:
description: Radarr Anime Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*radarr-anime.*"} == 1)
for: 5m
labels:
severity: critical
- alert: RadarrAnimeDown
annotations:
description: Radarr Anime service is down.
summary: Radarr Anime is down.
expr: |
radarr_anime_system_status{job=~".*radarr-anime.*"} == 0
for: 5m
labels:
severity: critical
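# Note on ExportarrAbsent: absent(up{job=~".*radarr-anime.*"} == 1) returns 1 only when no
# matching series with value 1 exists, so the alert covers both an exporter that vanished from
# service discovery and one whose scrapes are failing. A quick sanity check in the Prometheus
# UI (illustrative query; the exact job label value is an assumption):
#
#   up{job=~".*radarr-anime.*"}   # expect at least one series with value 1 while healthy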
---
# Source: radarr-anime/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: radarr-anime-config-backup-source
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime-config-backup-source
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
sourcePVC: radarr-anime-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: radarr-anime-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
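# Note on the retention block above: VolSync's restic mover applies these as restic keep rules
# when it prunes (every pruneIntervalDays: 7). Assumed mapping, shown for orientation only:
#
#   retain:            # roughly equivalent restic policy (assumption, not rendered here):
#     hourly: 1        #   --keep-hourly 1
#     daily: 3         #   --keep-daily 3
#     weekly: 2        #   --keep-weekly 2
#     monthly: 2       #   --keep-monthly 2
#     yearly: 4        #   --keep-yearly 4
#
# With one backup a day at 04:00, the repository should settle at the newest snapshot plus the
# last 3 daily, 2 weekly, 2 monthly and 4 yearly snapshots; older snapshots become prunable.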
---
# Source: radarr-anime/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-anime-postgresql-17-daily-backup-scheduled-backup"
namespace: radarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-anime-postgresql-17
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-anime-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-anime-postgresql-17-external-backup"
---
# Source: radarr-anime/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-anime-postgresql-17-live-backup-scheduled-backup"
namespace: radarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-anime-postgresql-17
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-anime-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-anime-postgresql-17-garage-local-backup"
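# Note on the schedules above: CloudNativePG ScheduledBackup uses a six-field cron expression
# with a leading seconds field, so "0 0 0 * * *" means every day at 00:00:00. The "daily"
# backup (immediate: false) targets the external Digital Ocean object store; the "live" backup
# (immediate: true, so one backup is also taken as soon as the resource is created) targets the
# local Garage object store. Field breakdown for reference:
#
#   "0 0 0 * * *"  ->  second=0  minute=0  hour=0  day-of-month=*  month=*  day-of-week=*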
---
# Source: radarr-anime/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: radarr-anime
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
selector:
matchLabels:
app.kubernetes.io/name: radarr-anime
app.kubernetes.io/instance: radarr-anime
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics

View File

@@ -0,0 +1,928 @@
---
# Source: radarr-standup/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: radarr-standup-nfs-storage
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup-nfs-storage
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: radarr-standup/charts/radarr-standup/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: radarr-standup-config
labels:
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-standup
helm.sh/chart: radarr-standup-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: radarr-standup
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "20Gi"
storageClassName: "ceph-block"
---
# Source: radarr-standup/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: radarr-standup-nfs-storage
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup-nfs-storage
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
volumeName: radarr-standup-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: radarr-standup/charts/radarr-standup/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: radarr-standup
labels:
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-standup
app.kubernetes.io/service: radarr-standup
helm.sh/chart: radarr-standup-4.4.0
namespace: radarr-standup
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 7878
protocol: TCP
name: http
- port: 9793
targetPort: 9793
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/name: radarr-standup
---
# Source: radarr-standup/charts/radarr-standup/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: radarr-standup
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-standup
helm.sh/chart: radarr-standup-4.4.0
namespace: radarr-standup
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: radarr-standup
app.kubernetes.io/instance: radarr-standup
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/name: radarr-standup
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/radarr:6.0.4@sha256:06ac318ecb95a34c7b229568dcb4271f02cb5007bb189a0dd67a2032864187ca
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- args:
- radarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9793"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: radarr-standup-config
- name: media
persistentVolumeClaim:
claimName: radarr-standup-nfs-storage
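# Note: the second container is an Exportarr sidecar. It is pointed at the local Radarr
# instance (URL http://localhost) and at /config/config.xml, mounted read-only, presumably so
# it can read the instance's API key and port from the shared config volume, and it exposes
# Prometheus metrics on port 9793, matching the "metrics" port on the Service above and the
# ServiceMonitor endpoint further down. Rough data path, paraphrased from this manifest:
#
#   ServiceMonitor (port: metrics) -> Service :9793 -> exportarr sidecar -> local Radarr API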
---
# Source: radarr-standup/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: radarr5-standup-postgresql-17-cluster
namespace: radarr-standup
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-standup-postgresql-17
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-standup-postgresql-17-external-backup"
serverName: "radarr5-standup-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "radarr5-standup-postgresql-17-garage-local-backup"
serverName: "radarr5-standup-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-standup-postgresql-17-recovery"
serverName: radarr5-standup-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: radarr5-standup-postgresql-17-backup-1
externalClusters:
- name: radarr5-standup-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-standup-postgresql-17-recovery"
serverName: radarr5-standup-postgresql-17-backup-1
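# Note on the backup/recovery wiring above: two barman-cloud plugin entries are attached, one
# per ObjectStore. Only the Garage-local store archives WAL (isWALArchiver: true); the external
# Digital Ocean store only receives the scheduled base backups. The bootstrap section restores
# from the "recovery" ObjectStore, and serverName selects the lineage folder inside the bucket,
# so the recovery source has to point at the lineage that was previously archived. Assumed
# bucket layout, for orientation only:
#
#   s3://postgres-backups/cl01tl/radarr5-standup/radarr5-standup-postgresql-17-cluster/
#     radarr5-standup-postgresql-17-backup-1/
#       base/ ...   wals/ ...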
---
# Source: radarr-standup/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-standup-config-backup-secret
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup-config-backup-secret
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/radarr5-standup/radarr5-standup-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
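# Note: the target.template block merges a templated RESTIC_REPOSITORY key into the secret
# alongside the fetched values (mergePolicy: Merge), so the resulting Kubernetes Secret should
# carry RESTIC_REPOSITORY, RESTIC_PASSWORD, AWS_DEFAULT_REGION, AWS_ACCESS_KEY_ID,
# AWS_SECRET_ACCESS_KEY and BUCKET_ENDPOINT. Illustrative rendering (the endpoint value lives
# in Vault and is not shown here):
#
#   RESTIC_REPOSITORY: <S3_BUCKET_ENDPOINT>/radarr5-standup/radarr5-standup-config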
---
# Source: radarr-standup/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-standup-postgresql-17-cluster-backup-secret
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: radarr-standup/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-standup-postgresql-17-cluster-backup-secret-garage
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: radarr-standup/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-radarr-standup
namespace: radarr-standup
labels:
app.kubernetes.io/name: http-route-radarr-standup
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- radarr-standup.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: radarr-standup
port: 80
weight: 100
---
# Source: radarr-standup/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-standup-postgresql-17-external-backup"
namespace: radarr-standup
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-standup-postgresql-17
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/radarr5-standup/radarr5-standup-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: radarr-standup-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-standup-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: radarr-standup/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-standup-postgresql-17-garage-local-backup"
namespace: radarr-standup
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-standup-postgresql-17
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr-standup/radarr5-standup-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: radarr-standup-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-standup-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: radarr-standup-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: radarr-standup/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-standup-postgresql-17-recovery"
namespace: radarr-standup
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-standup-postgresql-17
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr5-standup/radarr5-standup-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: radarr-standup-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-standup-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: radarr-standup/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr5-standup-postgresql-17-alert-rules
namespace: radarr-standup
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-standup-postgresql-17
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/radarr5-standup-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
              risk of data loss and downtime if the primary instance fails.
              The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
              will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
              This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
              instances. The replaced instance may need some time to catch up with the cluster primary instance.
              This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
              case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-standup"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-standup"}) < 1
for: 5m
labels:
severity: critical
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
            summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
              need some time to catch up with the cluster primary instance.
              This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-standup"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-standup"}) < 2
for: 5m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="radarr-standup",pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="radarr-standup",pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
            summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
            summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
            summary: CNPG Cluster instances are in the same availability zone.
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
---
# Source: radarr-standup/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr-standup
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
groups:
- name: radarr-standup
rules:
- alert: ExportarrAbsent
annotations:
description: Radarr Stand Up Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*radarr-standup.*"} == 1)
for: 5m
labels:
severity: critical
- alert: RadarrStandUpDown
annotations:
description: Radarr Stand Up service is down.
summary: Radarr Stand Up is down.
expr: |
radarr_standup_system_status{job=~".*radarr-standup.*"} == 0
for: 5m
labels:
severity: critical
---
# Source: radarr-standup/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: radarr-standup-config-backup-source
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup-config-backup-source
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
sourcePVC: radarr-standup-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: radarr-standup-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: radarr-standup/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-standup-postgresql-17-daily-backup-scheduled-backup"
namespace: radarr-standup
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-standup-postgresql-17
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-standup-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-standup-postgresql-17-external-backup"
---
# Source: radarr-standup/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-standup-postgresql-17-live-backup-scheduled-backup"
namespace: radarr-standup
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-standup-postgresql-17
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-standup-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-standup-postgresql-17-garage-local-backup"
---
# Source: radarr-standup/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: radarr-standup
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
selector:
matchLabels:
app.kubernetes.io/name: radarr-standup
app.kubernetes.io/instance: radarr-standup
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics

View File

@@ -0,0 +1,930 @@
---
# Source: radarr/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: radarr-nfs-storage
namespace: radarr
labels:
app.kubernetes.io/name: radarr-nfs-storage
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: radarr/charts/radarr/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: radarr-config
labels:
app.kubernetes.io/instance: radarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr
helm.sh/chart: radarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: radarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "20Gi"
storageClassName: "ceph-block"
---
# Source: radarr/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: radarr-nfs-storage
namespace: radarr
labels:
app.kubernetes.io/name: radarr-nfs-storage
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
volumeName: radarr-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: radarr/charts/radarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: radarr
labels:
app.kubernetes.io/instance: radarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr
app.kubernetes.io/service: radarr
helm.sh/chart: radarr-4.4.0
namespace: radarr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 7878
protocol: TCP
name: http
- port: 9793
targetPort: 9793
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr
app.kubernetes.io/name: radarr
---
# Source: radarr/charts/radarr/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: radarr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr
helm.sh/chart: radarr-4.4.0
namespace: radarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: radarr
app.kubernetes.io/instance: radarr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr
app.kubernetes.io/name: radarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 1000
runAsUser: 1000
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/radarr:6.0.4@sha256:06ac318ecb95a34c7b229568dcb4271f02cb5007bb189a0dd67a2032864187ca
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- args:
- radarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9793"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: radarr-config
- name: media
persistentVolumeClaim:
claimName: radarr-nfs-storage
---
# Source: radarr/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: radarr5-postgresql-17-cluster
namespace: radarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-postgresql-17
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-postgresql-17-external-backup"
serverName: "radarr5-postgresql-17-backup-2"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "radarr5-postgresql-17-garage-local-backup"
serverName: "radarr5-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-postgresql-17-recovery"
serverName: radarr5-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 200m
memory: 1Gi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: radarr5-postgresql-17-backup-1
externalClusters:
- name: radarr5-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-postgresql-17-recovery"
serverName: radarr5-postgresql-17-backup-1
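# Note: unlike the other Radarr instances, the external (Digital Ocean) backup here writes under
# serverName radarr5-postgresql-17-backup-2 while the Garage WAL archive and the recovery source
# still use backup-1. serverName is the per-lineage prefix inside the bucket, so this reads as
# the external lineage having been bumped after a restore so that new backups do not mix with
# the pre-restore ones under backup-1 (an inference from the manifest, not something stated here).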
---
# Source: radarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-config-backup-secret
namespace: radarr
labels:
app.kubernetes.io/name: radarr-config-backup-secret
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/radarr5/radarr5-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: radarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-postgresql-17-cluster-backup-secret
namespace: radarr
labels:
app.kubernetes.io/name: radarr-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: radarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-postgresql-17-cluster-backup-secret-garage
namespace: radarr
labels:
app.kubernetes.io/name: radarr-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: radarr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-radarr
namespace: radarr
labels:
app.kubernetes.io/name: http-route-radarr
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- radarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: radarr
port: 80
weight: 100
---
# Source: radarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-postgresql-17-external-backup"
namespace: radarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-postgresql-17
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/radarr5/radarr5-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: radarr-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: radarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-postgresql-17-garage-local-backup"
namespace: radarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-postgresql-17
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr/radarr5-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: radarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: radarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: radarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-postgresql-17-recovery"
namespace: radarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-postgresql-17
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr5/radarr5-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: radarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: radarr/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr5-postgresql-17-alert-rules
namespace: radarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-postgresql-17
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/radarr5-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
          summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr"}) < 1
for: 5m
labels:
severity: critical
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
          summary: CNPG Cluster has fewer than 2 standby replicas.
          description: |-
            CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
            your cluster at risk if another instance fails. The cluster is still able to operate normally, although
            the `-ro` and `-r` endpoints operate at reduced capacity.
            This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
            need some time to catch up with the cluster primary instance.
            This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
            In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr"}) < 2
for: 5m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
          summary: CNPG Instance is critically close to its maximum number of connections!
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="radarr",pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="radarr",pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
          summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
          summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
          summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
---
# Source: radarr/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr
namespace: radarr
labels:
app.kubernetes.io/name: radarr
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
groups:
- name: radarr
rules:
- alert: ExportarrAbsent
annotations:
description: Radarr Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*radarr.*"} == 1)
for: 5m
labels:
severity: critical
- alert: RadarrDown
annotations:
description: Radarr service is down.
summary: Radarr is down.
expr: |
radarr_system_status{job=~".*radarr.*"} == 0
for: 5m
labels:
severity: critical
---
# Source: radarr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: radarr-config-backup-source
namespace: radarr
labels:
app.kubernetes.io/name: radarr-config-backup-source
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
sourcePVC: radarr-config
trigger:
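    # Standard five-field cron: the restic backup runs daily at 04:00.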
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: radarr-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: radarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-postgresql-17-daily-backup-scheduled-backup"
namespace: radarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-postgresql-17
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
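  # CNPG ScheduledBackup uses a six-field cron expression (seconds first); this runs daily at midnight.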
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-postgresql-17-external-backup"
---
# Source: radarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-postgresql-17-live-backup-scheduled-backup"
namespace: radarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: radarr5-postgresql-17
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
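  # immediate: true below also takes a first backup as soon as this ScheduledBackup is created.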
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-postgresql-17-garage-local-backup"
---
# Source: radarr/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: radarr
namespace: radarr
labels:
app.kubernetes.io/name: radarr
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
selector:
matchLabels:
app.kubernetes.io/name: radarr
app.kubernetes.io/instance: radarr
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics

File diff suppressed because it is too large

View File

@@ -0,0 +1,435 @@
---
# Source: searxng/charts/searxng/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: searxng-api-data
labels:
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: searxng
helm.sh/chart: searxng-4.4.0
namespace: searxng
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: searxng/charts/searxng/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: searxng-browser-data
labels:
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: searxng
helm.sh/chart: searxng-4.4.0
namespace: searxng
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: searxng/charts/searxng/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: searxng-api
labels:
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: searxng
app.kubernetes.io/service: searxng-api
helm.sh/chart: searxng-4.4.0
namespace: searxng
spec:
type: ClusterIP
ports:
- port: 8080
targetPort: 8080
protocol: TCP
      name: http
selector:
app.kubernetes.io/controller: api
app.kubernetes.io/instance: searxng
app.kubernetes.io/name: searxng
---
# Source: searxng/charts/searxng/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: searxng-browser
labels:
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: searxng
app.kubernetes.io/service: searxng-browser
helm.sh/chart: searxng-4.4.0
namespace: searxng
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8080
protocol: TCP
      name: http
selector:
app.kubernetes.io/controller: browser
app.kubernetes.io/instance: searxng
app.kubernetes.io/name: searxng
---
# Source: searxng/charts/searxng/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: searxng-api
labels:
app.kubernetes.io/controller: api
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: searxng
helm.sh/chart: searxng-4.4.0
namespace: searxng
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: api
app.kubernetes.io/name: searxng
app.kubernetes.io/instance: searxng
template:
metadata:
labels:
app.kubernetes.io/controller: api
app.kubernetes.io/instance: searxng
app.kubernetes.io/name: searxng
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: SEARXNG_BASE_URL
value: http://searxng-api.searxng:8080
- name: SEARXNG_QUERY_URL
value: http://searxng-api.searxng:8080/search?q=<query>
- name: SEARXNG_HOSTNAME
value: searxng-api.searxng
- name: UWSGI_WORKERS
value: "4"
- name: UWSGI_THREADS
value: "4"
- name: ENABLE_RAG_WEB_SEARCH
value: "true"
- name: RAG_WEB_SEARCH_ENGINE
value: searxng
- name: RAG_WEB_SEARCH_RESULT_COUNT
value: "3"
- name: RAG_WEB_SEARCH_CONCURRENT_REQUESTS
value: "10"
image: searxng/searxng:latest@sha256:0124d32d77e0c7360d0b85f5d91882d1837e6ceb243c82e190f5d7e9f1401334
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /etc/searxng
name: api-data
- mountPath: /etc/searxng/settings.yml
mountPropagation: None
name: config
readOnly: true
subPath: settings.yml
- mountPath: /etc/searxng/limiter.toml
mountPropagation: None
name: config
readOnly: true
subPath: limiter.toml
volumes:
- name: api-data
persistentVolumeClaim:
claimName: searxng-api-data
- name: config
secret:
secretName: searxng-api-config-secret
---
# Source: searxng/charts/searxng/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: searxng-browser
labels:
app.kubernetes.io/controller: browser
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: searxng
helm.sh/chart: searxng-4.4.0
namespace: searxng
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: browser
app.kubernetes.io/name: searxng
app.kubernetes.io/instance: searxng
template:
metadata:
labels:
app.kubernetes.io/controller: browser
app.kubernetes.io/instance: searxng
app.kubernetes.io/name: searxng
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: SEARXNG_BASE_URL
value: https://searxng.alexlebens.net/
- name: SEARXNG_QUERY_URL
value: https://searxng.alexlebens.net/search?q=<query>
- name: SEARXNG_HOSTNAME
value: searxng.alexlebens.net
- name: SEARXNG_REDIS_URL
value: redis://redis-replication-searxng-master.searxng:6379/0
- name: UWSGI_WORKERS
value: "4"
- name: UWSGI_THREADS
value: "4"
image: searxng/searxng:latest@sha256:0124d32d77e0c7360d0b85f5d91882d1837e6ceb243c82e190f5d7e9f1401334
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /etc/searxng
name: browser-data
volumes:
- name: browser-data
persistentVolumeClaim:
claimName: searxng-browser-data
---
# Source: searxng/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: searxng-api-config-secret
namespace: searxng
labels:
app.kubernetes.io/name: searxng-api-config-secret
app.kubernetes.io/instance: searxng
app.kubernetes.io/part-of: searxng
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: settings.yml
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/searxng/api/config
metadataPolicy: None
property: settings.yml
- secretKey: limiter.toml
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/searxng/api/config
metadataPolicy: None
property: limiter.toml
---
# Source: searxng/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: searxng-browser-data-backup-secret
namespace: searxng
labels:
app.kubernetes.io/name: searxng-browser-data-backup-secret
app.kubernetes.io/instance: searxng
app.kubernetes.io/part-of: searxng
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
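  # mergePolicy Merge layers the templated RESTIC_REPOSITORY below on top of the keys synced from Vault.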
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/searxng/searxng-browser-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: searxng/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-searxng
namespace: searxng
labels:
app.kubernetes.io/name: http-route-searxng
app.kubernetes.io/instance: searxng
app.kubernetes.io/part-of: searxng
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- searxng.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: searxng-browser
port: 80
weight: 100
---
# Source: searxng/templates/redis-replication.yaml
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: redis-replication-searxng
namespace: searxng
labels:
app.kubernetes.io/name: redis-replication-searxng
app.kubernetes.io/instance: searxng
app.kubernetes.io/part-of: searxng
spec:
clusterSize: 3
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
kubernetesConfig:
image: quay.io/opstree/redis:v8.0.3
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 50m
memory: 128Mi
storage:
volumeClaimTemplate:
spec:
storageClassName: ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
redisExporter:
enabled: true
image: quay.io/opstree/redis-exporter:v1.48.0
---
# Source: searxng/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: searxng-browser-data-backup-source
namespace: searxng
labels:
app.kubernetes.io/name: searxng-browser-data-backup-source
app.kubernetes.io/instance: searxng
app.kubernetes.io/part-of: searxng
spec:
sourcePVC: searxng-browser-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: searxng-browser-data-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: searxng/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: redis-replication-searxng
namespace: searxng
labels:
app.kubernetes.io/name: redis-replication-searxng
app.kubernetes.io/instance: searxng
app.kubernetes.io/part-of: searxng
redis-operator: "true"
env: production
spec:
selector:
matchLabels:
redis_setup_type: replication
endpoints:
- port: redis-exporter
interval: 30s
scrapeTimeout: 10s

View File

@@ -0,0 +1,153 @@
---
# Source: site-documentation/charts/site-documentation/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: site-documentation
labels:
app.kubernetes.io/instance: site-documentation
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: site-documentation
app.kubernetes.io/service: site-documentation
helm.sh/chart: site-documentation-4.4.0
namespace: site-documentation
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 4321
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-documentation
app.kubernetes.io/name: site-documentation
---
# Source: site-documentation/charts/cloudflared-site/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: site-documentation-cloudflared-site
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-documentation
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cloudflared-site
app.kubernetes.io/version: 2025.10.0
helm.sh/chart: cloudflared-site-1.23.0
namespace: site-documentation
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: cloudflared-site
app.kubernetes.io/instance: site-documentation
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-documentation
app.kubernetes.io/name: cloudflared-site
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
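        # cloudflared runs a Cloudflare-managed tunnel; the token is injected from the ExternalSecret-synced secret referenced below.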
- args:
- tunnel
- --protocol
- http2
- --no-autoupdate
- run
- --token
- $(CF_MANAGED_TUNNEL_TOKEN)
env:
- name: CF_MANAGED_TUNNEL_TOKEN
valueFrom:
secretKeyRef:
key: cf-tunnel-token
name: site-documentation-cloudflared-api-secret
image: cloudflare/cloudflared:2025.11.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: site-documentation/charts/site-documentation/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: site-documentation
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-documentation
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: site-documentation
helm.sh/chart: site-documentation-4.4.0
namespace: site-documentation
spec:
revisionHistoryLimit: 3
replicas: 3
strategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: site-documentation
app.kubernetes.io/instance: site-documentation
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-documentation
app.kubernetes.io/name: site-documentation
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- image: harbor.alexlebens.net/images/site-documentation:0.0.3
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: site-documentation/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: site-documentation-cloudflared-api-secret
namespace: site-documentation
labels:
app.kubernetes.io/name: site-documentation-cloudflared-api-secret
app.kubernetes.io/instance: site-documentation
app.kubernetes.io/part-of: site-documentation
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/site-documentation
metadataPolicy: None
property: token

View File

@@ -0,0 +1,153 @@
---
# Source: site-profile/charts/site-profile/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: site-profile
labels:
app.kubernetes.io/instance: site-profile
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: site-profile
app.kubernetes.io/service: site-profile
helm.sh/chart: site-profile-4.4.0
namespace: site-profile
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 4321
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-profile
app.kubernetes.io/name: site-profile
---
# Source: site-profile/charts/cloudflared-site/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: site-profile-cloudflared-site
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-profile
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cloudflared-site
app.kubernetes.io/version: 2025.10.0
helm.sh/chart: cloudflared-site-1.23.0
namespace: site-profile
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: cloudflared-site
app.kubernetes.io/instance: site-profile
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-profile
app.kubernetes.io/name: cloudflared-site
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- tunnel
- --protocol
- http2
- --no-autoupdate
- run
- --token
- $(CF_MANAGED_TUNNEL_TOKEN)
env:
- name: CF_MANAGED_TUNNEL_TOKEN
valueFrom:
secretKeyRef:
key: cf-tunnel-token
name: site-profile-cloudflared-api-secret
image: cloudflare/cloudflared:2025.11.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: site-profile/charts/site-profile/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: site-profile
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-profile
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: site-profile
helm.sh/chart: site-profile-4.4.0
namespace: site-profile
spec:
revisionHistoryLimit: 3
replicas: 3
strategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: site-profile
app.kubernetes.io/instance: site-profile
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-profile
app.kubernetes.io/name: site-profile
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- image: harbor.alexlebens.net/images/site-profile:2.1.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: site-profile/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: site-profile-cloudflared-api-secret
namespace: site-profile
labels:
app.kubernetes.io/name: site-profile-cloudflared-api-secret
app.kubernetes.io/instance: site-profile
app.kubernetes.io/part-of: site-profile
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/site-profile
metadataPolicy: None
property: token

View File

@@ -0,0 +1,396 @@
---
# Source: slskd/templates/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: slskd
labels:
app.kubernetes.io/name: slskd
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
pod-security.kubernetes.io/audit: privileged
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/warn: privileged
---
# Source: slskd/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: slskd-nfs-storage
namespace: slskd
labels:
app.kubernetes.io/name: slskd-nfs-storage
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: slskd/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: slskd-nfs-storage
namespace: slskd
labels:
app.kubernetes.io/name: slskd-nfs-storage
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
spec:
volumeName: slskd-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: slskd/charts/slskd/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: slskd
labels:
app.kubernetes.io/instance: slskd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: slskd
app.kubernetes.io/service: slskd
helm.sh/chart: slskd-4.4.0
namespace: slskd
spec:
type: ClusterIP
ports:
- port: 5030
targetPort: 5030
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: slskd
app.kubernetes.io/name: slskd
---
# Source: slskd/charts/slskd/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: slskd-main
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: slskd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: slskd
helm.sh/chart: slskd-4.4.0
namespace: slskd
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: slskd
app.kubernetes.io/instance: slskd
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: slskd
app.kubernetes.io/name: slskd
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
initContainers:
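        # Privileged init container: enables IPv4 forwarding and disables IPv6 via sysctl, settings the gluetun VPN sidecar is assumed to rely on.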
- args:
- -ec
- |
sysctl -w net.ipv4.ip_forward=1;
sysctl -w net.ipv6.conf.all.disable_ipv6=1
command:
- /bin/sh
image: busybox:1.37.0
imagePullPolicy: IfNotPresent
name: init-sysctl
resources:
requests:
cpu: 10m
memory: 128Mi
securityContext:
privileged: true
containers:
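        # gluetun sidecar: tunnels pod traffic over ProtonVPN (WireGuard) and admits inbound 5030 (slskd web UI) and 50300 (presumably the Soulseek listen port).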
- env:
- name: VPN_SERVICE_PROVIDER
value: protonvpn
- name: VPN_TYPE
value: wireguard
- name: WIREGUARD_PRIVATE_KEY
valueFrom:
secretKeyRef:
key: private-key
name: slskd-wireguard-conf
- name: VPN_PORT_FORWARDING
value: "on"
- name: PORT_FORWARD_ONLY
value: "on"
- name: FIREWALL_OUTBOUND_SUBNETS
value: 192.168.1.0/24,10.244.0.0/16
- name: FIREWALL_INPUT_PORTS
value: 5030,50300
- name: DOT
value: "off"
image: ghcr.io/qdm12/gluetun:v3.40.3@sha256:ef4a44819a60469682c7b5e69183e6401171891feaa60186652d292c59e41b30
imagePullPolicy: IfNotPresent
name: gluetun
resources:
limits:
devic.es/tun: "1"
requests:
cpu: 10m
devic.es/tun: "1"
memory: 128Mi
securityContext:
capabilities:
add:
- NET_ADMIN
- SYS_MODULE
privileged: true
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: SLSKD_UMASK
value: "0"
image: slskd/slskd:0.24.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 512Mi
volumeMounts:
- mountPath: /mnt/store
name: data
- mountPath: /app/slskd.yml
mountPropagation: None
name: slskd-config
readOnly: true
subPath: slskd.yml
volumes:
- name: data
persistentVolumeClaim:
claimName: slskd-nfs-storage
- name: slskd-config
secret:
secretName: slskd-config-secret
---
# Source: slskd/charts/slskd/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: slskd-soularr
labels:
app.kubernetes.io/controller: soularr
app.kubernetes.io/instance: slskd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: slskd
helm.sh/chart: slskd-4.4.0
namespace: slskd
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: soularr
app.kubernetes.io/name: slskd
app.kubernetes.io/instance: slskd
template:
metadata:
labels:
app.kubernetes.io/controller: soularr
app.kubernetes.io/instance: slskd
app.kubernetes.io/name: slskd
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: SCRIPT_INTERVAL
value: "300"
image: mrusse08/soularr:latest@sha256:71a0b9e5a522d76bb0ffdb6d720d681fde22417b3a5acc9ecae61c89d05d8afc
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /mnt/store
name: data
- mountPath: /data/config.ini
mountPropagation: None
name: soularr-config
readOnly: true
subPath: config.ini
volumes:
- name: data
persistentVolumeClaim:
claimName: slskd-nfs-storage
- name: soularr-config
secret:
secretName: soularr-config-secret
---
# Source: slskd/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: slskd-config-secret
namespace: slskd
labels:
app.kubernetes.io/name: slskd-config-secret
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: slskd.yml
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/slskd/config
metadataPolicy: None
property: slskd.yml
---
# Source: slskd/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: soularr-config-secret
namespace: slskd
labels:
app.kubernetes.io/name: soularr-config-secret
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: config.ini
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/slskd/soularr
metadataPolicy: None
property: config.ini
---
# Source: slskd/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: slskd-wireguard-conf
namespace: slskd
labels:
app.kubernetes.io/name: slskd-wireguard-conf
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: private-key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /protonvpn/conf/cl01tl
metadataPolicy: None
property: private-key
---
# Source: slskd/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-slskd
namespace: slskd
labels:
app.kubernetes.io/name: http-route-slskd
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- slskd.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: slskd
port: 5030
weight: 100
---
# Source: slskd/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: slskd
namespace: slskd
labels:
app.kubernetes.io/name: slskd
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
spec:
selector:
matchLabels:
app.kubernetes.io/name: slskd
app.kubernetes.io/instance: slskd
endpoints:
- port: http
interval: 3m
scrapeTimeout: 1m
path: /metrics

View File

@@ -0,0 +1,928 @@
---
# Source: sonarr-4k/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: sonarr-4k-nfs-storage
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k-nfs-storage
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: sonarr-4k/charts/sonarr-4k/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: sonarr-4k-config
labels:
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr-4k
helm.sh/chart: sonarr-4k-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: sonarr-4k
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "20Gi"
storageClassName: "ceph-block"
---
# Source: sonarr-4k/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: sonarr-4k-nfs-storage
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k-nfs-storage
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
volumeName: sonarr-4k-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: sonarr-4k/charts/sonarr-4k/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: sonarr-4k
labels:
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr-4k
app.kubernetes.io/service: sonarr-4k
helm.sh/chart: sonarr-4k-4.4.0
namespace: sonarr-4k
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8989
protocol: TCP
name: http
- port: 9794
targetPort: 9794
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/name: sonarr-4k
---
# Source: sonarr-4k/charts/sonarr-4k/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sonarr-4k
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr-4k
helm.sh/chart: sonarr-4k-4.4.0
namespace: sonarr-4k
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: sonarr-4k
app.kubernetes.io/instance: sonarr-4k
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/name: sonarr-4k
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/sonarr:4.0.16@sha256:60e5edcac39172294ad22d55d1b08c2c0a9fe658cad2f2c4d742ae017d7874de
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
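        # exportarr sidecar: reads the Sonarr config.xml mounted read-only below and exposes Prometheus metrics on port 9794.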
- args:
- sonarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9794"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: sonarr-4k-config
- name: media
persistentVolumeClaim:
claimName: sonarr-4k-nfs-storage
---
# Source: sonarr-4k/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: sonarr4-4k-postgresql-17-cluster
namespace: sonarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-4k-postgresql-17
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
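  # Two Barman Cloud backup targets: the external (DigitalOcean) object store does not archive WAL, while the local Garage store is the WAL archiver.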
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "sonarr4-4k-postgresql-17-external-backup"
serverName: "sonarr4-4k-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "sonarr4-4k-postgresql-17-garage-local-backup"
serverName: "sonarr4-4k-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-4k-postgresql-17-recovery"
serverName: sonarr4-4k-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 512Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
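  # Bootstrap restores the app database from the Garage recovery object store (server name sonarr4-4k-postgresql-17-backup-1).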
bootstrap:
recovery:
database: app
source: sonarr4-4k-postgresql-17-backup-1
externalClusters:
- name: sonarr4-4k-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "sonarr4-4k-postgresql-17-recovery"
serverName: sonarr4-4k-postgresql-17-backup-1
---
# Source: sonarr-4k/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-4k-config-backup-secret
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k-config-backup-secret
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/sonarr4-4k/sonarr4-4k-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: sonarr-4k/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-4k-postgresql-17-cluster-backup-secret
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: sonarr-4k/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: sonarr-4k/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-sonarr-4k
namespace: sonarr-4k
labels:
app.kubernetes.io/name: http-route-sonarr-4k
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- sonarr-4k.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: sonarr-4k
port: 80
weight: 100
---
# Source: sonarr-4k/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-4k-postgresql-17-external-backup"
namespace: sonarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-4k-postgresql-17
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/sonarr4-4k/sonarr4-4k-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: sonarr-4k-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-4k-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: sonarr-4k/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-4k-postgresql-17-garage-local-backup"
namespace: sonarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-4k-postgresql-17
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/sonarr-4k/sonarr4-4k-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: sonarr-4k/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-4k-postgresql-17-recovery"
namespace: sonarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-4k-postgresql-17
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/sonarr-4k/sonarr4-4k-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: sonarr-4k/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: sonarr4-4k-postgresql-17-alert-rules
namespace: sonarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-4k-postgresql-17
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/sonarr4-4k-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
          summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr-4k"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr-4k"}) < 1
for: 5m
labels:
severity: critical
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster less than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr-4k"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr-4k"}) < 2
for: 5m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance is critically close to the maximum number of connections!
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag.
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="sonarr-4k",pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: A CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="sonarr-4k",pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions since the frozen XID.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster instances are in the same availability zone.
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
---
# Source: sonarr-4k/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: sonarr-4k
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
groups:
- name: sonarr-4k
rules:
- alert: ExportarrAbsent
annotations:
description: Sonarr 4K Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*sonarr-4k.*"} == 1)
for: 5m
labels:
severity: critical
- alert: Sonarr4KDown
annotations:
description: Sonarr 4K service is down.
summary: Sonarr 4K is down.
expr: |
sonarr_4k_system_status{job=~".*sonarr-4k.*"} == 0
for: 5m
labels:
severity: critical
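# How the ExportarrAbsent expression works: `up{job=~".*sonarr-4k.*"} == 1` returns one series per
# healthy scrape target, and absent() yields a single 1-valued series only when that selection is
# empty, so the alert fires after 5 minutes without any healthy Exportarr target in service discovery.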
---
# Source: sonarr-4k/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: sonarr-4k-config-backup-source
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k-config-backup-source
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
sourcePVC: sonarr-4k-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: sonarr-4k-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
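# VolSync notes: the trigger uses a standard five-field cron, so the restic mover runs daily at
# 04:00. copyMethod Snapshot takes a CSI VolumeSnapshot (ceph-blockpool-snapshot) of the config PVC
# and backs that copy up; prune runs every 7 days, and the retain block keeps 1 hourly, 3 daily,
# 2 weekly, 2 monthly and 4 yearly restic snapshots. The `repository` field names the Secret that
# holds the restic repository URL and credentials.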
---
# Source: sonarr-4k/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "sonarr4-4k-postgresql-17-daily-backup-scheduled-backup"
namespace: sonarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-4k-postgresql-17
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: sonarr4-4k-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-4k-postgresql-17-external-backup"
---
# Source: sonarr-4k/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "sonarr4-4k-postgresql-17-live-backup-scheduled-backup"
namespace: sonarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-4k-postgresql-17
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: sonarr4-4k-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-4k-postgresql-17-garage-local-backup"
---
# Source: sonarr-4k/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: sonarr-4k
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
selector:
matchLabels:
app.kubernetes.io/name: sonarr-4k
app.kubernetes.io/instance: sonarr-4k
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics
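# The ServiceMonitor selects the sonarr-4k Service by its name/instance labels and scrapes the
# named `metrics` port (the Exportarr sidecar on 9794) every 3 minutes with a 1-minute timeout.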

View File

@@ -0,0 +1,928 @@
---
# Source: sonarr-anime/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: sonarr-anime-nfs-storage
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime-nfs-storage
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: sonarr-anime/charts/sonarr-anime/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: sonarr-anime-config
labels:
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr-anime
helm.sh/chart: sonarr-anime-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: sonarr-anime
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "20Gi"
storageClassName: "ceph-block"
---
# Source: sonarr-anime/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: sonarr-anime-nfs-storage
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime-nfs-storage
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
volumeName: sonarr-anime-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
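# Static NFS binding: the PVC pins the pre-created PV via volumeName and both use the nfs-client
# storage class, so no dynamic provisioning occurs. The 1Gi capacity is nominal (NFS does not
# enforce it), and the mount options select NFSv4.1 with attribute caching disabled (noac).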
---
# Source: sonarr-anime/charts/sonarr-anime/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: sonarr-anime
labels:
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr-anime
app.kubernetes.io/service: sonarr-anime
helm.sh/chart: sonarr-anime-4.4.0
namespace: sonarr-anime
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8989
protocol: TCP
name: http
- port: 9794
targetPort: 9794
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/name: sonarr-anime
---
# Source: sonarr-anime/charts/sonarr-anime/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sonarr-anime
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr-anime
helm.sh/chart: sonarr-anime-4.4.0
namespace: sonarr-anime
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: sonarr-anime
app.kubernetes.io/instance: sonarr-anime
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/name: sonarr-anime
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/sonarr:4.0.16@sha256:60e5edcac39172294ad22d55d1b08c2c0a9fe658cad2f2c4d742ae017d7874de
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- args:
- sonarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9794"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: sonarr-anime-config
- name: media
persistentVolumeClaim:
claimName: sonarr-anime-nfs-storage
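# Sidecar note: the metrics container runs `exportarr sonarr` against the local instance
# (URL http://localhost), reads the API key from the read-only /config/config.xml mount via the
# CONFIG env var, and serves Prometheus metrics on 9794, which the Service above exposes as the
# `metrics` port. PUID/PGID and fsGroup 1000 keep file ownership on the config volume consistent.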
---
# Source: sonarr-anime/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: sonarr4-anime-postgresql-17-cluster
namespace: sonarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-anime-postgresql-17
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "sonarr4-anime-postgresql-17-external-backup"
serverName: "sonarr4-anime-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "sonarr4-anime-postgresql-17-garage-local-backup"
serverName: "sonarr4-anime-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-anime-postgresql-17-recovery"
serverName: sonarr4-anime-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 512Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: sonarr4-anime-postgresql-17-backup-1
externalClusters:
- name: sonarr4-anime-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "sonarr4-anime-postgresql-17-recovery"
serverName: sonarr4-anime-postgresql-17-backup-1
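# Backup/recovery topology for this cluster: the two barman-cloud plugin entries point at the
# external (DigitalOcean Spaces) and local (Garage) ObjectStores defined below, with only the
# Garage store acting as WAL archiver. bootstrap.recovery restores database "app" from the
# *-recovery ObjectStore under serverName sonarr4-anime-postgresql-17-backup-1, so a rebuilt
# cluster is seeded from the latest Garage backup. Pod anti-affinity on kubernetes.io/hostname
# spreads the 3 instances across nodes.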
---
# Source: sonarr-anime/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-anime-config-backup-secret
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime-config-backup-secret
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/sonarr4-anime/sonarr4-anime-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
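# ExternalSecret templating: the target template composes RESTIC_REPOSITORY from the
# BUCKET_ENDPOINT value pulled from Vault, and mergePolicy Merge keeps the plain keys synced
# from the data entries while adding the templated key on top.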
---
# Source: sonarr-anime/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-anime-postgresql-17-cluster-backup-secret
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: sonarr-anime/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: sonarr-anime/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-sonarr-anime
namespace: sonarr-anime
labels:
app.kubernetes.io/name: http-route-sonarr-anime
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- sonarr-anime.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: sonarr-anime
port: 80
weight: 100
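# Gateway API routing: this HTTPRoute attaches to the shared traefik-gateway in the traefik
# namespace; requests for sonarr-anime.alexlebens.net on any path (PathPrefix /) are forwarded
# to the sonarr-anime Service on port 80, which targets the app container's port 8989.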
---
# Source: sonarr-anime/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-anime-postgresql-17-external-backup"
namespace: sonarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-anime-postgresql-17
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/sonarr4-anime/sonarr4-anime-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: sonarr-anime-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-anime-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: sonarr-anime/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-anime-postgresql-17-garage-local-backup"
namespace: sonarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-anime-postgresql-17
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/sonarr-anime/sonarr4-anime-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: sonarr-anime/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-anime-postgresql-17-recovery"
namespace: sonarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-anime-postgresql-17
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/sonarr-anime/sonarr4-anime-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: sonarr-anime/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: sonarr4-anime-postgresql-17-alert-rules
namespace: sonarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-anime-postgresql-17
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/sonarr4-anime-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
instances. The replaced instance may need some time to catch up with the cluster primary instance.
This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr-anime"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr-anime"}) < 1
for: 5m
labels:
severity: critical
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster less than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr-anime"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr-anime"}) < 2
for: 5m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance is critically close to the maximum number of connections!
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag.
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="sonarr-anime",pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: A CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="sonarr-anime",pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions since the frozen XID.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster instances are in the same availability zone.
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
---
# Source: sonarr-anime/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: sonarr-anime
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
groups:
- name: sonarr-anime
rules:
- alert: ExportarrAbsent
annotations:
description: Sonarr Anime Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*sonarr-anime.*"} == 1)
for: 5m
labels:
severity: critical
- alert: SonarrAnimeDown
annotations:
description: Sonarr Anime service is down.
summary: Sonarr Anime is down.
expr: |
sonarr_anime_system_status{job=~".*sonarr-anime.*"} == 0
for: 5m
labels:
severity: critical
---
# Source: sonarr-anime/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: sonarr-anime-config-backup-source
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime-config-backup-source
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
sourcePVC: sonarr-anime-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: sonarr-anime-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: sonarr-anime/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "sonarr4-anime-postgresql-17-daily-backup-scheduled-backup"
namespace: sonarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-anime-postgresql-17
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: sonarr4-anime-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-anime-postgresql-17-external-backup"
---
# Source: sonarr-anime/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "sonarr4-anime-postgresql-17-live-backup-scheduled-backup"
namespace: sonarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-anime-postgresql-17
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: sonarr4-anime-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-anime-postgresql-17-garage-local-backup"
---
# Source: sonarr-anime/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: sonarr-anime
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
selector:
matchLabels:
app.kubernetes.io/name: sonarr-anime
app.kubernetes.io/instance: sonarr-anime
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics

View File

@@ -0,0 +1,928 @@
---
# Source: sonarr/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: sonarr-nfs-storage
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr-nfs-storage
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: sonarr/charts/sonarr/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: sonarr-config
labels:
app.kubernetes.io/instance: sonarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr
helm.sh/chart: sonarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: sonarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "20Gi"
storageClassName: "ceph-block"
---
# Source: sonarr/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: sonarr-nfs-storage
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr-nfs-storage
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
volumeName: sonarr-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: sonarr/charts/sonarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: sonarr
labels:
app.kubernetes.io/instance: sonarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr
app.kubernetes.io/service: sonarr
helm.sh/chart: sonarr-4.4.0
namespace: sonarr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8989
protocol: TCP
name: http
- port: 9794
targetPort: 9794
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr
app.kubernetes.io/name: sonarr
---
# Source: sonarr/charts/sonarr/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sonarr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr
helm.sh/chart: sonarr-4.4.0
namespace: sonarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: sonarr
app.kubernetes.io/instance: sonarr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr
app.kubernetes.io/name: sonarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/sonarr:4.0.16@sha256:60e5edcac39172294ad22d55d1b08c2c0a9fe658cad2f2c4d742ae017d7874de
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- args:
- sonarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9794"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: sonarr-config
- name: media
persistentVolumeClaim:
claimName: sonarr-nfs-storage
---
# Source: sonarr/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: sonarr4-postgresql-17-cluster
namespace: sonarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-postgresql-17
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "sonarr4-postgresql-17-external-backup"
serverName: "sonarr4-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "sonarr4-postgresql-17-garage-local-backup"
serverName: "sonarr4-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-postgresql-17-recovery"
serverName: sonarr4-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 200m
memory: 1Gi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: sonarr4-postgresql-17-backup-1
externalClusters:
- name: sonarr4-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "sonarr4-postgresql-17-recovery"
serverName: sonarr4-postgresql-17-backup-1
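# Sizing note: this cluster requests 200m CPU / 1Gi memory per instance (versus 100m / 512Mi for
# the anime cluster above) and, like its siblings, carries a 256Mi hugepages-2Mi limit, which can
# only be satisfied on nodes that have 2Mi huge pages preallocated.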
---
# Source: sonarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-config-backup-secret
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr-config-backup-secret
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/sonarr4/sonarr4-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: sonarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-postgresql-17-cluster-backup-secret
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: sonarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-postgresql-17-cluster-backup-secret-garage
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: sonarr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-sonarr
namespace: sonarr
labels:
app.kubernetes.io/name: http-route-sonarr
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- sonarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: sonarr
port: 80
weight: 100
---
# Source: sonarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-postgresql-17-external-backup"
namespace: sonarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-postgresql-17
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/sonarr4/sonarr4-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: sonarr-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: sonarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-postgresql-17-garage-local-backup"
namespace: sonarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-postgresql-17
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/sonarr/sonarr4-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: sonarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: sonarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: sonarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-postgresql-17-recovery"
namespace: sonarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-postgresql-17
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/sonarr/sonarr4-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: sonarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: sonarr/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: sonarr4-postgresql-17-alert-rules
namespace: sonarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-postgresql-17
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/sonarr4-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr"}) < 1
for: 5m
labels:
severity: critical
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr"}) < 2
for: 5m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance is critically close to the maximum number of connections!
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="sonarr",pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="sonarr",pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
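# Worked example for the connection alerts in this rule group (illustrative
# numbers, not taken from the cluster): with max_connections=100 and 96 active
# backends on a pod, the ratio expression evaluates to 96, which exceeds the 95
# threshold, so CNPGClusterHighConnectionsCritical fires once the condition has
# held for 5 minutes; at 85 backends only the 80-threshold warning rule fires.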
---
# Source: sonarr/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: sonarr
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
groups:
- name: sonarr
rules:
- alert: ExportarrAbsent
annotations:
description: Sonarr Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*sonarr.*"} == 1)
for: 5m
labels:
severity: critical
- alert: SonarrDown
annotations:
description: Sonarr service is down.
summary: Sonarr is down.
expr: |
sonarr_system_status{job=~".*sonarr.*"} == 0
for: 5m
labels:
severity: critical
---
# Source: sonarr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: sonarr-config-backup-source
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr-config-backup-source
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
sourcePVC: sonarr-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: sonarr-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: sonarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "sonarr4-postgresql-17-daily-backup-scheduled-backup"
namespace: sonarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-postgresql-17
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: sonarr4-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-postgresql-17-external-backup"
---
# Source: sonarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "sonarr4-postgresql-17-live-backup-scheduled-backup"
namespace: sonarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: sonarr4-postgresql-17
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: sonarr4-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-postgresql-17-garage-local-backup"
---
# Source: sonarr/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: sonarr
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
selector:
matchLabels:
app.kubernetes.io/name: sonarr
app.kubernetes.io/instance: sonarr
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics

View File

@@ -0,0 +1,331 @@
---
# Source: tautulli/charts/tautulli/templates/common.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: tautulli
labels:
app.kubernetes.io/instance: tautulli
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tautulli
helm.sh/chart: tautulli-4.4.0
namespace: tautulli
data:
select_tmdb_poster.py: |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Description: Selects the default TMDB poster if no poster is selected
or the current poster is from Gracenote.
Author: /u/SwiftPanda16
Requires: plexapi
Usage:
* Change the posters for an entire library:
python select_tmdb_poster.py --library "Movies"
* Change the poster for a specific item:
python select_tmdb_poster.py --rating_key 1234
* By default locked posters are skipped. To update locked posters:
python select_tmdb_poster.py --library "Movies" --include_locked
Tautulli script trigger:
* Notify on recently added
Tautulli script conditions:
* Filter which media to select the poster. Examples:
[ Media Type | is | movie ]
Tautulli script arguments:
* Recently Added:
--rating_key {rating_key}
'''
import argparse
import os
import plexapi.base
from plexapi.server import PlexServer
plexapi.base.USER_DONT_RELOAD_FOR_KEYS.add('fields')
# Environmental Variables
PLEX_URL = os.getenv('PLEX_URL')
PLEX_TOKEN = os.getenv('PLEX_TOKEN')
def select_tmdb_poster_library(library, include_locked=False):
for item in library.all(includeGuids=False):
# Only reload for fields
item.reload(**{k: 0 for k, v in item._INCLUDES.items()})
select_tmdb_poster_item(item, include_locked=include_locked)
def select_tmdb_poster_item(item, include_locked=False):
if item.isLocked('thumb') and not include_locked: # PlexAPI 4.5.10
print(f"Locked poster for {item.title}. Skipping.")
return
posters = item.posters()
selected_poster = next((p for p in posters if p.selected), None)
if selected_poster is None:
print(f"WARNING: No poster selected for {item.title}.")
else:
skipping = ' Skipping.' if selected_poster.provider != 'gracenote' else ''
print(f"Poster provider is '{selected_poster.provider}' for {item.title}.{skipping}")
if posters and (selected_poster is None or selected_poster.provider == 'gracenote'):
# Fallback to first poster if no TMDB posters are available
tmdb_poster = next((p for p in posters if p.provider == 'tmdb'), posters[0])
# Selecting the poster automatically locks it
tmdb_poster.select()
print(f"Selected {tmdb_poster.provider} poster for {item.title}.")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--rating_key', type=int)
parser.add_argument('--library')
parser.add_argument('--include_locked', action='store_true')
opts = parser.parse_args()
plex = PlexServer(PLEX_URL, PLEX_TOKEN)
if opts.rating_key:
item = plex.fetchItem(opts.rating_key)
select_tmdb_poster_item(item, opts.include_locked)
elif opts.library:
library = plex.library.section(opts.library)
select_tmdb_poster_library(library, opts.include_locked)
else:
print("No --rating_key or --library specified. Exiting.")
---
# Source: tautulli/charts/tautulli/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: tautulli-config
labels:
app.kubernetes.io/instance: tautulli
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tautulli
helm.sh/chart: tautulli-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: tautulli
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: tautulli/charts/tautulli/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: tautulli
labels:
app.kubernetes.io/instance: tautulli
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tautulli
app.kubernetes.io/service: tautulli
helm.sh/chart: tautulli-4.4.0
namespace: tautulli
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8181
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: tautulli
app.kubernetes.io/name: tautulli
---
# Source: tautulli/charts/tautulli/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: tautulli
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: tautulli
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tautulli
helm.sh/chart: tautulli-4.4.0
annotations:
reloader.stakater.com/auto: "true"
namespace: tautulli
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: tautulli
app.kubernetes.io/instance: tautulli
template:
metadata:
annotations:
checksum/configMaps: 8f779aaa6f9bccc9e07f526b05d4f9d81e7e55a443819d526312ff297ac88ba5
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: tautulli
app.kubernetes.io/name: tautulli
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: PUID
value: "1001"
- name: GUID
value: "1001"
- name: TZ
value: US/Central
image: ghcr.io/tautulli/tautulli:v2.16.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /config/scripts/select_tmdb_poster.py
mountPropagation: None
name: scripts
readOnly: true
subPath: select_tmdb_poster.py
volumes:
- name: config
persistentVolumeClaim:
claimName: tautulli-config
- configMap:
name: tautulli-scripts
name: scripts
---
# Source: tautulli/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: tautulli-config-backup-secret
namespace: tautulli
labels:
app.kubernetes.io/name: tautulli-config-backup-secret
app.kubernetes.io/instance: tautulli
app.kubernetes.io/part-of: tautulli
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/tautulli/tautulli-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: tautulli/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-tautulli
namespace: tautulli
labels:
app.kubernetes.io/name: http-route-tautulli
app.kubernetes.io/instance: tautulli
app.kubernetes.io/part-of: tautulli
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- tautulli.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: tautulli
port: 80
weight: 100
---
# Source: tautulli/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: tautulli-config-backup-source
namespace: tautulli
labels:
app.kubernetes.io/name: tautulli-config-backup-source
app.kubernetes.io/instance: tautulli
app.kubernetes.io/part-of: tautulli
spec:
sourcePVC: tautulli-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: tautulli-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
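# How the pieces above fit together: the restic mover launched by this
# ReplicationSource reads its connection settings from the Secret named in
# `repository:`, which the tautulli-config-backup-secret ExternalSecret above
# renders from Vault. For a hypothetical bucket endpoint of
# s3:https://nyc3.digitaloceanspaces.com/volsync-backups, the templated key
# would render as, for example:
#   RESTIC_REPOSITORY: s3:https://nyc3.digitaloceanspaces.com/volsync-backups/tautulli/tautulli-config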

View File

@@ -0,0 +1,658 @@
---
# Source: tdarr/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: tdarr-nfs-storage
namespace: tdarr
labels:
app.kubernetes.io/name: tdarr-nfs-storage
app.kubernetes.io/instance: tdarr
app.kubernetes.io/part-of: tdarr
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: tdarr/charts/tdarr/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: tdarr-server
labels:
app.kubernetes.io/instance: tdarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tdarr
helm.sh/chart: tdarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: tdarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "50Gi"
storageClassName: "ceph-block"
---
# Source: tdarr/charts/tdarr/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: tdarr-config
labels:
app.kubernetes.io/instance: tdarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tdarr
helm.sh/chart: tdarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: tdarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "50Gi"
storageClassName: "ceph-block"
---
# Source: tdarr/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: tdarr-nfs-storage
namespace: tdarr
labels:
app.kubernetes.io/name: tdarr-nfs-storage
app.kubernetes.io/instance: tdarr
app.kubernetes.io/part-of: tdarr
spec:
volumeName: tdarr-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: tdarr/charts/tdarr-exporter/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: tdarr-tdarr-exporter
labels:
helm.sh/chart: tdarr-exporter-1.1.7
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
app.kubernetes.io/version: "1.4.3"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 9090
targetPort: 9090
protocol: TCP
name: metrics
selector:
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
---
# Source: tdarr/charts/tdarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: tdarr-api
labels:
app.kubernetes.io/instance: tdarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tdarr
app.kubernetes.io/service: tdarr-api
helm.sh/chart: tdarr-4.4.0
namespace: tdarr
spec:
type: ClusterIP
ports:
- port: 8266
targetPort: 8266
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: server
app.kubernetes.io/instance: tdarr
app.kubernetes.io/name: tdarr
---
# Source: tdarr/charts/tdarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: tdarr-web
labels:
app.kubernetes.io/instance: tdarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tdarr
app.kubernetes.io/service: tdarr-web
helm.sh/chart: tdarr-4.4.0
namespace: tdarr
spec:
type: ClusterIP
ports:
- port: 8265
targetPort: 8265
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: server
app.kubernetes.io/instance: tdarr
app.kubernetes.io/name: tdarr
---
# Source: tdarr/charts/tdarr/templates/common.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: tdarr-node
labels:
app.kubernetes.io/controller: node
app.kubernetes.io/instance: tdarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tdarr
helm.sh/chart: tdarr-4.4.0
namespace: tdarr
spec:
revisionHistoryLimit: 3
selector:
matchLabels:
app.kubernetes.io/controller: node
app.kubernetes.io/name: tdarr
app.kubernetes.io/instance: tdarr
template:
metadata:
annotations:
labels:
app.kubernetes.io/controller: node
app.kubernetes.io/instance: tdarr
app.kubernetes.io/name: tdarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
nodeSelector:
intel.feature.node.kubernetes.io/gpu: "true"
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1001"
- name: PGID
value: "1001"
- name: UMASK_SET
value: "002"
- name: ffmpegVersion
value: "6"
- name: inContainer
value: "true"
- name: nodeName
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: serverIP
value: tdarr-api
- name: serverPort
value: "8266"
image: ghcr.io/haveagitgat/tdarr_node:2.58.02
imagePullPolicy: IfNotPresent
name: main
resources:
limits:
gpu.intel.com/i915: 1
requests:
cpu: 10m
gpu.intel.com/i915: 1
memory: 512Mi
volumeMounts:
- mountPath: /mnt/store
name: media
readOnly: true
- mountPath: /tcache
name: node-cache
volumes:
- name: media
persistentVolumeClaim:
claimName: tdarr-nfs-storage
- emptyDir: {}
name: node-cache
---
# Source: tdarr/charts/tdarr-exporter/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: tdarr-tdarr-exporter
labels:
helm.sh/chart: tdarr-exporter-1.1.7
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
app.kubernetes.io/version: "1.4.3"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
template:
metadata:
annotations:
labels:
helm.sh/chart: tdarr-exporter-1.1.7
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
app.kubernetes.io/version: "1.4.3"
app.kubernetes.io/managed-by: Helm
spec:
serviceAccountName: default
securityContext:
{}
containers:
- name: tdarr-exporter
securityContext:
{}
image: "docker.io/homeylab/tdarr-exporter:1.4.2"
imagePullPolicy: IfNotPresent
ports:
- name: metrics
containerPort: 9090
protocol: TCP
env:
- name: TDARR_URL
value: "http://tdarr-web.tdarr:8265"
- name: VERIFY_SSL
value: "false"
- name: LOG_LEVEL
value: "info"
- name: PROMETHEUS_PORT
value: "9090"
- name: PROMETHEUS_PATH
value: "/metrics"
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: metrics
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
readinessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: metrics
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
startupProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: metrics
initialDelaySeconds: 2
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 2
resources:
requests:
cpu: 10m
memory: 256Mi
---
# Source: tdarr/charts/tdarr/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: tdarr-server
labels:
app.kubernetes.io/controller: server
app.kubernetes.io/instance: tdarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tdarr
helm.sh/chart: tdarr-4.4.0
namespace: tdarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: server
app.kubernetes.io/name: tdarr
app.kubernetes.io/instance: tdarr
template:
metadata:
labels:
app.kubernetes.io/controller: server
app.kubernetes.io/instance: tdarr
app.kubernetes.io/name: tdarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1001"
- name: PGID
value: "1001"
- name: UMASK_SET
value: "002"
- name: ffmpegVersion
value: "6"
- name: internalNode
value: "false"
- name: inContainer
value: "true"
- name: nodeName
value: tdarr-server
- name: serverIP
value: 0.0.0.0
- name: serverPort
value: "8266"
- name: webUIPort
value: "8265"
image: ghcr.io/haveagitgat/tdarr:2.58.02
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 200m
memory: 1Gi
volumeMounts:
- mountPath: /app/configs
name: config
- mountPath: /mnt/store
name: media
readOnly: true
- mountPath: /app/server
name: server
- mountPath: /tcache
name: server-cache
volumes:
- name: config
persistentVolumeClaim:
claimName: tdarr-config
- name: media
persistentVolumeClaim:
claimName: tdarr-nfs-storage
- name: server
persistentVolumeClaim:
claimName: tdarr-server
- emptyDir: {}
name: server-cache
---
# Source: tdarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: tdarr-config-backup-secret
namespace: tdarr
labels:
app.kubernetes.io/name: tdarr-config-backup-secret
app.kubernetes.io/instance: tdarr
app.kubernetes.io/part-of: tdarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/tdarr/tdarr-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: tdarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: tdarr-server-backup-secret
namespace: tdarr
labels:
app.kubernetes.io/name: tdarr-server-backup-secret
app.kubernetes.io/instance: tdarr
app.kubernetes.io/part-of: tdarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/tdarr/tdarr-server"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: tdarr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-tdarr
namespace: tdarr
labels:
app.kubernetes.io/name: http-route-tdarr
app.kubernetes.io/instance: tdarr
app.kubernetes.io/part-of: tdarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- tdarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: tdarr-web
port: 8265
weight: 100
---
# Source: tdarr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: tdarr-config-backup-source
namespace: tdarr
labels:
app.kubernetes.io/name: tdarr-config-backup-source
app.kubernetes.io/instance: tdarr
app.kubernetes.io/part-of: tdarr
spec:
sourcePVC: tdarr-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: tdarr-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: tdarr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: tdarr-server-backup-source
namespace: tdarr
labels:
app.kubernetes.io/name: tdarr-server-backup-source
app.kubernetes.io/instance: tdarr
app.kubernetes.io/part-of: tdarr
spec:
sourcePVC: tdarr-server
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: tdarr-server-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: tdarr/charts/tdarr-exporter/templates/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
helm.sh/chart: tdarr-exporter-1.1.7
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
app.kubernetes.io/version: "1.4.3"
app.kubernetes.io/managed-by: Helm
name: tdarr-tdarr-exporter
spec:
endpoints:
- interval: 1m
path: /metrics
port: metrics
scrapeTimeout: 15s
namespaceSelector:
matchNames:
- tdarr
selector:
matchLabels:
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
---
# Source: tdarr/charts/tdarr-exporter/templates/tests/test-connection.yaml
apiVersion: v1
kind: Pod
metadata:
name: "tdarr-tdarr-exporter-test-connection"
labels:
helm.sh/chart: tdarr-exporter-1.1.7
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
app.kubernetes.io/version: "1.4.3"
app.kubernetes.io/managed-by: Helm
annotations:
"helm.sh/hook": test
spec:
containers:
- name: wget
image: "docker.io/busybox:1.36.1"
command: ['wget']
args: ['tdarr-tdarr-exporter:9090/healthz']
restartPolicy: Never

View File

@@ -0,0 +1,441 @@
---
# Source: tubearchivist/templates/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: tubearchivist
labels:
app.kubernetes.io/name: tubearchivist
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
pod-security.kubernetes.io/audit: privileged
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/warn: privileged
---
# Source: tubearchivist/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: tubearchivist-nfs-storage
namespace: tubearchivist
labels:
app.kubernetes.io/name: tubearchivist-nfs-storage
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/YouTube
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: tubearchivist/charts/tubearchivist/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: tubearchivist
labels:
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tubearchivist
helm.sh/chart: tubearchivist-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: tubearchivist
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "40Gi"
storageClassName: "ceph-block"
---
# Source: tubearchivist/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: tubearchivist-nfs-storage
namespace: tubearchivist
labels:
app.kubernetes.io/name: tubearchivist-nfs-storage
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
volumeName: tubearchivist-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: tubearchivist/charts/tubearchivist/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: tubearchivist
labels:
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tubearchivist
app.kubernetes.io/service: tubearchivist
helm.sh/chart: tubearchivist-4.4.0
namespace: tubearchivist
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 24000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/name: tubearchivist
---
# Source: tubearchivist/charts/tubearchivist/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: tubearchivist
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tubearchivist
helm.sh/chart: tubearchivist-4.4.0
namespace: tubearchivist
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: tubearchivist
app.kubernetes.io/instance: tubearchivist
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/name: tubearchivist
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: VPN_SERVICE_PROVIDER
value: protonvpn
- name: VPN_TYPE
value: wireguard
- name: WIREGUARD_PRIVATE_KEY
valueFrom:
secretKeyRef:
key: private-key
name: tubearchivist-wireguard-conf
- name: VPN_PORT_FORWARDING
value: "on"
- name: PORT_FORWARD_ONLY
value: "on"
- name: FIREWALL_OUTBOUND_SUBNETS
value: 10.0.0.0/8
- name: FIREWALL_INPUT_PORTS
value: 80,8000,24000
- name: DOT
value: "false"
- name: DNS_KEEP_NAMESERVER
value: "true"
- name: DNS_PLAINTEXT_ADDRESS
value: 10.96.0.10
image: ghcr.io/qdm12/gluetun:v3.40.3@sha256:ef4a44819a60469682c7b5e69183e6401171891feaa60186652d292c59e41b30
imagePullPolicy: IfNotPresent
name: gluetun
resources:
limits:
devic.es/tun: "1"
requests:
cpu: 10m
devic.es/tun: "1"
memory: 128Mi
securityContext:
capabilities:
add:
- NET_ADMIN
- SYS_MODULE
privileged: true
- env:
- name: TZ
value: US/Central
- name: HOST_UID
value: "1000"
- name: HOST_GID
value: "1000"
- name: ES_URL
value: https://elasticsearch-tubearchivist-es-http.tubearchivist:9200
- name: ES_DISABLE_VERIFY_SSL
value: "true"
- name: REDIS_CON
value: redis://redis-replication-tubearchivist-master.tubearchivist:6379
- name: TA_HOST
value: https://tubearchivist.alexlebens.net http://tubearchivist.tubearchivist:80/
- name: TA_PORT
value: "24000"
- name: TA_USERNAME
value: admin
envFrom:
- secretRef:
name: tubearchivist-config-secret
image: bbilly1/tubearchivist:v0.5.8
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 1Gi
volumeMounts:
- mountPath: /cache
name: data
- mountPath: /youtube
name: youtube
volumes:
- name: data
persistentVolumeClaim:
claimName: tubearchivist
- name: youtube
persistentVolumeClaim:
claimName: tubearchivist-nfs-storage
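# Sidecar note (sketch of the intended traffic flow, inferred from the env
# values above): gluetun and the tubearchivist container share the pod network
# namespace, so the app's egress is routed through the WireGuard tunnel.
# FIREWALL_OUTBOUND_SUBNETS=10.0.0.0/8 is what still lets the app reach
# in-cluster endpoints outside the tunnel (for example the ES_URL and REDIS_CON
# hosts above), while FIREWALL_INPUT_PORTS=80,8000,24000 permits inbound
# traffic from the Service, whose targetPort 24000 lands on this pod.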
---
# Source: tubearchivist/templates/elasticsearch.yaml
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
name: elasticsearch-tubearchivist
namespace: tubearchivist
labels:
app.kubernetes.io/name: elasticsearch-tubearchivist
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
version: 8.18.0
auth:
fileRealm:
- secretName: tubearchivist-elasticsearch-secret
nodeSets:
- name: default
count: 1
config:
node.store.allow_mmap: false
path.repo: /usr/share/elasticsearch/data/snapshot
podTemplate:
spec:
volumes:
- name: tubearchivist-snapshot-nfs-storage
nfs:
path: /volume2/Storage/TubeArchivist
server: synologybond.alexlebens.net
containers:
- name: elasticsearch
volumeMounts:
- name: tubearchivist-snapshot-nfs-storage
mountPath: /usr/share/elasticsearch/data/snapshot
volumeClaimTemplates:
- metadata:
name: elasticsearch-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: ceph-block
---
# Source: tubearchivist/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: tubearchivist-config-secret
namespace: tubearchivist
labels:
app.kubernetes.io/name: tubearchivist-config-secret
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ELASTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/tubearchivist/env
metadataPolicy: None
property: ELASTIC_PASSWORD
- secretKey: TA_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/tubearchivist/env
metadataPolicy: None
property: TA_PASSWORD
---
# Source: tubearchivist/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: tubearchivist-elasticsearch-secret
namespace: tubearchivist
labels:
app.kubernetes.io/name: tubearchivist-elasticsearch-secret
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: username
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/tubearchivist/elasticsearch
metadataPolicy: None
property: username
- secretKey: password
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/tubearchivist/elasticsearch
metadataPolicy: None
property: password
- secretKey: roles
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/tubearchivist/elasticsearch
metadataPolicy: None
property: roles
---
# Source: tubearchivist/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: tubearchivist-wireguard-conf
namespace: tubearchivist
labels:
app.kubernetes.io/name: tubearchivist-wireguard-conf
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: private-key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /protonvpn/conf/cl01tl
metadataPolicy: None
property: private-key
---
# Source: tubearchivist/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-tubearchivist
namespace: tubearchivist
labels:
app.kubernetes.io/name: http-route-tubearchivist
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- tubearchivist.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: tubearchivist
port: 80
weight: 100
---
# Source: tubearchivist/templates/redis-replication.yaml
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: redis-replication-tubearchivist
namespace: tubearchivist
labels:
app.kubernetes.io/name: redis-replication-tubearchivist
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
clusterSize: 3
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
kubernetesConfig:
image: quay.io/opstree/redis:v8.0.3
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 50m
memory: 128Mi
storage:
volumeClaimTemplate:
spec:
storageClassName: ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
redisExporter:
enabled: true
image: quay.io/opstree/redis-exporter:v1.48.0
---
# Source: tubearchivist/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: redis-replication-tubearchivist
namespace: tubearchivist
labels:
app.kubernetes.io/name: redis-replication-tubearchivist
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
redis-operator: "true"
env: production
spec:
selector:
matchLabels:
redis_setup_type: replication
endpoints:
- port: redis-exporter
interval: 30s
scrapeTimeout: 10s

View File

@@ -0,0 +1,846 @@
---
# Source: vaultwarden/charts/vaultwarden/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: vaultwarden-data
labels:
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: vaultwarden
helm.sh/chart: vaultwarden-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: vaultwarden
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: vaultwarden/charts/vaultwarden/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: vaultwarden
labels:
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: vaultwarden
app.kubernetes.io/service: vaultwarden
helm.sh/chart: vaultwarden-4.4.0
namespace: vaultwarden
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/name: vaultwarden
---
# Source: vaultwarden/charts/cloudflared/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: vaultwarden-cloudflared
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cloudflared
app.kubernetes.io/version: 2025.10.0
helm.sh/chart: cloudflared-1.23.0
namespace: vaultwarden
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: cloudflared
app.kubernetes.io/instance: vaultwarden
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/name: cloudflared
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- tunnel
- --protocol
- http2
- --no-autoupdate
- run
- --token
- $(CF_MANAGED_TUNNEL_TOKEN)
env:
- name: CF_MANAGED_TUNNEL_TOKEN
valueFrom:
secretKeyRef:
key: cf-tunnel-token
name: vaultwarden-cloudflared-secret
image: cloudflare/cloudflared:2025.11.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: vaultwarden/charts/vaultwarden/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: vaultwarden
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: vaultwarden
helm.sh/chart: vaultwarden-4.4.0
namespace: vaultwarden
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: vaultwarden
app.kubernetes.io/instance: vaultwarden
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/name: vaultwarden
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: DOMAIN
value: https://passwords.alexlebens.dev
- name: SIGNUPS_ALLOWED
value: "false"
- name: INVITATIONS_ALLOWED
value: "false"
- name: DATABASE_URL
valueFrom:
secretKeyRef:
key: uri
name: vaultwarden-postgresql-17-cluster-app
image: vaultwarden/server:1.34.3
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /data
name: config
volumes:
- name: config
persistentVolumeClaim:
claimName: vaultwarden-data
---
# Source: vaultwarden/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: vaultwarden-postgresql-17-cluster
namespace: vaultwarden
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: vaultwarden-postgresql-17
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "vaultwarden-postgresql-17-external-backup"
serverName: "vaultwarden-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "vaultwarden-postgresql-17-garage-local-backup"
serverName: "vaultwarden-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "vaultwarden-postgresql-17-recovery"
serverName: vaultwarden-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: vaultwarden-postgresql-17-backup-1
externalClusters:
- name: vaultwarden-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "vaultwarden-postgresql-17-recovery"
serverName: vaultwarden-postgresql-17-backup-1
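# The two barman-cloud plugin entries above point at the ObjectStore resources
# defined later in this file (external DigitalOcean Spaces backup and local
# Garage backup), and the bootstrap.recovery block restores from the
# "-recovery" ObjectStore. A hypothetical on-demand backup against the Garage
# store, mirroring the ScheduledBackup fields used elsewhere in this PR, could
# look like:
#   apiVersion: postgresql.cnpg.io/v1
#   kind: Backup
#   metadata:
#     name: vaultwarden-postgresql-17-manual-backup
#     namespace: vaultwarden
#   spec:
#     cluster:
#       name: vaultwarden-postgresql-17-cluster
#     method: plugin
#     pluginConfiguration:
#       name: barman-cloud.cloudnative-pg.io
#       parameters:
#         barmanObjectName: "vaultwarden-postgresql-17-garage-local-backup"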
---
# Source: vaultwarden/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: vaultwarden-cloudflared-secret
namespace: vaultwarden
labels:
app.kubernetes.io/name: vaultwarden-cloudflared-secret
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/vaultwarden
metadataPolicy: None
property: token
---
# Source: vaultwarden/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: vaultwarden-data-backup-secret
namespace: vaultwarden
labels:
app.kubernetes.io/name: vaultwarden-data-backup-secret
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/vaultwarden/vaultwarden-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: vaultwarden/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: vaultwarden-postgresql-17-cluster-backup-secret
namespace: vaultwarden
labels:
app.kubernetes.io/name: vaultwarden-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: vaultwarden/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: vaultwarden-postgresql-17-cluster-backup-secret-garage
namespace: vaultwarden
labels:
app.kubernetes.io/name: vaultwarden-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: vaultwarden/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "vaultwarden-postgresql-17-external-backup"
namespace: vaultwarden
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: vaultwarden-postgresql-17
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/vaultwarden/vaultwarden-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: vaultwarden-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: vaultwarden-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: vaultwarden/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "vaultwarden-postgresql-17-garage-local-backup"
namespace: vaultwarden
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: vaultwarden-postgresql-17
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/vaultwarden/vaultwarden-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: vaultwarden-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: vaultwarden-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: vaultwarden-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: vaultwarden/charts/postgres-17-cluster/templates/object-store.yaml
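# Recovery-side definition of the same Garage bucket; the cluster's bootstrap/recovery
# configuration is expected to reference this store when restoring from backup.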
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "vaultwarden-postgresql-17-recovery"
namespace: vaultwarden
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: vaultwarden-postgresql-17
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/vaultwarden/vaultwarden-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: vaultwarden-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: vaultwarden-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: vaultwarden/charts/postgres-17-cluster/templates/prometheus-rule.yaml
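# Stock CloudNativePG alert rules from the chart, scoped to the vaultwarden namespace
# and the vaultwarden-postgresql-17-cluster pods.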
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: vaultwarden-postgresql-17-alert-rules
namespace: vaultwarden
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: vaultwarden-postgresql-17
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/vaultwarden-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
              risk of data loss and downtime if the primary instance fails.
              The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
              will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
              This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
              instances. The replaced instance may need some time to catch up with the cluster primary instance.
              This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
              case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="vaultwarden"} - cnpg_pg_replication_is_wal_receiver_up{namespace="vaultwarden"}) < 1
for: 5m
labels:
severity: critical
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster less than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
              need some time to catch up with the cluster primary instance.
              This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="vaultwarden"} - cnpg_pg_replication_is_wal_receiver_up{namespace="vaultwarden"}) < 2
for: 5m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="vaultwarden", pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="vaultwarden", pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="vaultwarden", pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="vaultwarden", pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="vaultwarden",pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="vaultwarden", pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
            summary: CNPG Cluster has a query taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="vaultwarden",pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
            summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
            summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
            summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="vaultwarden", pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: vaultwarden
cnpg_cluster: vaultwarden-postgresql-17-cluster
---
# Source: vaultwarden/templates/replication-source.yaml
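# VolSync restic backup of the vaultwarden-data PVC: snapshot-based copy at 04:00 daily,
# repository and credentials taken from vaultwarden-data-backup-secret, pruned every 7 days.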
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: vaultwarden-data-backup-source
namespace: vaultwarden
labels:
app.kubernetes.io/name: vaultwarden-data-backup-source
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
spec:
sourcePVC: vaultwarden-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: vaultwarden-data-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: vaultwarden/charts/postgres-17-cluster/templates/scheduled-backup.yaml
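# Nightly base backup to the off-site DigitalOcean store. CNPG ScheduledBackup uses a
# six-field cron expression (seconds first), so "0 0 0 * * *" fires at 00:00:00 every day.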
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "vaultwarden-postgresql-17-daily-backup-scheduled-backup"
namespace: vaultwarden
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: vaultwarden-postgresql-17
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: vaultwarden-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "vaultwarden-postgresql-17-external-backup"
---
# Source: vaultwarden/charts/postgres-17-cluster/templates/scheduled-backup.yaml
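# Same nightly cadence against the local Garage store; "immediate: true" also runs a
# backup as soon as the resource is first reconciled.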
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "vaultwarden-postgresql-17-live-backup-scheduled-backup"
namespace: vaultwarden
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: vaultwarden-postgresql-17
app.kubernetes.io/instance: vaultwarden
app.kubernetes.io/part-of: vaultwarden
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: vaultwarden-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "vaultwarden-postgresql-17-garage-local-backup"

View File

@@ -0,0 +1,816 @@
---
# Source: yamtrack/charts/yamtrack/templates/common.yaml
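# ClusterIP Service for the Yamtrack web UI: exposes container port 8000 as port 80.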
apiVersion: v1
kind: Service
metadata:
name: yamtrack
labels:
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: yamtrack
app.kubernetes.io/service: yamtrack
helm.sh/chart: yamtrack-4.4.0
namespace: yamtrack
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/name: yamtrack
---
# Source: yamtrack/charts/yamtrack/templates/common.yaml
---
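# Single-replica Yamtrack deployment. The session SECRET, OIDC provider settings, and
# Postgres connection details come from the ExternalSecrets below and the CNPG-generated
# yamtrack-postgresql-17-cluster-app Secret; REDIS_URL points at the RedisReplication
# rendered further down.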
apiVersion: apps/v1
kind: Deployment
metadata:
name: yamtrack
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: yamtrack
helm.sh/chart: yamtrack-4.4.0
namespace: yamtrack
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: yamtrack
app.kubernetes.io/instance: yamtrack
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/name: yamtrack
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: URLS
value: https://yamtrack.alexlebens.net
- name: REGISTRATION
value: "false"
- name: SOCIAL_PROVIDERS
value: allauth.socialaccount.providers.openid_connect
- name: SOCIALACCOUNT_PROVIDERS
valueFrom:
secretKeyRef:
key: SOCIALACCOUNT_PROVIDERS
name: yamtrack-oidc-secret
- name: SECRET
valueFrom:
secretKeyRef:
key: SECRET
name: yamtrack-config-secret
- name: REDIS_URL
value: redis://redis-replication-yamtrack-master.yamtrack:6379
- name: DB_USER
valueFrom:
secretKeyRef:
key: username
name: yamtrack-postgresql-17-cluster-app
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: yamtrack-postgresql-17-cluster-app
- name: DB_NAME
valueFrom:
secretKeyRef:
key: dbname
name: yamtrack-postgresql-17-cluster-app
- name: DB_HOST
valueFrom:
secretKeyRef:
key: host
name: yamtrack-postgresql-17-cluster-app
- name: DB_PORT
valueFrom:
secretKeyRef:
key: port
name: yamtrack-postgresql-17-cluster-app
image: ghcr.io/fuzzygrim/yamtrack:0.24.7
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
---
# Source: yamtrack/charts/postgres-17-cluster/templates/cluster.yaml
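# Three-instance Postgres 17 cluster. WAL archiving targets the Garage store
# (isWALArchiver: true), DigitalOcean Spaces is a second base-backup target, and the
# cluster bootstraps by recovering serverName yamtrack-postgresql-17-backup-1 through
# the "-recovery" ObjectStore.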
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: yamtrack-postgresql-17-cluster
namespace: yamtrack
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: yamtrack-postgresql-17
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "yamtrack-postgresql-17-external-backup"
serverName: "yamtrack-postgresql-17-backup-2"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "yamtrack-postgresql-17-garage-local-backup"
serverName: "yamtrack-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "yamtrack-postgresql-17-recovery"
serverName: yamtrack-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: yamtrack-postgresql-17-backup-1
externalClusters:
- name: yamtrack-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "yamtrack-postgresql-17-recovery"
serverName: yamtrack-postgresql-17-backup-1
---
# Source: yamtrack/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: yamtrack-config-secret
namespace: yamtrack
labels:
app.kubernetes.io/name: yamtrack-config-secret
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: SECRET
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/yamtrack/config
metadataPolicy: None
property: SECRET
---
# Source: yamtrack/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: yamtrack-oidc-secret
namespace: yamtrack
labels:
app.kubernetes.io/name: yamtrack-oidc-secret
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: SOCIALACCOUNT_PROVIDERS
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/yamtrack
metadataPolicy: None
property: SOCIALACCOUNT_PROVIDERS
---
# Source: yamtrack/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: yamtrack-postgresql-17-cluster-backup-secret
namespace: yamtrack
labels:
app.kubernetes.io/name: yamtrack-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: yamtrack/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: yamtrack-postgresql-17-cluster-backup-secret-garage
namespace: yamtrack
labels:
app.kubernetes.io/name: yamtrack-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: yamtrack/templates/http-route.yaml
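# Gateway API route: binds yamtrack.alexlebens.net on the shared traefik-gateway and
# forwards all paths to the yamtrack Service on port 80.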
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-yamtrack
namespace: yamtrack
labels:
app.kubernetes.io/name: http-route-yamtrack
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- yamtrack.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: yamtrack
port: 80
weight: 100
---
# Source: yamtrack/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "yamtrack-postgresql-17-external-backup"
namespace: yamtrack
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: yamtrack-postgresql-17
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/yamtrack/yamtrack-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: yamtrack-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: yamtrack-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: yamtrack/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "yamtrack-postgresql-17-garage-local-backup"
namespace: yamtrack
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: yamtrack-postgresql-17
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/yamtrack/yamtrack-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: yamtrack-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: yamtrack-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: yamtrack-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: yamtrack/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "yamtrack-postgresql-17-recovery"
namespace: yamtrack
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: yamtrack-postgresql-17
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/yamtrack/yamtrack-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: yamtrack-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: yamtrack-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: yamtrack/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: yamtrack-postgresql-17-alert-rules
namespace: yamtrack
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: yamtrack-postgresql-17
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/yamtrack-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
              risk of data loss and downtime if the primary instance fails.
              The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
              will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
              This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
              instances. The replaced instance may need some time to catch up with the cluster primary instance.
              This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
              case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="yamtrack"} - cnpg_pg_replication_is_wal_receiver_up{namespace="yamtrack"}) < 1
for: 5m
labels:
severity: critical
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster less than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
              need some time to catch up with the cluster primary instance.
              This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="yamtrack"} - cnpg_pg_replication_is_wal_receiver_up{namespace="yamtrack"}) < 2
for: 5m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="yamtrack", pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="yamtrack", pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="yamtrack", pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="yamtrack", pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="yamtrack",pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="yamtrack", pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
            summary: CNPG Cluster has a query taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="yamtrack",pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
            summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
            summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
            summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="yamtrack", pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: yamtrack
cnpg_cluster: yamtrack-postgresql-17-cluster
---
# Source: yamtrack/templates/redis-replication.yaml
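# Three-node Redis replication managed by the redis-operator, with the exporter enabled
# so the ServiceMonitor below can scrape it; the Deployment's REDIS_URL targets the
# operator-created redis-replication-yamtrack-master Service.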
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: redis-replication-yamtrack
namespace: yamtrack
labels:
app.kubernetes.io/name: redis-replication-yamtrack
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
spec:
clusterSize: 3
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
kubernetesConfig:
image: quay.io/opstree/redis:v8.0.3
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 50m
memory: 128Mi
storage:
volumeClaimTemplate:
spec:
storageClassName: ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
redisExporter:
enabled: true
image: quay.io/opstree/redis-exporter:v1.48.0
---
# Source: yamtrack/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "yamtrack-postgresql-17-daily-backup-scheduled-backup"
namespace: yamtrack
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: yamtrack-postgresql-17
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: yamtrack-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "yamtrack-postgresql-17-external-backup"
---
# Source: yamtrack/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "yamtrack-postgresql-17-live-backup-scheduled-backup"
namespace: yamtrack
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: yamtrack-postgresql-17
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: yamtrack-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "yamtrack-postgresql-17-garage-local-backup"
---
# Source: yamtrack/templates/service-monitor.yaml
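# Scrapes the redis-exporter port of the replication Services (redis_setup_type: replication)
# every 30s for Prometheus.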
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: redis-replication-yamtrack
namespace: yamtrack
labels:
app.kubernetes.io/name: redis-replication-yamtrack
app.kubernetes.io/instance: yamtrack
app.kubernetes.io/part-of: yamtrack
redis-operator: "true"
env: production
spec:
selector:
matchLabels:
redis_setup_type: replication
endpoints:
- port: redis-exporter
interval: 30s
scrapeTimeout: 10s