# infrastructure/clusters/cl01tl/manifests/lidarr/lidarr.yaml
---
# Source: lidarr/templates/persistent-volume.yaml
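# Statically provisioned NFS volume for the shared media store. The 1Gi
# capacity is nominal: Kubernetes does not enforce capacity on NFS-backed
# PersistentVolumes, so it only has to satisfy the matching claim below.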
apiVersion: v1
kind: PersistentVolume
metadata:
name: lidarr-nfs-storage
labels:
app.kubernetes.io/name: lidarr-nfs-storage
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: lidarr/charts/lidarr/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: lidarr-config
labels:
app.kubernetes.io/instance: lidarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: lidarr
helm.sh/chart: lidarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: lidarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: lidarr/templates/persistent-volume-claim.yaml
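# This claim pins itself to the PV above via spec.volumeName, so the
# nfs-client provisioner never creates a new volume for it; the request
# size only needs to match the PV's nominal 1Gi capacity.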
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: lidarr-nfs-storage
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-nfs-storage
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
volumeName: lidarr-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: lidarr/charts/lidarr/templates/common.yaml
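# Exposes the Lidarr UI (port 80 -> container port 8686) and the exportarr
# metrics sidecar (9792) under a single ClusterIP Service; the selector
# targets the pods of the Deployment rendered below.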
apiVersion: v1
kind: Service
metadata:
name: lidarr
labels:
app.kubernetes.io/instance: lidarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: lidarr
app.kubernetes.io/service: lidarr
helm.sh/chart: lidarr-4.4.0
namespace: lidarr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8686
protocol: TCP
name: http
- port: 9792
targetPort: 9792
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: lidarr
app.kubernetes.io/name: lidarr
---
# Source: lidarr/charts/lidarr/templates/common.yaml
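# Single-replica Deployment with a Recreate strategy, since the config PVC
# is ReadWriteOnce. The second container is an exportarr sidecar: with
# CONFIG pointing at /config/config.xml it can read Lidarr's API key and
# port from the app's own config file via the shared, read-only mount.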
apiVersion: apps/v1
kind: Deployment
metadata:
name: lidarr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: lidarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: lidarr
helm.sh/chart: lidarr-4.4.0
namespace: lidarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: lidarr
app.kubernetes.io/instance: lidarr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: lidarr
app.kubernetes.io/name: lidarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 1000
runAsUser: 1000
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/lidarr:2.14.5@sha256:5e1235d00b5d1c1f60ca0d472e554a6611aef41aa7b5b6d88260214bf4809af0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- args:
- lidarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9792"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: lidarr-config
- name: media
persistentVolumeClaim:
claimName: lidarr-nfs-storage
---
# Source: lidarr/charts/postgres-17-cluster/templates/cluster.yaml
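# Three-instance CloudNativePG cluster. The barman-cloud plugin is wired to
# two ObjectStores (defined further down): the Garage store acts as the WAL
# archiver, while the DigitalOcean store only receives scheduled base
# backups. bootstrap.recovery restores the cluster from the
# "lidarr2-postgresql-17-recovery" ObjectStore on first creation.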
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: lidarr2-postgresql-17-cluster
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "lidarr2-postgresql-17-external-backup"
serverName: "lidarr2-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "lidarr2-postgresql-17-garage-local-backup"
serverName: "lidarr2-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "lidarr2-postgresql-17-recovery"
serverName: lidarr2-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 200m
memory: 1Gi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: lidarr2-postgresql-17-backup-1
externalClusters:
- name: lidarr2-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "lidarr2-postgresql-17-recovery"
serverName: lidarr2-postgresql-17-backup-1
---
# Source: lidarr/templates/external-secret.yaml
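# Builds the restic credentials Secret for VolSync. With mergePolicy Merge
# the templated RESTIC_REPOSITORY value is layered on top of the keys
# pulled from Vault, so the final Secret carries both the rendered
# repository URL and the raw credential fields.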
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: lidarr-config-backup-secret
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-config-backup-secret
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/lidarr2/lidarr2-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: lidarr/templates/external-secret.yaml
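# Credential Secrets for the two CNPG ObjectStores below: this one maps the
# DigitalOcean Spaces keys, while the -garage variant maps the local Garage
# keys and region.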
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: lidarr-postgresql-17-cluster-backup-secret
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: lidarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: lidarr-postgresql-17-cluster-backup-secret-garage
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: lidarr/templates/http-route.yaml
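# Gateway API route: attaches to the shared Traefik Gateway in the traefik
# namespace and forwards lidarr.alexlebens.net to the Service's port 80.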
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-lidarr
namespace: lidarr
labels:
app.kubernetes.io/name: http-route-lidarr
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- lidarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: lidarr
port: 80
weight: 100
---
# Source: lidarr/charts/postgres-17-cluster/templates/object-store.yaml
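# ObjectStore resources are consumed by the barman-cloud plugin; the
# Cluster above references them by name through the barmanObjectName
# plugin parameter. This one is the off-site DigitalOcean Spaces target
# with a 30-day retention policy.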
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "lidarr2-postgresql-17-external-backup"
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/lidarr2/lidarr2-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: lidarr-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: lidarr-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: lidarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "lidarr2-postgresql-17-garage-local-backup"
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/lidarr/lidarr2-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: lidarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: lidarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: lidarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: lidarr/charts/postgres-17-cluster/templates/object-store.yaml
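# Recovery ObjectStore consumed by bootstrap.recovery above; it reads from
# the local Garage endpoint using snappy-compressed WAL and data archives.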
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "lidarr2-postgresql-17-recovery"
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/lidarr/lidarr2-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: lidarr-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: lidarr-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: lidarr/charts/postgres-17-cluster/templates/prometheus-rule.yaml
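# Alert rules vendored from the CloudNativePG cluster chart. The
# {{`{{`}} ... {{`}}`}} sequences are Helm escapes that keep the Prometheus
# template placeholders (e.g. $labels.pod, $value) intact through chart
# rendering, so they are left untouched here.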
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: lidarr2-postgresql-17-alert-rules
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/lidarr2-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
This can happen during a normal failover or automated minor version upgrade in a cluster with 2 or fewer
instances. The replaced instance may need some time to catch up with the cluster primary instance.
This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="lidarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="lidarr"}) < 1
for: 5m
labels:
severity: critical
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster less than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal failover or automated minor version upgrade. The replaced instance may
need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="lidarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="lidarr"}) < 2
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="lidarr",pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster has a query running for longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="lidarr",pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions since the frozen XID.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
---
# Source: lidarr/templates/prometheus-rule.yaml
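# App-level alerts driven by the exportarr sidecar: ExportarrAbsent fires
# when the scrape target vanishes from service discovery, LidarrDown when
# the exporter reports lidarr_system_status == 0.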
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: lidarr
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
groups:
- name: lidarr
rules:
- alert: ExportarrAbsent
annotations:
description: Lidarr Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*lidarr.*"} == 1)
for: 5m
labels:
severity: critical
- alert: LidarrDown
annotations:
description: Lidarr service is down.
summary: Lidarr is down.
expr: |
lidarr_system_status{job=~".*lidarr.*"} == 0
for: 5m
labels:
severity: critical
---
# Source: lidarr/templates/replication-source.yaml
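# VolSync restic backup of the lidarr-config PVC, daily at 04:00. The
# repository field names the Secret rendered by the
# lidarr-config-backup-secret ExternalSecret above, and copyMethod
# Snapshot takes a point-in-time Ceph snapshot before the restic mover runs.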
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: lidarr-config-backup-source
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-config-backup-source
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
sourcePVC: lidarr-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: lidarr-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: lidarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml
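# CNPG ScheduledBackup uses a six-field cron expression (seconds first),
# so "0 0 0 * * *" runs daily at midnight. This job targets the off-site
# DigitalOcean ObjectStore; immediate: false skips the initial backup at
# creation time.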
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "lidarr2-postgresql-17-daily-backup-scheduled-backup"
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: lidarr2-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "lidarr2-postgresql-17-external-backup"
---
# Source: lidarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "lidarr2-postgresql-17-live-backup-scheduled-backup"
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: lidarr2-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "lidarr2-postgresql-17-garage-local-backup"
---
# Source: lidarr/templates/service-monitor.yaml
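# Scrapes the exportarr sidecar through the Service's named metrics port
# every 3 minutes; the label selector matches the Service labels rendered
# by the lidarr chart above.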
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: lidarr
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
selector:
matchLabels:
app.kubernetes.io/name: lidarr
app.kubernetes.io/instance: lidarr
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics