diff --git a/clusters/cl01tl/manifests/actual/actual.yaml b/clusters/cl01tl/manifests/actual/actual.yaml new file mode 100644 index 000000000..a5eed7638 --- /dev/null +++ b/clusters/cl01tl/manifests/actual/actual.yaml @@ -0,0 +1,227 @@ +--- +# Source: actual/charts/actual/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: actual-data + labels: + app.kubernetes.io/instance: actual + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: actual + helm.sh/chart: actual-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: actual +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "2Gi" + storageClassName: "ceph-block" +--- +# Source: actual/charts/actual/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: actual + labels: + app.kubernetes.io/instance: actual + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: actual + app.kubernetes.io/service: actual + helm.sh/chart: actual-4.4.0 + namespace: actual +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 5006 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: actual + app.kubernetes.io/name: actual +--- +# Source: actual/charts/actual/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: actual + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: actual + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: actual + helm.sh/chart: actual-4.4.0 + namespace: actual +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: actual + app.kubernetes.io/instance: actual + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: actual + app.kubernetes.io/name: actual + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + image: ghcr.io/actualbudget/actual:25.11.0 + imagePullPolicy: IfNotPresent + livenessProbe: + exec: + command: + - /usr/bin/env + - bash + - -c + - node src/scripts/health-check.js + failureThreshold: 5 + initialDelaySeconds: 60 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 10 + name: main + resources: + requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - mountPath: /data + name: data + volumes: + - name: data + persistentVolumeClaim: + claimName: actual-data +--- +# Source: actual/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: actual-data-backup-secret + namespace: actual + labels: + app.kubernetes.io/name: actual-data-backup-secret + app.kubernetes.io/instance: actual + app.kubernetes.io/part-of: actual +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/actual/actual-data" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - 
secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: actual/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-actual + namespace: actual + labels: + app.kubernetes.io/name: http-route-actual + app.kubernetes.io/instance: actual + app.kubernetes.io/part-of: actual +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - actual.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: actual + port: 80 + weight: 100 +--- +# Source: actual/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: actual-data-backup-source + namespace: actual + labels: + app.kubernetes.io/name: actual-data-backup-source + app.kubernetes.io/instance: actual + app.kubernetes.io/part-of: actual +spec: + sourcePVC: actual-data + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: actual-data-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot diff --git a/clusters/cl01tl/manifests/audiobookshelf/audiobookshelf.yaml b/clusters/cl01tl/manifests/audiobookshelf/audiobookshelf.yaml new file mode 100644 index 000000000..adf367b22 --- /dev/null +++ b/clusters/cl01tl/manifests/audiobookshelf/audiobookshelf.yaml @@ -0,0 +1,471 @@ +--- +# Source: audiobookshelf/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: audiobookshelf-nfs-storage + namespace: audiobookshelf + labels: + app.kubernetes.io/name: audiobookshelf-nfs-storage + app.kubernetes.io/instance: audiobookshelf + app.kubernetes.io/part-of: audiobookshelf +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: audiobookshelf/charts/audiobookshelf/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: audiobookshelf-config + labels: + app.kubernetes.io/instance: audiobookshelf + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: audiobookshelf + helm.sh/chart: audiobookshelf-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: audiobookshelf +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "2Gi" + storageClassName: "ceph-block" +--- +# Source: audiobookshelf/charts/audiobookshelf/templates/common.yaml +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: audiobookshelf-metadata + labels: + app.kubernetes.io/instance: audiobookshelf + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: audiobookshelf + helm.sh/chart: audiobookshelf-4.4.0 
+ annotations: + helm.sh/resource-policy: keep + namespace: audiobookshelf +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "10Gi" + storageClassName: "ceph-block" +--- +# Source: audiobookshelf/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: audiobookshelf-nfs-storage-backup + namespace: audiobookshelf + labels: + app.kubernetes.io/name: audiobookshelf-nfs-storage-backup + app.kubernetes.io/instance: audiobookshelf + app.kubernetes.io/part-of: audiobookshelf +spec: + volumeMode: Filesystem + storageClassName: nfs-client + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +--- +# Source: audiobookshelf/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: audiobookshelf-nfs-storage + namespace: audiobookshelf + labels: + app.kubernetes.io/name: audiobookshelf-nfs-storage + app.kubernetes.io/instance: audiobookshelf + app.kubernetes.io/part-of: audiobookshelf +spec: + volumeName: audiobookshelf-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: audiobookshelf/charts/audiobookshelf/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: audiobookshelf + labels: + app.kubernetes.io/instance: audiobookshelf + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: audiobookshelf + app.kubernetes.io/service: audiobookshelf + helm.sh/chart: audiobookshelf-4.4.0 + namespace: audiobookshelf +spec: + type: ClusterIP + ports: + - port: 8000 + targetPort: 8000 + protocol: TCP + name: apprise + - port: 80 + targetPort: 80 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: audiobookshelf + app.kubernetes.io/name: audiobookshelf +--- +# Source: audiobookshelf/charts/audiobookshelf/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: audiobookshelf + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: audiobookshelf + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: audiobookshelf + helm.sh/chart: audiobookshelf-4.4.0 + namespace: audiobookshelf +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: audiobookshelf + app.kubernetes.io/instance: audiobookshelf + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: audiobookshelf + app.kubernetes.io/name: audiobookshelf + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: PGID + value: "1000" + - name: PUID + value: "1000" + - name: APPRISE_STORAGE_MODE + value: memory + - name: APPRISE_STATEFUL_MODE + value: disabled + - name: APPRISE_WORKER_COUNT + value: "1" + - name: APPRISE_STATELESS_URLS + valueFrom: + secretKeyRef: + key: ntfy-url + name: audiobookshelf-apprise-config + image: caronc/apprise:1.2.6 + imagePullPolicy: IfNotPresent + name: apprise-api + resources: + requests: + cpu: 10m + memory: 128Mi + - env: + - name: TZ + value: US/Central + image: ghcr.io/advplyr/audiobookshelf:2.30.0 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - mountPath: /mnt/store/ + name: 
audiobooks + - mountPath: /metadata/backups + name: backup + - mountPath: /config + name: config + - mountPath: /metadata + name: metadata + volumes: + - name: audiobooks + persistentVolumeClaim: + claimName: audiobookshelf-nfs-storage + - name: backup + persistentVolumeClaim: + claimName: audiobookshelf-nfs-storage-backup + - name: config + persistentVolumeClaim: + claimName: audiobookshelf-config + - name: metadata + persistentVolumeClaim: + claimName: audiobookshelf-metadata +--- +# Source: audiobookshelf/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: audiobookshelf-apprise-config + namespace: audiobookshelf + labels: + app.kubernetes.io/name: audiobookshelf-apprise-config + app.kubernetes.io/instance: audiobookshelf + app.kubernetes.io/part-of: audiobookshelf +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ntfy-url + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/audiobookshelf/apprise + metadataPolicy: None + property: ntfy-url +--- +# Source: audiobookshelf/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: audiobookshelf-config-backup-secret + namespace: audiobookshelf + labels: + app.kubernetes.io/name: audiobookshelf-config-backup-secret + app.kubernetes.io/instance: audiobookshelf + app.kubernetes.io/part-of: audiobookshelf +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/audiobookshelf/audiobookshelf-config" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: audiobookshelf/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: audiobookshelf-metadata-backup-secret + namespace: audiobookshelf + labels: + app.kubernetes.io/name: audiobookshelf-metadata-backup-secret + app.kubernetes.io/instance: audiobookshelf + app.kubernetes.io/part-of: audiobookshelf +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/audiobookshelf/audiobookshelf-metadata" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: 
/cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: audiobookshelf/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-audiobookshelf + namespace: audiobookshelf + labels: + app.kubernetes.io/name: http-route-audiobookshelf + app.kubernetes.io/instance: audiobookshelf + app.kubernetes.io/part-of: audiobookshelf +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - audiobookshelf.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: audiobookshelf + port: 80 + weight: 100 +--- +# Source: audiobookshelf/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: audiobookshelf-config-backup-source + namespace: audiobookshelf + labels: + app.kubernetes.io/name: audiobookshelf-config-backup-source + app.kubernetes.io/instance: audiobookshelf + app.kubernetes.io/part-of: audiobookshelf +spec: + sourcePVC: audiobookshelf-config + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: audiobookshelf-config-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: audiobookshelf/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: audiobookshelf-metadata-backup-source + namespace: audiobookshelf + labels: + app.kubernetes.io/name: audiobookshelf-metadata-backup-source + app.kubernetes.io/instance: audiobookshelf + app.kubernetes.io/part-of: audiobookshelf +spec: + sourcePVC: audiobookshelf-metadata + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: audiobookshelf-metadata-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: audiobookshelf/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: audiobookshelf-apprise + namespace: audiobookshelf + labels: + app.kubernetes.io/name: audiobookshelf-apprise + app.kubernetes.io/instance: audiobookshelf + app.kubernetes.io/part-of: audiobookshelf +spec: + endpoints: + - port: apprise + interval: 30s + scrapeTimeout: 15s + path: /metrics + selector: + matchLabels: + app.kubernetes.io/name: audiobookshelf + app.kubernetes.io/instance: audiobookshelf diff --git a/clusters/cl01tl/manifests/bazarr/bazarr.yaml b/clusters/cl01tl/manifests/bazarr/bazarr.yaml new file mode 100644 index 000000000..871fcbc5a --- /dev/null +++ b/clusters/cl01tl/manifests/bazarr/bazarr.yaml @@ -0,0 +1,278 @@ +--- +# Source: 
bazarr/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: bazarr-nfs-storage + namespace: bazarr + labels: + app.kubernetes.io/name: bazarr-nfs-storage + app.kubernetes.io/instance: bazarr + app.kubernetes.io/part-of: bazarr +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: bazarr/charts/bazarr/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: bazarr-config + labels: + app.kubernetes.io/instance: bazarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: bazarr + helm.sh/chart: bazarr-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: bazarr +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" + storageClassName: "ceph-block" +--- +# Source: bazarr/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: bazarr-nfs-storage + namespace: bazarr + labels: + app.kubernetes.io/name: bazarr-nfs-storage + app.kubernetes.io/instance: bazarr + app.kubernetes.io/part-of: bazarr +spec: + volumeName: bazarr-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: bazarr/charts/bazarr/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: bazarr + labels: + app.kubernetes.io/instance: bazarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: bazarr + app.kubernetes.io/service: bazarr + helm.sh/chart: bazarr-4.4.0 + namespace: bazarr +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 6767 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: bazarr + app.kubernetes.io/name: bazarr +--- +# Source: bazarr/charts/bazarr/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: bazarr + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: bazarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: bazarr + helm.sh/chart: bazarr-4.4.0 + namespace: bazarr +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: bazarr + app.kubernetes.io/instance: bazarr + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: bazarr + app.kubernetes.io/name: bazarr + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + securityContext: + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + runAsGroup: 1000 + runAsUser: 1000 + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: PUID + value: "1000" + - name: PGID + value: "1000" + image: ghcr.io/linuxserver/bazarr:1.5.3@sha256:2be164c02c0bb311b6c32e57d3d0ddc2813d524e89ab51a3408c1bf6fafecda5 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 256Mi + volumeMounts: + - mountPath: /config + name: config + - mountPath: /mnt/store + name: media + volumes: + - name: config + persistentVolumeClaim: + claimName: bazarr-config + - name: media + persistentVolumeClaim: + claimName: bazarr-nfs-storage +--- +# Source: 
bazarr/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: bazarr-config-backup-secret + namespace: bazarr + labels: + app.kubernetes.io/name: bazarr-config-backup-secret + app.kubernetes.io/instance: bazarr + app.kubernetes.io/part-of: bazarr +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/bazarr/bazarr-config" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: bazarr/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-bazarr + namespace: bazarr + labels: + app.kubernetes.io/name: http-route-bazarr + app.kubernetes.io/instance: bazarr + app.kubernetes.io/part-of: bazarr +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - bazarr.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: bazarr + port: 80 + weight: 100 +--- +# Source: bazarr/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: bazarr-config-backup-source + namespace: bazarr + labels: + app.kubernetes.io/name: bazarr-config-backup-source + app.kubernetes.io/instance: bazarr + app.kubernetes.io/part-of: bazarr +spec: + sourcePVC: bazarr-config + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: bazarr-config-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + moverSecurityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot diff --git a/clusters/cl01tl/manifests/booklore/booklore.yaml b/clusters/cl01tl/manifests/booklore/booklore.yaml new file mode 100644 index 000000000..b0b3c4cbf --- /dev/null +++ b/clusters/cl01tl/manifests/booklore/booklore.yaml @@ -0,0 +1,946 @@ +--- +# Source: booklore/templates/namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: booklore + annotations: + volsync.backube/privileged-movers: "true" + labels: + app.kubernetes.io/name: booklore + app.kubernetes.io/instance: booklore + app.kubernetes.io/part-of: booklore +--- +# Source: booklore/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: booklore-books-nfs-storage + namespace: booklore + labels: + 
app.kubernetes.io/name: booklore-books-nfs-storage + app.kubernetes.io/instance: booklore + app.kubernetes.io/part-of: booklore +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage/Books + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: booklore/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: booklore-books-import-nfs-storage + namespace: booklore + labels: + app.kubernetes.io/name: booklore-books-import-nfs-storage + app.kubernetes.io/instance: booklore + app.kubernetes.io/part-of: booklore +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage/Books Import + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: booklore/charts/booklore/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: booklore-config + labels: + app.kubernetes.io/instance: booklore + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: booklore + helm.sh/chart: booklore-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: booklore +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" + storageClassName: "ceph-block" +--- +# Source: booklore/charts/booklore/templates/common.yaml +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: booklore-data + labels: + app.kubernetes.io/instance: booklore + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: booklore + helm.sh/chart: booklore-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: booklore +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "10Gi" + storageClassName: "ceph-block" +--- +# Source: booklore/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: booklore-books-nfs-storage + namespace: booklore + labels: + app.kubernetes.io/name: booklore-books-nfs-storage + app.kubernetes.io/instance: booklore + app.kubernetes.io/part-of: booklore +spec: + volumeName: booklore-books-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: booklore/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: booklore-books-import-nfs-storage + namespace: booklore + labels: + app.kubernetes.io/name: booklore-books-import-nfs-storage + app.kubernetes.io/instance: booklore + app.kubernetes.io/part-of: booklore +spec: + volumeName: booklore-books-import-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: booklore/charts/booklore/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: booklore + labels: + app.kubernetes.io/instance: booklore + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: booklore + app.kubernetes.io/service: booklore + helm.sh/chart: booklore-4.4.0 + namespace: booklore +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 6060 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: booklore + app.kubernetes.io/name: booklore +--- +# Source: booklore/templates/service.yaml +apiVersion: v1 +kind: Service 
+metadata: + name: garage-ps10rp + namespace: booklore + labels: + app.kubernetes.io/name: garage-ps10rp + app.kubernetes.io/instance: booklore + app.kubernetes.io/part-of: booklore + annotations: + tailscale.com/tailnet-fqdn: garage-ps10rp.boreal-beaufort.ts.net +spec: + externalName: placeholder + type: ExternalName +--- +# Source: booklore/charts/booklore/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: booklore + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: booklore + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: booklore + helm.sh/chart: booklore-4.4.0 + namespace: booklore +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: booklore + app.kubernetes.io/instance: booklore + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: booklore + app.kubernetes.io/name: booklore + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: America/Chicago + - name: DATABASE_URL + value: jdbc:mariadb://booklore-mariadb-cluster-primary.booklore:3306/booklore + - name: DATABASE_USERNAME + value: booklore + - name: DATABASE_PASSWORD + valueFrom: + secretKeyRef: + key: password + name: booklore-database-secret + - name: BOOKLORE_PORT + value: "6060" + - name: SWAGGER_ENABLED + value: "false" + image: ghcr.io/booklore-app/booklore:v1.12.0 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 50m + memory: 128Mi + volumeMounts: + - mountPath: /bookdrop + name: books-import + - mountPath: /app/data + name: config + - mountPath: /data + name: data + - mountPath: /bookdrop/ingest + name: ingest + volumes: + - emptyDir: {} + name: books-import + - name: config + persistentVolumeClaim: + claimName: booklore-config + - name: data + persistentVolumeClaim: + claimName: booklore-data + - name: ingest + persistentVolumeClaim: + claimName: booklore-books-import-nfs-storage +--- +# Source: booklore/charts/mariadb-cluster/templates/database.yaml +apiVersion: k8s.mariadb.com/v1alpha1 +kind: Database +metadata: + name: booklore-mariadb-cluster-booklore + namespace: booklore + labels: + helm.sh/chart: mariadb-cluster-25.10.2 + app.kubernetes.io/name: mariadb-cluster + app.kubernetes.io/instance: booklore + app.kubernetes.io/version: "0.0.0" + app.kubernetes.io/managed-by: Helm +spec: + mariaDbRef: + name: booklore-mariadb-cluster + namespace: booklore + characterSet: utf8 + cleanupPolicy: Delete + collate: utf8_general_ci + name: booklore + requeueInterval: 10h +--- +# Source: booklore/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: booklore-database-secret + namespace: booklore + labels: + app.kubernetes.io/name: booklore-database-secret + app.kubernetes.io/instance: booklore + app.kubernetes.io/part-of: booklore +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: password + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/booklore/database + metadataPolicy: None + property: password +--- +# Source: booklore/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: booklore-data-replication-secret + namespace: booklore + 
labels: + app.kubernetes.io/name: booklore-data-replication-secret + app.kubernetes.io/instance: booklore + app.kubernetes.io/part-of: booklore +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: psk.txt + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/booklore/replication + metadataPolicy: None + property: psk.txt +--- +# Source: booklore/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: booklore-config-backup-secret + namespace: booklore + labels: + app.kubernetes.io/name: booklore-config-backup-secret + app.kubernetes.io/instance: booklore + app.kubernetes.io/part-of: booklore +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/booklore/booklore-config" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /volsync/restic/digital-ocean + metadataPolicy: None + property: BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /volsync/restic/digital-ocean + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: AWS_ACCESS_KEY_ID + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: AWS_SECRET_ACCESS_KEY +--- +# Source: booklore/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: booklore-data-backup-secret-local + namespace: booklore + labels: + app.kubernetes.io/name: booklore-data-backup-secret-local + app.kubernetes.io/instance: booklore + app.kubernetes.io/part-of: booklore +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/booklore/booklore-data" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /volsync/restic/garage-local + metadataPolicy: None + property: BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /volsync/restic/garage-local + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/volsync-backups + metadataPolicy: None + property: ACCESS_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/volsync-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/volsync-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY +--- +# Source: booklore/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret 
+metadata: + name: booklore-data-backup-secret-remote + namespace: booklore + labels: + app.kubernetes.io/name: booklore-data-backup-secret-remote + app.kubernetes.io/instance: booklore + app.kubernetes.io/part-of: booklore +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/booklore/booklore-data" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /volsync/restic/garage-remote + metadataPolicy: None + property: BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /volsync/restic/garage-remote + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/volsync-backups + metadataPolicy: None + property: ACCESS_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/volsync-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/volsync-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY +--- +# Source: booklore/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: booklore-data-backup-secret-external + namespace: booklore + labels: + app.kubernetes.io/name: booklore-data-backup-secret-external + app.kubernetes.io/instance: booklore + app.kubernetes.io/part-of: booklore +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/booklore/booklore-data" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /volsync/restic/digital-ocean + metadataPolicy: None + property: BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /volsync/restic/digital-ocean + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: AWS_ACCESS_KEY_ID + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: AWS_SECRET_ACCESS_KEY +--- +# Source: booklore/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: booklore-mariadb-cluster-backup-secret-external + namespace: booklore + labels: + app.kubernetes.io/name: booklore-mariadb-cluster-backup-secret-external + app.kubernetes.io/instance: booklore + app.kubernetes.io/part-of: booklore +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: access + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/mariadb-backups + metadataPolicy: None + 
property: access + - secretKey: secret + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/mariadb-backups + metadataPolicy: None + property: secret +--- +# Source: booklore/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: booklore-mariadb-cluster-backup-secret-garage + namespace: booklore + labels: + app.kubernetes.io/name: booklore-mariadb-cluster-backup-secret-garage + app.kubernetes.io/instance: booklore + app.kubernetes.io/part-of: booklore +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: access + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/mariadb-backups + metadataPolicy: None + property: access + - secretKey: secret + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/mariadb-backups + metadataPolicy: None + property: secret +--- +# Source: booklore/charts/mariadb-cluster/templates/grant.yaml +apiVersion: k8s.mariadb.com/v1alpha1 +kind: Grant +metadata: + name: booklore-mariadb-cluster-booklore + namespace: booklore + labels: + helm.sh/chart: mariadb-cluster-25.10.2 + app.kubernetes.io/name: mariadb-cluster + app.kubernetes.io/instance: booklore + app.kubernetes.io/version: "0.0.0" + app.kubernetes.io/managed-by: Helm +spec: + mariaDbRef: + name: booklore-mariadb-cluster + namespace: booklore + cleanupPolicy: Delete + database: booklore + grantOption: true + host: '%' + privileges: + - ALL PRIVILEGES + requeueInterval: 10h + retryInterval: 30s + table: '*' + username: booklore +--- +# Source: booklore/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-booklore + namespace: booklore + labels: + app.kubernetes.io/name: http-route-booklore + app.kubernetes.io/instance: booklore + app.kubernetes.io/part-of: booklore +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - booklore.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: booklore + port: 80 + weight: 100 +--- +# Source: booklore/charts/mariadb-cluster/templates/mariadb.yaml +apiVersion: k8s.mariadb.com/v1alpha1 +kind: MariaDB +metadata: + name: booklore-mariadb-cluster + namespace: booklore + labels: + helm.sh/chart: mariadb-cluster-25.10.2 + app.kubernetes.io/name: mariadb-cluster + app.kubernetes.io/instance: booklore + app.kubernetes.io/version: "0.0.0" + app.kubernetes.io/managed-by: Helm +spec: + galera: + enabled: true + replicas: 3 + rootPasswordSecretKeyRef: + generate: false + key: password + name: booklore-database-secret + storage: + size: 5Gi +--- +# Source: booklore/charts/mariadb-cluster/templates/physicalbackup.yaml +apiVersion: k8s.mariadb.com/v1alpha1 +kind: PhysicalBackup +metadata: + name: booklore-mariadb-cluster-backup-external + namespace: booklore + labels: + helm.sh/chart: mariadb-cluster-25.10.2 + app.kubernetes.io/name: mariadb-cluster + app.kubernetes.io/instance: booklore + app.kubernetes.io/version: "0.0.0" + app.kubernetes.io/managed-by: Helm +spec: + mariaDbRef: + name: booklore-mariadb-cluster + namespace: booklore + compression: gzip + maxRetention: 720h + schedule: + cron: 0 0 * * 0 + immediate: true + suspend: false + storage: + s3: + accessKeyIdSecretKeyRef: + key: access + name: booklore-mariadb-cluster-backup-secret-external + bucket: 
mariadb-backups-b230a2f5aecf080a4b372c08 + endpoint: nyc3.digitaloceanspaces.com + prefix: cl01tl/booklore + region: us-east-1 + secretAccessKeySecretKeyRef: + key: secret + name: booklore-mariadb-cluster-backup-secret-external + tls: + enabled: true +--- +# Source: booklore/charts/mariadb-cluster/templates/physicalbackup.yaml +apiVersion: k8s.mariadb.com/v1alpha1 +kind: PhysicalBackup +metadata: + name: booklore-mariadb-cluster-backup-garage + namespace: booklore + labels: + helm.sh/chart: mariadb-cluster-25.10.2 + app.kubernetes.io/name: mariadb-cluster + app.kubernetes.io/instance: booklore + app.kubernetes.io/version: "0.0.0" + app.kubernetes.io/managed-by: Helm +spec: + mariaDbRef: + name: booklore-mariadb-cluster + namespace: booklore + compression: gzip + maxRetention: 360h + schedule: + cron: 0 0 * * * + immediate: true + suspend: false + storage: + s3: + accessKeyIdSecretKeyRef: + key: access + name: booklore-mariadb-cluster-backup-secret-garage + bucket: mariadb-backups + endpoint: garage-main.garage:3900 + prefix: cl01tl/booklore + region: us-east-1 + secretAccessKeySecretKeyRef: + key: secret + name: booklore-mariadb-cluster-backup-secret-garage +--- +# Source: booklore/templates/replication-destination.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationDestination +metadata: + name: booklore-data-replication-destination + namespace: booklore + labels: + app.kubernetes.io/name: booklore-data-replication-destination + app.kubernetes.io/instance: booklore + app.kubernetes.io/part-of: booklore +spec: + rsyncTLS: + copyMethod: Direct + accessModes: ["ReadWriteMany"] + destinationPVC: booklore-books-nfs-storage + keySecret: booklore-data-replication-secret +--- +# Source: booklore/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: booklore-data-replication-source + namespace: booklore + labels: + app.kubernetes.io/name: booklore-data-replication-source + app.kubernetes.io/instance: booklore + app.kubernetes.io/part-of: booklore +spec: + sourcePVC: booklore-data + trigger: + schedule: "0 0 * * *" + rsyncTLS: + keySecret: booklore-data-replication-secret + address: volsync-rsync-tls-dst-booklore-data-replication-destination + copyMethod: Snapshot +--- +# Source: booklore/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: booklore-config-backup-source + namespace: booklore + labels: + app.kubernetes.io/name: booklore-config-backup-source + app.kubernetes.io/instance: booklore + app.kubernetes.io/part-of: booklore +spec: + sourcePVC: booklore-config + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: booklore-config-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot + cacheCapacity: 10Gi +--- +# Source: booklore/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: booklore-data-backup-source-local + namespace: booklore + labels: + app.kubernetes.io/name: booklore-data-backup-source-local + app.kubernetes.io/instance: booklore + app.kubernetes.io/part-of: booklore +spec: + sourcePVC: booklore-data + trigger: + schedule: 0 2 * * * + restic: + pruneIntervalDays: 7 + repository: booklore-data-backup-secret-local + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: 
ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot + cacheCapacity: 10Gi +--- +# Source: booklore/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: booklore-data-backup-source-remote + namespace: booklore + labels: + app.kubernetes.io/name: booklore-data-backup-source-remote + app.kubernetes.io/instance: booklore + app.kubernetes.io/part-of: booklore +spec: + sourcePVC: booklore-data + trigger: + schedule: 0 3 * * * + restic: + pruneIntervalDays: 7 + repository: booklore-data-backup-secret-remote + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot + cacheCapacity: 10Gi +--- +# Source: booklore/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: booklore-data-backup-source-external + namespace: booklore + labels: + app.kubernetes.io/name: booklore-data-backup-source-external + app.kubernetes.io/instance: booklore + app.kubernetes.io/part-of: booklore +spec: + sourcePVC: booklore-data + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: booklore-data-backup-secret-external + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot + cacheCapacity: 10Gi +--- +# Source: booklore/charts/mariadb-cluster/templates/user.yaml +apiVersion: k8s.mariadb.com/v1alpha1 +kind: User +metadata: + name: booklore-mariadb-cluster-booklore + namespace: booklore + labels: + helm.sh/chart: mariadb-cluster-25.10.2 + app.kubernetes.io/name: mariadb-cluster + app.kubernetes.io/instance: booklore + app.kubernetes.io/version: "0.0.0" + app.kubernetes.io/managed-by: Helm +spec: + mariaDbRef: + name: booklore-mariadb-cluster + namespace: booklore + cleanupPolicy: Delete + host: '%' + name: booklore + passwordSecretKeyRef: + key: password + name: booklore-database-secret + requeueInterval: 10h + retryInterval: 30s diff --git a/clusters/cl01tl/manifests/code-server/code-server.yaml b/clusters/cl01tl/manifests/code-server/code-server.yaml new file mode 100644 index 000000000..c98687e2c --- /dev/null +++ b/clusters/cl01tl/manifests/code-server/code-server.yaml @@ -0,0 +1,251 @@ +--- +# Source: code-server/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: code-server-nfs-storage + namespace: code-server + labels: + app.kubernetes.io/name: code-server-nfs-storage + app.kubernetes.io/instance: code-server + app.kubernetes.io/part-of: code-server +spec: + volumeMode: Filesystem + storageClassName: nfs-client + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +--- +# Source: code-server/charts/code-server/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: code-server + labels: + app.kubernetes.io/instance: code-server + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: code-server + app.kubernetes.io/service: code-server + helm.sh/chart: code-server-4.4.0 + namespace: code-server +spec: + type: ClusterIP + ports: + - port: 8443 + targetPort: 8443 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: code-server + app.kubernetes.io/name: code-server +--- +# Source: code-server/charts/cloudflared/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + 
name: code-server-cloudflared + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: code-server + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cloudflared + app.kubernetes.io/version: 2025.10.0 + helm.sh/chart: cloudflared-1.23.0 + namespace: code-server +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: cloudflared + app.kubernetes.io/instance: code-server + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: code-server + app.kubernetes.io/name: cloudflared + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - args: + - tunnel + - --protocol + - http2 + - --no-autoupdate + - run + - --token + - $(CF_MANAGED_TUNNEL_TOKEN) + env: + - name: CF_MANAGED_TUNNEL_TOKEN + valueFrom: + secretKeyRef: + key: cf-tunnel-token + name: code-server-cloudflared-secret + image: cloudflare/cloudflared:2025.11.1 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi +--- +# Source: code-server/charts/code-server/templates/common.yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: code-server + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: code-server + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: code-server + helm.sh/chart: code-server-4.4.0 + namespace: code-server +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: code-server + app.kubernetes.io/instance: code-server + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: code-server + app.kubernetes.io/name: code-server + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: PUID + value: "1000" + - name: PGID + value: "1000" + - name: DEFAULT_WORKSPACE + value: /config + envFrom: + - secretRef: + name: codeserver-password-secret + image: ghcr.io/linuxserver/code-server:4.106.2@sha256:a98afdbcb59559f11e5e8df284062e55da1076b2e470e13db4aae133ea82bad0 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - mountPath: /config + name: config + volumes: + - name: config + persistentVolumeClaim: + claimName: code-server-nfs-storage +--- +# Source: code-server/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: codeserver-password-secret + namespace: code-server + labels: + app.kubernetes.io/name: codeserver-password-secret + app.kubernetes.io/instance: code-server + app.kubernetes.io/part-of: code-server +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/code-server/auth + metadataPolicy: None + property: PASSWORD + - secretKey: SUDO_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/code-server/auth + metadataPolicy: None + property: SUDO_PASSWORD +--- +# Source: 
code-server/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: code-server-cloudflared-secret + namespace: code-server + labels: + app.kubernetes.io/name: code-server-cloudflared-secret + app.kubernetes.io/instance: code-server + app.kubernetes.io/part-of: code-server +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: cf-tunnel-token + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cloudflare/tunnels/codeserver + metadataPolicy: None + property: token +--- +# Source: code-server/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-code-server + namespace: code-server + labels: + app.kubernetes.io/name: http-route-code-server + app.kubernetes.io/instance: code-server + app.kubernetes.io/part-of: code-server +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - code-server.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: code-server + port: 8443 + weight: 100 diff --git a/clusters/cl01tl/manifests/directus/directus.yaml b/clusters/cl01tl/manifests/directus/directus.yaml new file mode 100644 index 000000000..f1c7e3835 --- /dev/null +++ b/clusters/cl01tl/manifests/directus/directus.yaml @@ -0,0 +1,1136 @@ +--- +# Source: directus/charts/directus/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: directus + labels: + app.kubernetes.io/instance: directus + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: directus + app.kubernetes.io/service: directus + helm.sh/chart: directus-4.4.0 + namespace: directus +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 8055 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: directus + app.kubernetes.io/name: directus +--- +# Source: directus/charts/cloudflared-directus/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: directus-cloudflared-directus + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: directus + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cloudflared-directus + app.kubernetes.io/version: 2025.10.0 + helm.sh/chart: cloudflared-directus-1.23.0 + namespace: directus +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: cloudflared-directus + app.kubernetes.io/instance: directus + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: directus + app.kubernetes.io/name: cloudflared-directus + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - args: + - tunnel + - --protocol + - http2 + - --no-autoupdate + - run + - --token + - $(CF_MANAGED_TUNNEL_TOKEN) + env: + - name: CF_MANAGED_TUNNEL_TOKEN + valueFrom: + secretKeyRef: + key: cf-tunnel-token + name: directus-cloudflared-secret + image: cloudflare/cloudflared:2025.11.1 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi +--- +# Source: directus/charts/directus/templates/common.yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: 
directus + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: directus + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: directus + helm.sh/chart: directus-4.4.0 + namespace: directus +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: directus + app.kubernetes.io/instance: directus + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: directus + app.kubernetes.io/name: directus + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: PUBLIC_URL + value: https://directus.alexlebens.dev + - name: WEBSOCKETS_ENABLED + value: "true" + - name: ADMIN_EMAIL + valueFrom: + secretKeyRef: + key: admin-email + name: directus-config + - name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + key: admin-password + name: directus-config + - name: SECRET + valueFrom: + secretKeyRef: + key: secret + name: directus-config + - name: KEY + valueFrom: + secretKeyRef: + key: key + name: directus-config + - name: DB_CLIENT + value: postgres + - name: DB_HOST + valueFrom: + secretKeyRef: + key: host + name: directus-postgresql-17-cluster-app + - name: DB_DATABASE + valueFrom: + secretKeyRef: + key: dbname + name: directus-postgresql-17-cluster-app + - name: DB_PORT + valueFrom: + secretKeyRef: + key: port + name: directus-postgresql-17-cluster-app + - name: DB_USER + valueFrom: + secretKeyRef: + key: user + name: directus-postgresql-17-cluster-app + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + key: password + name: directus-postgresql-17-cluster-app + - name: SYNCHRONIZATION_STORE + value: redis + - name: CACHE_ENABLED + value: "true" + - name: CACHE_STORE + value: redis + - name: REDIS_ENABLED + value: "true" + - name: REDIS_HOST + value: redis-replication-directus-master + - name: REDIS_USERNAME + valueFrom: + secretKeyRef: + key: user + name: directus-redis-config + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + key: password + name: directus-redis-config + - name: STORAGE_LOCATIONS + value: s3 + - name: STORAGE_S3_DRIVER + value: s3 + - name: STORAGE_S3_KEY + valueFrom: + secretKeyRef: + key: AWS_ACCESS_KEY_ID + name: ceph-bucket-directus + - name: STORAGE_S3_SECRET + valueFrom: + secretKeyRef: + key: AWS_SECRET_ACCESS_KEY + name: ceph-bucket-directus + - name: STORAGE_S3_BUCKET + valueFrom: + configMapKeyRef: + key: BUCKET_NAME + name: ceph-bucket-directus + - name: STORAGE_S3_REGION + value: us-east-1 + - name: STORAGE_S3_ENDPOINT + value: http://rook-ceph-rgw-ceph-objectstore.rook-ceph.svc:80 + - name: STORAGE_S3_FORCE_PATH_STYLE + value: "true" + - name: AUTH_PROVIDERS + value: AUTHENTIK + - name: AUTH_AUTHENTIK_DRIVER + value: openid + - name: AUTH_AUTHENTIK_CLIENT_ID + valueFrom: + secretKeyRef: + key: OIDC_CLIENT_ID + name: directus-oidc-secret + - name: AUTH_AUTHENTIK_CLIENT_SECRET + valueFrom: + secretKeyRef: + key: OIDC_CLIENT_SECRET + name: directus-oidc-secret + - name: AUTH_AUTHENTIK_SCOPE + value: openid profile email + - name: AUTH_AUTHENTIK_ISSUER_URL + value: https://auth.alexlebens.dev/application/o/directus/.well-known/openid-configuration + - name: AUTH_AUTHENTIK_IDENTIFIER_KEY + value: email + - name: AUTH_AUTHENTIK_ALLOW_PUBLIC_REGISTRATION + value: "true" + - name: AUTH_AUTHENTIK_LABEL + value: Authentik + - name: TELEMETRY 
+ value: "false" + - name: METRICS_ENABLED + value: "true" + - name: METRICS_TOKENS + valueFrom: + secretKeyRef: + key: metric-token + name: directus-metric-token + image: directus/directus:11.13.4 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 256Mi +--- +# Source: directus/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: directus-postgresql-17-cluster + namespace: directus + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: directus-postgresql-17 + app.kubernetes.io/instance: directus + app.kubernetes.io/part-of: directus + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "directus-postgresql-17-external-backup" + serverName: "directus-postgresql-17-backup-1" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "directus-postgresql-17-garage-local-backup" + serverName: "directus-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "directus-postgresql-17-recovery" + serverName: directus-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 100m + memory: 256Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: directus-postgresql-17-backup-1 + + externalClusters: + - name: directus-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "directus-postgresql-17-recovery" + serverName: directus-postgresql-17-backup-1 +--- +# Source: directus/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: directus-config + namespace: directus + labels: + app.kubernetes.io/name: directus-config + app.kubernetes.io/instance: directus + app.kubernetes.io/part-of: directus +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: admin-email + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/directus/config + metadataPolicy: None + property: admin-email + - secretKey: admin-password + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/directus/config + metadataPolicy: None + property: admin-password + - secretKey: secret + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/directus/config + metadataPolicy: None + property: secret + - secretKey: key + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/directus/config + metadataPolicy: None + property: key +--- +# Source: 
directus/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: directus-metric-token + namespace: directus + labels: + app.kubernetes.io/name: directus-metric-token + app.kubernetes.io/instance: directus + app.kubernetes.io/part-of: directus +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: metric-token + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/directus/metrics + metadataPolicy: None + property: metric-token +--- +# Source: directus/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: directus-redis-config + namespace: directus + labels: + app.kubernetes.io/name: directus-redis-config + app.kubernetes.io/instance: directus + app.kubernetes.io/part-of: directus +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: user + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/directus/redis + metadataPolicy: None + property: user + - secretKey: password + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/directus/redis + metadataPolicy: None + property: password +--- +# Source: directus/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: directus-oidc-secret + namespace: directus + labels: + app.kubernetes.io/name: directus-oidc-secret + app.kubernetes.io/instance: directus + app.kubernetes.io/part-of: directus +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: OIDC_CLIENT_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/directus + metadataPolicy: None + property: client + - secretKey: OIDC_CLIENT_SECRET + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/directus + metadataPolicy: None + property: secret +--- +# Source: directus/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: directus-cloudflared-secret + namespace: directus + labels: + app.kubernetes.io/name: directus-cloudflared-secret + app.kubernetes.io/instance: directus + app.kubernetes.io/part-of: directus +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: cf-tunnel-token + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cloudflare/tunnels/directus + metadataPolicy: None + property: token +--- +# Source: directus/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: directus-postgresql-17-cluster-backup-secret + namespace: directus + labels: + app.kubernetes.io/name: directus-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: directus + app.kubernetes.io/part-of: directus +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: directus/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: 
directus-postgresql-17-cluster-backup-secret-weekly + namespace: directus + labels: + app.kubernetes.io/name: directus-postgresql-17-cluster-backup-secret-weekly + app.kubernetes.io/instance: directus + app.kubernetes.io/part-of: directus +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY +--- +# Source: directus/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: directus-postgresql-17-cluster-backup-secret-garage + namespace: directus + labels: + app.kubernetes.io/name: directus-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: directus + app.kubernetes.io/part-of: directus +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: directus/templates/object-bucket-claim.yaml +apiVersion: objectbucket.io/v1alpha1 +kind: ObjectBucketClaim +metadata: + name: ceph-bucket-directus + labels: + app.kubernetes.io/name: ceph-bucket-directus + app.kubernetes.io/instance: directus + app.kubernetes.io/part-of: directus +spec: + generateBucketName: bucket-directus + storageClassName: ceph-bucket +--- +# Source: directus/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "directus-postgresql-17-external-backup" + namespace: directus + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: directus-postgresql-17 + app.kubernetes.io/instance: directus + app.kubernetes.io/part-of: directus + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/directus/directus-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: directus-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: directus-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: directus/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "directus-postgresql-17-garage-local-backup" + namespace: directus + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: directus-postgresql-17 + app.kubernetes.io/instance: directus + app.kubernetes.io/part-of: directus + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: 
s3://postgres-backups/cl01tl/directus/directus-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: directus-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: directus-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: directus-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: directus/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "directus-postgresql-17-recovery" + namespace: directus + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: directus-postgresql-17 + app.kubernetes.io/instance: directus + app.kubernetes.io/part-of: directus + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + configuration: + destinationPath: s3://postgres-backups/cl01tl/directus/directus-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + wal: + compression: snappy + maxParallel: 1 + data: + compression: snappy + jobs: 1 + s3Credentials: + accessKeyId: + name: directus-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: directus-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY +--- +# Source: directus/charts/postgres-17-cluster/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: directus-postgresql-17-alert-rules + namespace: directus + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: directus-postgresql-17 + app.kubernetes.io/instance: directus + app.kubernetes.io/part-of: directus + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + groups: + - name: cloudnative-pg/directus-postgresql-17 + rules: + - alert: CNPGClusterBackendsWaitingWarning + annotations: + summary: CNPG Cluster has a backend waiting for longer than 5 minutes. + description: |- + Pod {{ $labels.pod }} + has been waiting for longer than 5 minutes + expr: | + cnpg_backends_waiting_total > 300 + for: 1m + labels: + severity: warning + namespace: directus + cnpg_cluster: directus-postgresql-17-cluster + - alert: CNPGClusterDatabaseDeadlockConflictsWarning + annotations: + summary: CNPG Cluster has over 10 deadlock conflicts. + description: |- + There are over 10 deadlock conflicts in + {{ $labels.pod }} + expr: | + cnpg_pg_stat_database_deadlocks > 10 + for: 1m + labels: + severity: warning + namespace: directus + cnpg_cluster: directus-postgresql-17-cluster + - alert: CNPGClusterHACritical + annotations: + summary: CNPG Cluster has no standby replicas! + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe + risk of data loss and downtime if the primary instance fails. + + The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint + will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary. + + This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer + instances. The replaced instance may need some time to catch up with the cluster primary instance. + + This alarm will always trigger if your cluster is configured to run with only 1 instance. In this + case you may want to silence it.
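+ # Note: as the description above says, a single-instance cluster will always fire this alert; a minimal sketch of an Alertmanager silence matcher for that case, kept as a comment so it does not change the rendered rule (values are illustrative): + # matchers: + # - name: alertname + # value: CNPGClusterHACritical + # - name: cnpg_cluster + # value: directus-postgresql-17-cluster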
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="directus"} - cnpg_pg_replication_is_wal_receiver_up{namespace="directus"}) < 1 + for: 5m + labels: + severity: critical + namespace: directus + cnpg_cluster: directus-postgresql-17-cluster + - alert: CNPGClusterHAWarning + annotations: + summary: CNPG Cluster has fewer than 2 standby replicas. + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting + your cluster at risk if another instance fails. The cluster is still able to operate normally, although + the `-ro` and `-r` endpoints operate at reduced capacity. + + This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may + need some time to catch up with the cluster primary instance. + + This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances. + In this case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="directus"} - cnpg_pg_replication_is_wal_receiver_up{namespace="directus"}) < 2 + for: 5m + labels: + severity: warning + namespace: directus + cnpg_cluster: directus-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsCritical + annotations: + summary: CNPG Instance is critically close to the maximum number of connections! + description: |- + CloudNativePG Cluster "directus/directus-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="directus", pod=~"directus-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="directus", pod=~"directus-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95 + for: 5m + labels: + severity: critical + namespace: directus + cnpg_cluster: directus-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsWarning + annotations: + summary: CNPG Instance is approaching the maximum number of connections. + description: |- + CloudNativePG Cluster "directus/directus-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="directus", pod=~"directus-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="directus", pod=~"directus-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: directus + cnpg_cluster: directus-postgresql-17-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "directus/directus-postgresql-17-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. + + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
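+ # The lag metric in the expr below is reported in seconds and multiplied by 1000 to compare against a millisecond threshold; a commented sketch of the same check with a different, hypothetical budget of 5s: + # expr: max(cnpg_pg_replication_lag{namespace="directus"}) * 1000 > 5000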
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="directus",pod=~"directus-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: directus + cnpg_cluster: directus-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "directus/directus-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="directus", pod=~"directus-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: directus + cnpg_cluster: directus-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: directus + cnpg_cluster: directus-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! + description: |- + CloudNativePG Cluster "directus/directus-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="directus", persistentvolumeclaim=~"directus-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="directus", persistentvolumeclaim=~"directus-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="directus", persistentvolumeclaim=~"directus-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="directus", persistentvolumeclaim=~"directus-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="directus", persistentvolumeclaim=~"directus-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="directus", persistentvolumeclaim=~"directus-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"directus-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: directus + cnpg_cluster: directus-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "directus/directus-postgresql-17-cluster" is running low on disk space. Check attached PVCs. 
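+ # The expr below checks three PVC families in turn: the instance data volumes, the matching "-wal" volumes, and any "-tbs*" tablespace volumes, firing when usage crosses 70%; a commented sketch of the first leg on its own, assuming the standard kubelet volume metrics: + # expr: 1 - kubelet_volume_stats_available_bytes / kubelet_volume_stats_capacity_bytes > 0.7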
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="directus", persistentvolumeclaim=~"directus-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="directus", persistentvolumeclaim=~"directus-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="directus", persistentvolumeclaim=~"directus-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="directus", persistentvolumeclaim=~"directus-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="directus", persistentvolumeclaim=~"directus-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="directus", persistentvolumeclaim=~"directus-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"directus-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.7 + for: 5m + labels: + severity: warning + namespace: directus + cnpg_cluster: directus-postgresql-17-cluster + - alert: CNPGClusterOffline + annotations: + summary: CNPG Cluster has no running instances! + description: |- + CloudNativePG Cluster "directus/directus-postgresql-17-cluster" has no ready instances. + + Having an offline cluster means your applications will not be able to access the database, leading to + potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md + expr: | + (count(cnpg_collector_up{namespace="directus",pod=~"directus-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0 + for: 5m + labels: + severity: critical + namespace: directus + cnpg_cluster: directus-postgresql-17-cluster + - alert: CNPGClusterPGDatabaseXidAgeWarning + annotations: + summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one. + description: |- + Over 300,000,000 transactions from frozen xid + on pod {{ $labels.pod }} + expr: | + cnpg_pg_database_xid_age > 300000000 + for: 1m + labels: + severity: warning + namespace: directus + cnpg_cluster: directus-postgresql-17-cluster + - alert: CNPGClusterPGReplicationWarning + annotations: + summary: CNPG Cluster standby is lagging behind the primary. + description: |- + Standby is lagging behind by over 300 seconds (5 minutes) + expr: | + cnpg_pg_replication_lag > 300 + for: 1m + labels: + severity: warning + namespace: directus + cnpg_cluster: directus-postgresql-17-cluster + - alert: CNPGClusterReplicaFailingReplicationWarning + annotations: + summary: CNPG Cluster has a replica that is failing to replicate. + description: |- + Replica {{ $labels.pod }} + is failing to replicate + expr: | + cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up + for: 1m + labels: + severity: warning + namespace: directus + cnpg_cluster: directus-postgresql-17-cluster + - alert: CNPGClusterZoneSpreadWarning + annotations: + summary: CNPG Cluster has instances in the same zone. + description: |- + CloudNativePG Cluster "directus/directus-postgresql-17-cluster" has instances in the same availability zone.
+ + A disaster in one availability zone will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="directus", pod=~"directus-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: directus + cnpg_cluster: directus-postgresql-17-cluster +--- +# Source: directus/templates/redis-replication.yaml +apiVersion: redis.redis.opstreelabs.in/v1beta2 +kind: RedisReplication +metadata: + name: redis-replication-directus + namespace: directus + labels: + app.kubernetes.io/name: redis-replication-directus + app.kubernetes.io/instance: directus + app.kubernetes.io/part-of: directus +spec: + clusterSize: 3 + podSecurityContext: + runAsUser: 1000 + fsGroup: 1000 + kubernetesConfig: + image: quay.io/opstree/redis:v8.2.1 + imagePullPolicy: IfNotPresent + redisSecret: + name: directus-redis-config + key: password + resources: + requests: + cpu: 50m + memory: 128Mi + storage: + volumeClaimTemplate: + spec: + storageClassName: ceph-block + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 1Gi + redisExporter: + enabled: true + image: quay.io/opstree/redis-exporter:v1.76.0 +--- +# Source: directus/templates/redis-sentinel.yaml +apiVersion: redis.redis.opstreelabs.in/v1beta2 +kind: RedisSentinel +metadata: + name: redis-sentinel-directus + namespace: directus + labels: + app.kubernetes.io/name: redis-sentinel-directus + app.kubernetes.io/instance: directus + app.kubernetes.io/part-of: directus +spec: + clusterSize: 3 + podSecurityContext: + runAsUser: 1000 + fsGroup: 1000 + redisSentinelConfig: + redisReplicationName: redis-replication-directus + redisReplicationPassword: + secretKeyRef: + name: directus-redis-config + key: password + kubernetesConfig: + image: quay.io/opstree/redis-sentinel:v7.0.15 + imagePullPolicy: IfNotPresent + redisSecret: + name: directus-redis-config + key: password + resources: + requests: + cpu: 10m + memory: 128Mi +--- +# Source: directus/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "directus-postgresql-17-daily-backup-scheduled-backup" + namespace: directus + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: directus-postgresql-17 + app.kubernetes.io/instance: directus + app.kubernetes.io/part-of: directus + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: directus-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "directus-postgresql-17-external-backup" +--- +# Source: directus/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "directus-postgresql-17-live-backup-scheduled-backup" + namespace: directus + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: directus-postgresql-17 + app.kubernetes.io/instance: directus + app.kubernetes.io/part-of: directus + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + 
schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: directus-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "directus-postgresql-17-garage-local-backup" +--- +# Source: directus/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: directus + namespace: directus + labels: + app.kubernetes.io/name: directus + app.kubernetes.io/instance: directus + app.kubernetes.io/part-of: directus +spec: + selector: + matchLabels: + app.kubernetes.io/name: directus + app.kubernetes.io/instance: directus + endpoints: + - port: http + interval: 30s + scrapeTimeout: 15s + path: /metrics + bearerTokenSecret: + name: directus-metric-token + key: metric-token +--- +# Source: directus/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: redis-replication-directus + namespace: directus + labels: + app.kubernetes.io/name: redis-replication-directus + app.kubernetes.io/instance: directus + app.kubernetes.io/part-of: directus + redis-operator: "true" + env: production +spec: + selector: + matchLabels: + redis_setup_type: replication + endpoints: + - port: redis-exporter + interval: 30s + scrapeTimeout: 10s diff --git a/clusters/cl01tl/manifests/element-web/element-web.yaml b/clusters/cl01tl/manifests/element-web/element-web.yaml new file mode 100644 index 000000000..6c3419f34 --- /dev/null +++ b/clusters/cl01tl/manifests/element-web/element-web.yaml @@ -0,0 +1,258 @@ +--- +# Source: element-web/charts/element-web/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: element-web + labels: + helm.sh/chart: element-web-1.4.24 + app.kubernetes.io/name: element-web + app.kubernetes.io/instance: element-web + app.kubernetes.io/version: "1.12.4" + app.kubernetes.io/managed-by: Helm +--- +# Source: element-web/charts/element-web/templates/configuration-nginx.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: element-web-nginx + labels: + helm.sh/chart: element-web-1.4.24 + app.kubernetes.io/name: element-web + app.kubernetes.io/instance: element-web + app.kubernetes.io/version: "1.12.4" + app.kubernetes.io/managed-by: Helm +data: + default.conf: | + server { + listen 8080; + listen [::]:8080; + server_name localhost; + + root /usr/share/nginx/html; + index index.html; + + add_header X-Frame-Options SAMEORIGIN; + add_header X-Content-Type-Options nosniff; + add_header X-XSS-Protection "1; mode=block"; + add_header Content-Security-Policy "frame-ancestors 'self'"; + + # Set no-cache for the index.html only so that browsers always check for a new copy of Element Web. 
+ location = /index.html { + add_header Cache-Control "no-cache"; + } + + # redirect server error pages to the static page /50x.html + # + error_page 500 502 503 504 /50x.html; + } +--- +# Source: element-web/charts/element-web/templates/configuration.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: element-web + labels: + helm.sh/chart: element-web-1.4.24 + app.kubernetes.io/name: element-web + app.kubernetes.io/instance: element-web + app.kubernetes.io/version: "1.12.4" + app.kubernetes.io/managed-by: Helm +data: + config.json: | + {"brand":"Alex Lebens","branding":{"auth_header_logo_url":"https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/logo-new-round.png","welcome_background_url":"https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/background.jpg"},"default_country_code":"US","default_server_config":{"m.homeserver":{"base_url":"https://matrix.alexlebens.dev","server_name":"alexlebens.dev"},"m.identity_server":{"base_url":"https://alexlebens.dev"}},"default_theme":"dark","disable_3pid_login":true,"sso_redirect_options":{"immediate":true}} +--- +# Source: element-web/charts/element-web/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: element-web + labels: + helm.sh/chart: element-web-1.4.24 + app.kubernetes.io/name: element-web + app.kubernetes.io/instance: element-web + app.kubernetes.io/version: "1.12.4" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app.kubernetes.io/name: element-web + app.kubernetes.io/instance: element-web +--- +# Source: element-web/charts/cloudflared/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: element-web-cloudflared + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: element-web + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cloudflared + app.kubernetes.io/version: 2025.10.0 + helm.sh/chart: cloudflared-1.23.0 + namespace: element-web +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: cloudflared + app.kubernetes.io/instance: element-web + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: element-web + app.kubernetes.io/name: cloudflared + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - args: + - tunnel + - --protocol + - http2 + - --no-autoupdate + - run + - --token + - $(CF_MANAGED_TUNNEL_TOKEN) + env: + - name: CF_MANAGED_TUNNEL_TOKEN + valueFrom: + secretKeyRef: + key: cf-tunnel-token + name: element-web-cloudflared-secret + image: cloudflare/cloudflared:2025.11.1 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi +--- +# Source: element-web/charts/element-web/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: element-web + labels: + helm.sh/chart: element-web-1.4.24 + app.kubernetes.io/name: element-web + app.kubernetes.io/instance: element-web + app.kubernetes.io/version: "1.12.4" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: element-web + app.kubernetes.io/instance: element-web + template: + metadata: + annotations: + 
checksum/config: e4e49fadd0eaedd59d5adab594fb3e159fcaaecf883c31012f72a55c7785e1c4 + checksum/config-nginx: 0d6dce57e41259f77d072cd0381296fb272ba1c62d8817d5fd742da9ccce5aa1 + labels: + app.kubernetes.io/name: element-web + app.kubernetes.io/instance: element-web + spec: + serviceAccountName: element-web + securityContext: + {} + containers: + - name: element-web + securityContext: + {} + image: "vectorim/element-web:v1.12.4" + imagePullPolicy: IfNotPresent + env: + - name: ELEMENT_WEB_PORT + value: '8080' + ports: + - name: http + containerPort: 8080 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + resources: + requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - mountPath: /app/config.json + name: config + subPath: config.json + - mountPath: /etc/nginx/conf.d/default.conf + name: config-nginx + subPath: default.conf + volumes: + - name: config + configMap: + name: element-web + - name: config-nginx + configMap: + name: element-web-nginx +--- +# Source: element-web/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: element-web-cloudflared-secret + namespace: element-web + labels: + app.kubernetes.io/name: element-web-cloudflared-secret + app.kubernetes.io/instance: element-web + app.kubernetes.io/part-of: element-web +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: cf-tunnel-token + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cloudflare/tunnels/element + metadataPolicy: None + property: token +--- +# Source: element-web/charts/element-web/templates/tests/test-connection.yaml +apiVersion: v1 +kind: Pod +metadata: + name: "element-web-test-connection" + labels: + helm.sh/chart: element-web-1.4.24 + app.kubernetes.io/name: element-web + app.kubernetes.io/instance: element-web + app.kubernetes.io/version: "1.12.4" + app.kubernetes.io/managed-by: Helm + annotations: + "helm.sh/hook": test-success +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['element-web:80'] + restartPolicy: Never diff --git a/clusters/cl01tl/manifests/ephemera/ephemera.yaml b/clusters/cl01tl/manifests/ephemera/ephemera.yaml new file mode 100644 index 000000000..664b082ff --- /dev/null +++ b/clusters/cl01tl/manifests/ephemera/ephemera.yaml @@ -0,0 +1,360 @@ +--- +# Source: ephemera/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ephemera-import-nfs-storage + namespace: ephemera + labels: + app.kubernetes.io/name: ephemera-import-nfs-storage + app.kubernetes.io/instance: ephemera + app.kubernetes.io/part-of: ephemera +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage/Books Import + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: ephemera/charts/ephemera/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: ephemera + labels: + app.kubernetes.io/instance: ephemera + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: ephemera + helm.sh/chart: ephemera-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: ephemera +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" + storageClassName: "ceph-block" +--- +# Source: ephemera/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: 
PersistentVolumeClaim +metadata: + name: ephemera-import-nfs-storage + namespace: ephemera + labels: + app.kubernetes.io/name: ephemera-import-nfs-storage + app.kubernetes.io/instance: ephemera + app.kubernetes.io/part-of: ephemera +spec: + volumeName: ephemera-import-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: ephemera/charts/ephemera/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: ephemera + labels: + app.kubernetes.io/instance: ephemera + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: ephemera + app.kubernetes.io/service: ephemera + helm.sh/chart: ephemera-4.4.0 + namespace: ephemera +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 8286 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: ephemera + app.kubernetes.io/name: ephemera +--- +# Source: ephemera/charts/ephemera/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ephemera + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: ephemera + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: ephemera + helm.sh/chart: ephemera-4.4.0 + namespace: ephemera +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: ephemera + app.kubernetes.io/instance: ephemera + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: ephemera + app.kubernetes.io/name: ephemera + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: APPRISE_STORAGE_MODE + value: memory + - name: APPRISE_STATEFUL_MODE + value: disabled + - name: APPRISE_WORKER_COUNT + value: "1" + - name: APPRISE_STATELESS_URLS + valueFrom: + secretKeyRef: + key: ntfy-url + name: ephemera-apprise-config + image: caronc/apprise:1.2.6 + imagePullPolicy: IfNotPresent + name: apprise-api + resources: + requests: + cpu: 10m + memory: 128Mi + - env: + - name: LOG_LEVEL + value: info + - name: LOG_HTML + value: "false" + - name: CAPTCHA_SOLVER + value: none + - name: TZ + value: America/Chicago + image: ghcr.io/flaresolverr/flaresolverr:v3.4.5 + imagePullPolicy: IfNotPresent + name: flaresolverr + resources: + requests: + cpu: 10m + memory: 128Mi + - env: + - name: AA_BASE_URL + value: https://annas-archive.org + - name: FLARESOLVERR_URL + value: http://127.0.0.1:8191 + - name: LG_BASE_URL + value: https://gen.com + - name: PUID + value: "0" + - name: PGID + value: "0" + image: ghcr.io/orwellianepilogue/ephemera:1.3.1 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 50m + memory: 128Mi + volumeMounts: + - mountPath: /app/downloads + name: cache + - mountPath: /app/data + name: config + - mountPath: /app/ingest + name: ingest + volumes: + - emptyDir: {} + name: cache + - name: config + persistentVolumeClaim: + claimName: ephemera + - name: ingest + persistentVolumeClaim: + claimName: ephemera-import-nfs-storage +--- +# Source: ephemera/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: ephemera-key-secret + namespace: ephemera + labels: + app.kubernetes.io/name: ephemera-key-secret + app.kubernetes.io/instance: ephemera + 
app.kubernetes.io/part-of: ephemera +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: key + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/ephemera/config + metadataPolicy: None + property: key +--- +# Source: ephemera/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: ephemera-apprise-config + namespace: ephemera + labels: + app.kubernetes.io/name: ephemera-apprise-config + app.kubernetes.io/instance: ephemera + app.kubernetes.io/part-of: ephemera +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ntfy-url + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/ephemera/config + metadataPolicy: None + property: ntfy-url +--- +# Source: ephemera/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: ephemera-config-backup-secret + namespace: ephemera + labels: + app.kubernetes.io/name: ephemera-config-backup-secret + app.kubernetes.io/instance: ephemera + app.kubernetes.io/part-of: ephemera +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/ephemera/ephemera-config" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: ephemera/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-ephemera + namespace: ephemera + labels: + app.kubernetes.io/name: http-route-ephemera + app.kubernetes.io/instance: ephemera + app.kubernetes.io/part-of: ephemera +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - ephemera.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: ephemera + port: 80 + weight: 100 +--- +# Source: ephemera/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: ephemera-config-backup-source + namespace: ephemera + labels: + app.kubernetes.io/name: ephemera-config-backup-source + app.kubernetes.io/instance: ephemera + app.kubernetes.io/part-of: ephemera +spec: + sourcePVC: ephemera-config + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: ephemera-config-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + 
volumeSnapshotClassName: ceph-blockpool-snapshot + cacheCapacity: 10Gi diff --git a/clusters/cl01tl/manifests/freshrss/freshrss.yaml b/clusters/cl01tl/manifests/freshrss/freshrss.yaml new file mode 100644 index 000000000..6741009cf --- /dev/null +++ b/clusters/cl01tl/manifests/freshrss/freshrss.yaml @@ -0,0 +1,1085 @@ +--- +# Source: freshrss/charts/freshrss/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: freshrss-extensions + labels: + app.kubernetes.io/instance: freshrss + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: freshrss + helm.sh/chart: freshrss-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: freshrss +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "1Gi" + storageClassName: "ceph-block" +--- +# Source: freshrss/charts/freshrss/templates/common.yaml +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: freshrss-data + labels: + app.kubernetes.io/instance: freshrss + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: freshrss + helm.sh/chart: freshrss-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: freshrss +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" + storageClassName: "ceph-block" +--- +# Source: freshrss/charts/freshrss/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: freshrss + labels: + app.kubernetes.io/instance: freshrss + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: freshrss + app.kubernetes.io/service: freshrss + helm.sh/chart: freshrss-4.4.0 + namespace: freshrss +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: freshrss + app.kubernetes.io/name: freshrss +--- +# Source: freshrss/charts/cloudflared/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: freshrss-cloudflared + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: freshrss + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cloudflared + app.kubernetes.io/version: 2025.10.0 + helm.sh/chart: cloudflared-1.23.0 + namespace: freshrss +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: cloudflared + app.kubernetes.io/instance: freshrss + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: freshrss + app.kubernetes.io/name: cloudflared + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - args: + - tunnel + - --protocol + - http2 + - --no-autoupdate + - run + - --token + - $(CF_MANAGED_TUNNEL_TOKEN) + env: + - name: CF_MANAGED_TUNNEL_TOKEN + valueFrom: + secretKeyRef: + key: cf-tunnel-token + name: freshrss-cloudflared-secret + image: cloudflare/cloudflared:2025.11.1 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi +--- +# Source: freshrss/charts/freshrss/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: freshrss + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: freshrss + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: freshrss + helm.sh/chart: freshrss-4.4.0 + namespace: freshrss +spec: + 
revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: freshrss + app.kubernetes.io/instance: freshrss + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: freshrss + app.kubernetes.io/name: freshrss + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + initContainers: + - command: + - /bin/sh + - -ec + - | + apk add --no-cache git; + cd /tmp; + git clone -n --depth=1 --filter=tree:0 https://github.com/cn-tools/cntools_FreshRssExtensions.git; + cd cntools_FreshRssExtensions; + git sparse-checkout set --no-cone /xExtension-YouTubeChannel2RssFeed; + git checkout; + rm -rf /var/www/FreshRSS/extensions/xExtension-YouTubeChannel2RssFeed + cp -r xExtension-YouTubeChannel2RssFeed /var/www/FreshRSS/extensions + chown -R 568:568 /var/www/FreshRSS/extensions/xExtension-YouTubeChannel2RssFeed + image: alpine:3.22.2 + imagePullPolicy: IfNotPresent + name: init-download-extension-1 + resources: + requests: + cpu: 10m + memory: 128Mi + securityContext: + runAsUser: 0 + volumeMounts: + - mountPath: /var/www/FreshRSS/extensions + name: extensions + - command: + - /bin/sh + - -ec + - | + apk add --no-cache git; + cd /tmp; + git clone -n --depth=1 --filter=tree:0 https://github.com/FreshRSS/Extensions.git; + cd Extensions; + git sparse-checkout set --no-cone /xExtension-ImageProxy; + git checkout; + rm -rf /var/www/FreshRSS/extensions/xExtension-ImageProxy + cp -r xExtension-ImageProxy /var/www/FreshRSS/extensions + chown -R 568:568 /var/www/FreshRSS/extensions/xExtension-ImageProxy + image: alpine:3.22.2 + imagePullPolicy: IfNotPresent + name: init-download-extension-2 + resources: + requests: + cpu: 10m + memory: 128Mi + securityContext: + runAsUser: 0 + volumeMounts: + - mountPath: /var/www/FreshRSS/extensions + name: extensions + - command: + - /bin/sh + - -ec + - | + cd /tmp; + wget https://github.com/zimmra/xExtension-karakeep-button/archive/refs/tags/v1.1.tar.gz; + tar -xvzf *.tar.gz; + rm -rf /var/www/FreshRSS/extensions/xExtension-karakeep-button + mkdir /var/www/FreshRSS/extensions/xExtension-karakeep-button + cp -r /tmp/xExtension-karakeep-button-*/* /var/www/FreshRSS/extensions/xExtension-karakeep-button + chown -R 568:568 /var/www/FreshRSS/extensions/xExtension-karakeep-button + image: alpine:3.22.2 + imagePullPolicy: IfNotPresent + name: init-download-extension-3 + resources: + requests: + cpu: 10m + memory: 128Mi + securityContext: + runAsUser: 0 + volumeMounts: + - mountPath: /var/www/FreshRSS/extensions + name: extensions + containers: + - env: + - name: PGID + value: "568" + - name: PUID + value: "568" + - name: TZ + value: US/Central + - name: FRESHRSS_ENV + value: production + - name: CRON_MIN + value: 13,43 + - name: BASE_URL + value: https://rss.alexlebens.dev + - name: DB_HOST + valueFrom: + secretKeyRef: + key: host + name: freshrss-postgresql-17-cluster-app + - name: DB_BASE + valueFrom: + secretKeyRef: + key: dbname + name: freshrss-postgresql-17-cluster-app + - name: DB_USER + valueFrom: + secretKeyRef: + key: user + name: freshrss-postgresql-17-cluster-app + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + key: password + name: freshrss-postgresql-17-cluster-app + - name: FRESHRSS_INSTALL + value: | + --api-enabled + --base-url $(BASE_URL) + --db-base $(DB_BASE) + --db-host $(DB_HOST) + --db-password 
$(DB_PASSWORD) + --db-type pgsql + --db-user $(DB_USER) + --auth-type http_auth + --default-user admin + --language en + - name: FRESHRSS_USER + value: | + --api-password $(ADMIN_API_PASSWORD) + --email $(ADMIN_EMAIL) + --language en + --password $(ADMIN_PASSWORD) + --user admin + - name: OIDC_ENABLED + value: "1" + - name: OIDC_PROVIDER_METADATA_URL + value: https://auth.alexlebens.dev/application/o/freshrss/.well-known/openid-configuration + - name: OIDC_X_FORWARDED_HEADERS + value: X-Forwarded-Port X-Forwarded-Proto X-Forwarded-Host + - name: OIDC_SCOPES + value: openid email profile + - name: OIDC_REMOTE_USER_CLAIM + value: preferred_username + envFrom: + - secretRef: + name: freshrss-oidc-secret + - secretRef: + name: freshrss-install-secret + image: freshrss/freshrss:1.27.1 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - mountPath: /var/www/FreshRSS/data + name: data + - mountPath: /var/www/FreshRSS/extensions + name: extensions + volumes: + - name: data + persistentVolumeClaim: + claimName: freshrss-data + - name: extensions + persistentVolumeClaim: + claimName: freshrss-extensions +--- +# Source: freshrss/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: freshrss-postgresql-17-cluster + namespace: freshrss + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: freshrss-postgresql-17 + app.kubernetes.io/instance: freshrss + app.kubernetes.io/part-of: freshrss + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "freshrss-postgresql-17-external-backup" + serverName: "freshrss-postgresql-17-backup-1" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "freshrss-postgresql-17-garage-local-backup" + serverName: "freshrss-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "freshrss-postgresql-17-recovery" + serverName: freshrss-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 100m + memory: 256Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: freshrss-postgresql-17-backup-1 + + externalClusters: + - name: freshrss-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "freshrss-postgresql-17-recovery" + serverName: freshrss-postgresql-17-backup-1 +--- +# Source: freshrss/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: freshrss-install-secret + namespace: freshrss + 
labels: + app.kubernetes.io/name: freshrss-install-secret + app.kubernetes.io/instance: freshrss + app.kubernetes.io/part-of: freshrss +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ADMIN_EMAIL + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/freshrss/config + metadataPolicy: None + property: ADMIN_EMAIL + - secretKey: ADMIN_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/freshrss/config + metadataPolicy: None + property: ADMIN_PASSWORD + - secretKey: ADMIN_API_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/freshrss/config + metadataPolicy: None + property: ADMIN_API_PASSWORD +--- +# Source: freshrss/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: freshrss-oidc-secret + namespace: freshrss + labels: + app.kubernetes.io/name: freshrss-oidc-secret + app.kubernetes.io/instance: freshrss + app.kubernetes.io/part-of: freshrss +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: OIDC_CLIENT_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/freshrss + metadataPolicy: None + property: client + - secretKey: OIDC_CLIENT_SECRET + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/freshrss + metadataPolicy: None + property: secret + - secretKey: OIDC_CLIENT_CRYPTO_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/freshrss + metadataPolicy: None + property: crypto-key +--- +# Source: freshrss/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: freshrss-cloudflared-secret + namespace: freshrss + labels: + app.kubernetes.io/name: freshrss-cloudflared-secret + app.kubernetes.io/instance: freshrss + app.kubernetes.io/part-of: freshrss +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: cf-tunnel-token + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cloudflare/tunnels/freshrss + metadataPolicy: None + property: token +--- +# Source: freshrss/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: freshrss-data-backup-secret + namespace: freshrss + labels: + app.kubernetes.io/name: freshrss-data-backup-secret + app.kubernetes.io/instance: freshrss + app.kubernetes.io/part-of: freshrss +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/freshrss/freshrss-data" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - 
secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: freshrss/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: freshrss-postgresql-17-cluster-backup-secret + namespace: freshrss + labels: + app.kubernetes.io/name: freshrss-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: freshrss + app.kubernetes.io/part-of: freshrss +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: freshrss/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: freshrss-postgresql-17-cluster-backup-secret-garage + namespace: freshrss + labels: + app.kubernetes.io/name: freshrss-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: freshrss + app.kubernetes.io/part-of: freshrss +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: freshrss/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "freshrss-postgresql-17-external-backup" + namespace: freshrss + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: freshrss-postgresql-17 + app.kubernetes.io/instance: freshrss + app.kubernetes.io/part-of: freshrss + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/freshrss/freshrss-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: freshrss-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: freshrss-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: freshrss/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "freshrss-postgresql-17-garage-local-backup" + namespace: freshrss + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: freshrss-postgresql-17 + app.kubernetes.io/instance: freshrss + app.kubernetes.io/part-of: freshrss + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/freshrss/freshrss-postgresql-17-cluster + endpointURL: 
http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: freshrss-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: freshrss-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: freshrss-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: freshrss/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "freshrss-postgresql-17-recovery" + namespace: freshrss + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: freshrss-postgresql-17 + app.kubernetes.io/instance: freshrss + app.kubernetes.io/part-of: freshrss + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + configuration: + destinationPath: s3://postgres-backups/cl01tl/freshrss/freshrss-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + wal: + compression: snappy + maxParallel: 1 + data: + compression: snappy + jobs: 1 + s3Credentials: + accessKeyId: + name: freshrss-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: freshrss-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY +--- +# Source: freshrss/charts/postgres-17-cluster/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: freshrss-postgresql-17-alert-rules + namespace: freshrss + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: freshrss-postgresql-17 + app.kubernetes.io/instance: freshrss + app.kubernetes.io/part-of: freshrss + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + groups: + - name: cloudnative-pg/freshrss-postgresql-17 + rules: + - alert: CNPGClusterBackendsWaitingWarning + annotations: + summary: CNPG Cluster has a backend waiting for longer than 5 minutes. + description: |- + Pod {{ $labels.pod }} + has been waiting for longer than 5 minutes. + expr: | + cnpg_backends_waiting_total > 300 + for: 1m + labels: + severity: warning + namespace: freshrss + cnpg_cluster: freshrss-postgresql-17-cluster + - alert: CNPGClusterDatabaseDeadlockConflictsWarning + annotations: + summary: CNPG Cluster has over 10 deadlock conflicts. + description: |- + There are over 10 deadlock conflicts in + {{ $labels.pod }}. + expr: | + cnpg_pg_stat_database_deadlocks > 10 + for: 1m + labels: + severity: warning + namespace: freshrss + cnpg_cluster: freshrss-postgresql-17-cluster + - alert: CNPGClusterHACritical + annotations: + summary: CNPG Cluster has no standby replicas! + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe + risk of data loss and downtime if the primary instance fails. + + The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint + will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary. + + This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer + instances. The replaced instance may need some time to catch up with the cluster primary instance. + + This alarm will always trigger if your cluster is configured to run with only 1 instance. In this + case you may want to silence it.
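+# Editor's note: the HA expressions below treat "ready standbys" as streaming
+# replicas minus replicas whose WAL receiver is down. As a rough sketch, on a
+# healthy 3-instance cluster the primary reports 2 streaming replicas and no
+# local WAL receiver, so max by (job) (2 - 0) = 2 and neither HA alert fires;
+# with a single instance the value is 0 and the critical alert always fires.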
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="freshrss"} - cnpg_pg_replication_is_wal_receiver_up{namespace="freshrss"}) < 1 + for: 5m + labels: + severity: critical + namespace: freshrss + cnpg_cluster: freshrss-postgresql-17-cluster + - alert: CNPGClusterHAWarning + annotations: + summary: CNPG Cluster has fewer than 2 standby replicas. + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting + your cluster at risk if another instance fails. The cluster is still able to operate normally, although + the `-ro` and `-r` endpoints operate at reduced capacity. + + This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may + need some time to catch up with the cluster primary instance. + + This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances. + In this case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="freshrss"} - cnpg_pg_replication_is_wal_receiver_up{namespace="freshrss"}) < 2 + for: 5m + labels: + severity: warning + namespace: freshrss + cnpg_cluster: freshrss-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsCritical + annotations: + summary: CNPG Instance is critically close to the maximum number of connections! + description: |- + CloudNativePG Cluster "freshrss/freshrss-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="freshrss", pod=~"freshrss-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="freshrss", pod=~"freshrss-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95 + for: 5m + labels: + severity: critical + namespace: freshrss + cnpg_cluster: freshrss-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsWarning + annotations: + summary: CNPG Instance is approaching the maximum number of connections. + description: |- + CloudNativePG Cluster "freshrss/freshrss-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="freshrss", pod=~"freshrss-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="freshrss", pod=~"freshrss-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: freshrss + cnpg_cluster: freshrss-postgresql-17-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "freshrss/freshrss-postgresql-17-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. + + High replication lag indicates network issues, busy instances, slow queries, or suboptimal configuration.
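+# Editor's note: cnpg_pg_replication_lag is exported in seconds; the expression
+# below multiplies by 1000 so the 1000 threshold reads as milliseconds. A minimal
+# worked example, assuming a standby lagging by 2.5 s:
+#   max(2.5) * 1000 = 2500 > 1000  -> pending, then firing once the 5m 'for' elapses.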
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="freshrss",pod=~"freshrss-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: freshrss + cnpg_cluster: freshrss-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "freshrss/freshrss-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="freshrss", pod=~"freshrss-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: freshrss + cnpg_cluster: freshrss-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster has a query running for longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + has been running a query for more than 5 minutes (300 seconds). + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: freshrss + cnpg_cluster: freshrss-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! + description: |- + CloudNativePG Cluster "freshrss/freshrss-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="freshrss", persistentvolumeclaim=~"freshrss-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="freshrss", persistentvolumeclaim=~"freshrss-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="freshrss", persistentvolumeclaim=~"freshrss-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="freshrss", persistentvolumeclaim=~"freshrss-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="freshrss", persistentvolumeclaim=~"freshrss-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="freshrss", persistentvolumeclaim=~"freshrss-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"freshrss-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: freshrss + cnpg_cluster: freshrss-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "freshrss/freshrss-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
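+# Editor's note: the warning rule below reuses the critical expression above with
+# a 0.7 (70%) usage threshold instead of 0.9 (90%); its three OR-ed branches cover
+# the data PVCs, the dedicated WAL PVCs (-wal suffix), and any tablespace PVCs
+# (-tbs* suffix) attached to the cluster pods.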
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="freshrss", persistentvolumeclaim=~"freshrss-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="freshrss", persistentvolumeclaim=~"freshrss-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="freshrss", persistentvolumeclaim=~"freshrss-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="freshrss", persistentvolumeclaim=~"freshrss-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="freshrss", persistentvolumeclaim=~"freshrss-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="freshrss", persistentvolumeclaim=~"freshrss-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"freshrss-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.7 + for: 5m + labels: + severity: warning + namespace: freshrss + cnpg_cluster: freshrss-postgresql-17-cluster + - alert: CNPGClusterOffline + annotations: + summary: CNPG Cluster has no running instances! + description: |- + CloudNativePG Cluster "freshrss/freshrss-postgresql-17-cluster" has no ready instances. + + Having an offline cluster means your applications will not be able to access the database, leading to + potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md + expr: | + (count(cnpg_collector_up{namespace="freshrss",pod=~"freshrss-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0 + for: 5m + labels: + severity: critical + namespace: freshrss + cnpg_cluster: freshrss-postgresql-17-cluster + - alert: CNPGClusterPGDatabaseXidAgeWarning + annotations: + summary: CNPG Cluster database transaction ID (XID) age is high. + description: |- + Over 300,000,000 transactions from the frozen XID + on pod {{ $labels.pod }}. + expr: | + cnpg_pg_database_xid_age > 300000000 + for: 1m + labels: + severity: warning + namespace: freshrss + cnpg_cluster: freshrss-postgresql-17-cluster + - alert: CNPGClusterPGReplicationWarning + annotations: + summary: CNPG Cluster standby is lagging behind the primary. + description: |- + Standby is lagging behind by over 300 seconds (5 minutes). + expr: | + cnpg_pg_replication_lag > 300 + for: 1m + labels: + severity: warning + namespace: freshrss + cnpg_cluster: freshrss-postgresql-17-cluster + - alert: CNPGClusterReplicaFailingReplicationWarning + annotations: + summary: CNPG Cluster has a replica that is failing to replicate. + description: |- + Replica {{ $labels.pod }} + is failing to replicate. + expr: | + cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up + for: 1m + labels: + severity: warning + namespace: freshrss + cnpg_cluster: freshrss-postgresql-17-cluster + - alert: CNPGClusterZoneSpreadWarning + annotations: + summary: CNPG Cluster has instances in the same availability zone. + description: |- + CloudNativePG Cluster "freshrss/freshrss-postgresql-17-cluster" has instances in the same availability zone.
+ + A disaster in one availability zone will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="freshrss", pod=~"freshrss-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: freshrss + cnpg_cluster: freshrss-postgresql-17-cluster +--- +# Source: freshrss/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: freshrss-data-backup-source + namespace: freshrss + labels: + app.kubernetes.io/name: freshrss-data-backup-source + app.kubernetes.io/instance: freshrss + app.kubernetes.io/part-of: freshrss +spec: + sourcePVC: freshrss-data + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: freshrss-data-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + moverSecurityContext: + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + fsGroupChangePolicy: OnRootMismatch + supplementalGroups: + - 44 + - 100 + - 109 + - 65539 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: freshrss/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "freshrss-postgresql-17-daily-backup-scheduled-backup" + namespace: freshrss + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: freshrss-postgresql-17 + app.kubernetes.io/instance: freshrss + app.kubernetes.io/part-of: freshrss + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: freshrss-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "freshrss-postgresql-17-external-backup" +--- +# Source: freshrss/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "freshrss-postgresql-17-live-backup-scheduled-backup" + namespace: freshrss + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: freshrss-postgresql-17 + app.kubernetes.io/instance: freshrss + app.kubernetes.io/part-of: freshrss + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: freshrss-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "freshrss-postgresql-17-garage-local-backup" diff --git a/clusters/cl01tl/manifests/home-assistant/home-assistant.yaml b/clusters/cl01tl/manifests/home-assistant/home-assistant.yaml new file mode 100644 index 000000000..0aae17be0 --- /dev/null +++ b/clusters/cl01tl/manifests/home-assistant/home-assistant.yaml @@ -0,0 +1,283 @@ +--- +# Source: home-assistant/charts/home-assistant/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: home-assistant-config + labels: + app.kubernetes.io/instance: home-assistant + app.kubernetes.io/managed-by: Helm + 
app.kubernetes.io/name: home-assistant + helm.sh/chart: home-assistant-4.4.0 + namespace: home-assistant +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" + storageClassName: "ceph-block" +--- +# Source: home-assistant/charts/home-assistant/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: home-assistant-code-server + labels: + app.kubernetes.io/instance: home-assistant + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: home-assistant + app.kubernetes.io/service: home-assistant-code-server + helm.sh/chart: home-assistant-4.4.0 + namespace: home-assistant +spec: + type: ClusterIP + ports: + - port: 8443 + targetPort: 8443 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: home-assistant + app.kubernetes.io/name: home-assistant +--- +# Source: home-assistant/charts/home-assistant/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: home-assistant-main + labels: + app.kubernetes.io/instance: home-assistant + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: home-assistant + app.kubernetes.io/service: home-assistant-main + helm.sh/chart: home-assistant-4.4.0 + namespace: home-assistant +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 8123 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: home-assistant + app.kubernetes.io/name: home-assistant +--- +# Source: home-assistant/charts/home-assistant/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: home-assistant + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: home-assistant + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: home-assistant + helm.sh/chart: home-assistant-4.4.0 + namespace: home-assistant +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: home-assistant + app.kubernetes.io/instance: home-assistant + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: home-assistant + app.kubernetes.io/name: home-assistant + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: PUID + value: "1000" + - name: PGID + value: "1000" + - name: DEFAULT_WORKSPACE + value: /config + envFrom: + - secretRef: + name: home-assistant-code-server-password-secret + image: ghcr.io/linuxserver/code-server:4.106.2@sha256:a98afdbcb59559f11e5e8df284062e55da1076b2e470e13db4aae133ea82bad0 + imagePullPolicy: IfNotPresent + name: code-server + resources: + requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - mountPath: /config/home-assistant + name: config + - env: + - name: TZ + value: US/Central + image: ghcr.io/home-assistant/home-assistant:2025.11.3 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 50m + memory: 512Mi + volumeMounts: + - mountPath: /config + name: config + volumes: + - name: config + persistentVolumeClaim: + claimName: home-assistant-config +--- +# Source: home-assistant/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: home-assistant-code-server-password-secret + namespace: home-assistant + labels: + app.kubernetes.io/name: 
home-assistant-code-server-password-secret + app.kubernetes.io/instance: home-assistant + app.kubernetes.io/part-of: home-assistant +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/home-assistant/code-server/auth + metadataPolicy: None + property: PASSWORD + - secretKey: SUDO_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/home-assistant/code-server/auth + metadataPolicy: None + property: SUDO_PASSWORD +--- +# Source: home-assistant/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: home-assistant-token-secret + namespace: home-assistant + labels: + app.kubernetes.io/name: home-assistant-token-secret + app.kubernetes.io/instance: home-assistant + app.kubernetes.io/part-of: home-assistant +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: bearer-token + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/home-assistant/auth + metadataPolicy: None + property: bearer-token +--- +# Source: home-assistant/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-home-assistant + namespace: home-assistant + labels: + app.kubernetes.io/name: http-route-home-assistant + app.kubernetes.io/instance: home-assistant + app.kubernetes.io/part-of: home-assistant +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - home-assistant.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: home-assistant-main + port: 80 + weight: 100 +--- +# Source: home-assistant/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-home-assistant-code-server + namespace: home-assistant + labels: + app.kubernetes.io/name: http-route-home-assistant-code-server + app.kubernetes.io/instance: home-assistant + app.kubernetes.io/part-of: home-assistant +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - home-assistant-code-server.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: home-assistant-code-server + port: 8443 + weight: 100 +--- +# Source: home-assistant/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: home-assistant + namespace: home-assistant + labels: + app.kubernetes.io/name: home-assistant + app.kubernetes.io/instance: home-assistant + app.kubernetes.io/part-of: home-assistant +spec: + selector: + matchLabels: + app.kubernetes.io/name: home-assistant + app.kubernetes.io/service: home-assistant-main + app.kubernetes.io/instance: home-assistant + endpoints: + - port: http + interval: 3m + scrapeTimeout: 1m + path: /api/prometheus + bearerTokenSecret: + name: home-assistant-token-secret + key: bearer-token diff --git a/clusters/cl01tl/manifests/homepage-dev/homepage-dev.yaml b/clusters/cl01tl/manifests/homepage-dev/homepage-dev.yaml new file mode 100644 index 000000000..49e744f13 --- /dev/null +++ b/clusters/cl01tl/manifests/homepage-dev/homepage-dev.yaml @@ -0,0 +1,307 @@ +--- +# Source: homepage/charts/homepage/templates/common.yaml 
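+# Editor's note: homepage reads each key of the ConfigMap below as a separate
+# file; the Deployment further down mounts them one-by-one via subPath, roughly:
+#   volumeMounts:
+#     - mountPath: /app/config/services.yaml
+#       name: config
+#       subPath: services.yaml
+# so a change to one key only rewrites the matching file, and the stakater
+# reloader annotation plus the checksum/configMaps pod annotation roll the pod.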
+--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: homepage-dev + labels: + app.kubernetes.io/instance: homepage-dev + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: homepage + helm.sh/chart: homepage-4.4.0 + namespace: homepage-dev +data: + bookmarks.yaml: "" + docker.yaml: "" + kubernetes.yaml: "" + services.yaml: | + - Applications: + - Auth: + icon: sh-authentik.webp + description: Authentik + href: https://auth.alexlebens.dev + siteMonitor: https://auth.alexlebens.dev + statusStyle: dot + - Gitea: + icon: sh-gitea.webp + description: Gitea + href: https://gitea.alexlebens.dev + siteMonitor: https://gitea.alexlebens.dev + statusStyle: dot + - Code: + icon: sh-visual-studio-code.webp + description: VS Code + href: https://codeserver.alexlebens.dev + siteMonitor: https://codeserver.alexlebens.dev + statusStyle: dot + - Site: + icon: https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/logo-new-round.png + description: Profile Website + href: https://www.alexlebens.dev + siteMonitor: https://www.alexlebens.dev + statusStyle: dot + - Content Management: + icon: directus.png + description: Directus + href: https://directus.alexlebens.dev + siteMonitor: https://directus.alexlebens.dev + statusStyle: dot + - Social Media Management: + icon: sh-postiz.webp + description: Postiz + href: https://postiz.alexlebens.dev + siteMonitor: https://postiz.alexlebens.dev + statusStyle: dot + - Chat: + icon: sh-element.webp + description: Matrix + href: https://chat.alexlebens.dev + siteMonitor: https://chat.alexlebens.dev + statusStyle: dot + - Wiki: + icon: sh-outline.webp + description: Outline + href: https://wiki.alexlebens.dev + siteMonitor: https://wiki.alexlebens.dev + statusStyle: dot + - Passwords: + icon: sh-vaultwarden-light.webp + description: Vaultwarden + href: https://passwords.alexlebens.dev + siteMonitor: https://passwords.alexlebens.dev + statusStyle: dot + - Bookmarks: + icon: sh-karakeep-light.webp + description: Karakeep + href: https://karakeep.alexlebens.dev + siteMonitor: https://karakeep.alexlebens.dev + statusStyle: dot + - RSS: + icon: sh-freshrss.webp + description: FreshRSS + href: https://rss.alexlebens.dev + siteMonitor: https://rss.alexlebens.dev + statusStyle: dot + settings.yaml: | + favicon: https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/logo-new-round.svg + headerStyle: clean + hideVersion: true + color: zinc + background: + image: https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/background.jpg + brightness: 50 + theme: dark + disableCollapse: true + widgets.yaml: | + - logo: + icon: https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/logo-new-round.png + - datetime: + text_size: xl + format: + dateStyle: long + timeStyle: short + hour12: false + - openmeteo: + label: St. 
Paul + latitude: 44.954445 + longitude: -93.091301 + timezone: America/Chicago + units: metric + cache: 5 + format: + maximumFractionDigits: 0 +--- +# Source: homepage/charts/homepage/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: homepage-dev + labels: + app.kubernetes.io/instance: homepage-dev + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: homepage + app.kubernetes.io/service: homepage-dev + helm.sh/chart: homepage-4.4.0 + namespace: homepage-dev +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 3000 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: homepage-dev + app.kubernetes.io/name: homepage +--- +# Source: homepage/charts/cloudflared/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: homepage-dev-cloudflared + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: homepage-dev + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cloudflared + app.kubernetes.io/version: 2025.10.0 + helm.sh/chart: cloudflared-1.23.0 + namespace: homepage-dev +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: cloudflared + app.kubernetes.io/instance: homepage-dev + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: homepage-dev + app.kubernetes.io/name: cloudflared + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - args: + - tunnel + - --protocol + - http2 + - --no-autoupdate + - run + - --token + - $(CF_MANAGED_TUNNEL_TOKEN) + env: + - name: CF_MANAGED_TUNNEL_TOKEN + valueFrom: + secretKeyRef: + key: cf-tunnel-token + name: homepage-dev-cloudflared-secret + image: cloudflare/cloudflared:2025.11.1 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi +--- +# Source: homepage/charts/homepage/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: homepage-dev + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: homepage-dev + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: homepage + helm.sh/chart: homepage-4.4.0 + annotations: + reloader.stakater.com/auto: "true" + namespace: homepage-dev +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: homepage + app.kubernetes.io/instance: homepage-dev + template: + metadata: + annotations: + checksum/configMaps: d1306b9af923c5b3f02566a43c7a141c7168ebf8a74e5ff1a2d5d8082001c1a1 + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: homepage-dev + app.kubernetes.io/name: homepage + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: HOMEPAGE_ALLOWED_HOSTS + value: home.alexlebens.dev + image: ghcr.io/gethomepage/homepage:v1.7.0 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - mountPath: /app/config/bookmarks.yaml + mountPropagation: None + name: config + readOnly: true + subPath: bookmarks.yaml + - mountPath: 
/app/config/docker.yaml + mountPropagation: None + name: config + readOnly: true + subPath: docker.yaml + - mountPath: /app/config/kubernetes.yaml + mountPropagation: None + name: config + readOnly: true + subPath: kubernetes.yaml + - mountPath: /app/config/services.yaml + mountPropagation: None + name: config + readOnly: true + subPath: services.yaml + - mountPath: /app/config/settings.yaml + mountPropagation: None + name: config + readOnly: true + subPath: settings.yaml + - mountPath: /app/config/widgets.yaml + mountPropagation: None + name: config + readOnly: true + subPath: widgets.yaml + volumes: + - configMap: + name: homepage-dev + name: config +--- +# Source: homepage/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: homepage-dev-cloudflared-secret + namespace: homepage-dev + labels: + app.kubernetes.io/name: homepage-dev-cloudflared-secret + app.kubernetes.io/instance: homepage-dev + app.kubernetes.io/part-of: homepage-dev +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: cf-tunnel-token + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cloudflare/tunnels/homepage-dev + metadataPolicy: None + property: token diff --git a/clusters/cl01tl/manifests/homepage/homepage.yaml b/clusters/cl01tl/manifests/homepage/homepage.yaml new file mode 100644 index 000000000..97261bdad --- /dev/null +++ b/clusters/cl01tl/manifests/homepage/homepage.yaml @@ -0,0 +1,1132 @@ +--- +# Source: homepage/charts/homepage/templates/common.yaml +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: homepage + labels: + app.kubernetes.io/instance: homepage + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: homepage + helm.sh/chart: homepage-4.4.0 + namespace: homepage +secrets: + - name: homepage-homepage-sa-token +--- +# Source: homepage/charts/homepage/templates/common.yaml +apiVersion: v1 +kind: Secret +type: kubernetes.io/service-account-token +metadata: + name: homepage-homepage-sa-token + labels: + app.kubernetes.io/instance: homepage + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: homepage + helm.sh/chart: homepage-4.4.0 + annotations: + kubernetes.io/service-account.name: homepage + namespace: homepage +--- +# Source: homepage/charts/homepage/templates/common.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: homepage + labels: + app.kubernetes.io/instance: homepage + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: homepage + helm.sh/chart: homepage-4.4.0 + namespace: homepage +data: + bookmarks.yaml: | + - External Services: + - Github: + - abbr: GH + href: https://github.com/alexlebens + - Digital Ocean: + - abbr: DO + href: https://www.digitalocean.com/ + - AWS: + - abbr: AW + href: https://aws.amazon.com/console/ + - Cloudflare: + - abbr: CF + href: https://dash.cloudflare.com/b76e303258b84076ee01fd0f515c0768 + - Tailscale: + - abbr: TS + href: https://login.tailscale.com/admin/machines + - ProtonVPN: + - abbr: PV + href: https://account.protonvpn.com/ + - Unifi: + - abbr: UF + href: https://unifi.ui.com/ + - Pushover: + - abbr: PO + href: https://pushover.net + - ReCaptcha: + - abbr: RC + href: https://www.google.com/recaptcha/admin/site/698983587 + - Trackers: + - Torrentleech: + - abbr: TL + href: https://www.torrentleech.org + - Avistaz: + - abbr: AV + href: https://avistaz.to + - Cinemaz: + - abbr: CM + href: https://cinemaz.to + - Cathode Ray Tube: + - abbr: CRT + href: https://www.cathode-ray.tube + - Alpha Ratio: + - 
abbr: AL + href: https://alpharatio.cc/ + - MV Group: + - abbr: MV + href: https://forums.mvgroup.org + docker.yaml: "" + kubernetes.yaml: | + mode: cluster + services.yaml: | + - Media: + - Plex: + icon: sh-plex.webp + description: Media server + href: https://plex.alexlebens.net + siteMonitor: http://plex.plex:32400 + statusStyle: dot + - Jellyfin: + icon: sh-jellyfin.webp + description: Media server + href: https://jellyfin.alexlebens.net + siteMonitor: http://jellyfin.jellyfin:80 + statusStyle: dot + - Media Requests: + icon: sh-overseerr.webp + description: Overseerr + href: https://overseerr.alexlebens.net + siteMonitor: http://overseerr.overseerr:80 + statusStyle: dot + - Media Tracking: + icon: sh-yamtrack.webp + description: Yamtrack + href: https://yamtrack.alexlebens.net + siteMonitor: http://yamtrack.yamtrack:80 + statusStyle: dot + - YouTube Archive: + icon: sh-tube-archivist-light.webp + description: TubeArchivist + href: https://tubearchivist.alexlebens.net/login + siteMonitor: http://tubearchivist.tubearchivist:80 + statusStyle: dot + - Photos: + icon: sh-immich.webp + description: Immich + href: https://immich.alexlebens.net + siteMonitor: http://immich-main.immich:2283 + statusStyle: dot + - Pictures: + icon: sh-photoview.webp + description: Photoview + href: https://photoview.alexlebens.net + siteMonitor: http://photoview.photoview:80 + statusStyle: dot + - Podcasts and Audiobooks: + icon: sh-audiobookshelf.webp + description: Audiobookshelf + href: https://audiobookshelf.alexlebens.net + siteMonitor: http://audiobookshelf.audiobookshelf:80 + statusStyle: dot + - Books: + icon: sh-booklore.webp + description: Booklore + href: https://booklore.alexlebens.net + siteMonitor: http://booklore.booklore:80 + statusStyle: dot + - Public: + - Site: + icon: https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/logo-new-round.png + description: Profile Website + href: https://www.alexlebens.dev + siteMonitor: https://www.alexlebens.dev + statusStyle: dot + - Content Management: + icon: directus.png + description: Directus + href: https://directus.alexlebens.dev + siteMonitor: https://directus.alexlebens.dev + statusStyle: dot + - Social Media Management: + icon: sh-postiz.webp + description: Postiz + href: https://postiz.alexlebens.dev + siteMonitor: https://postiz.alexlebens.dev + statusStyle: dot + - Chat: + icon: sh-element.webp + description: Matrix + href: https://chat.alexlebens.dev + siteMonitor: https://chat.alexlebens.dev + statusStyle: dot + - Wiki: + icon: sh-outline.webp + description: Outline + href: https://wiki.alexlebens.dev + siteMonitor: https://wiki.alexlebens.dev + statusStyle: dot + - Passwords: + icon: sh-vaultwarden-light.webp + description: Vaultwarden + href: https://passwords.alexlebens.dev + siteMonitor: https://passwords.alexlebens.dev + statusStyle: dot + - Bookmarks: + icon: sh-karakeep-light.webp + description: Karakeep + href: https://karakeep.alexlebens.dev + siteMonitor: https://karakeep.alexlebens.dev + statusStyle: dot + - RSS: + icon: sh-freshrss.webp + description: FreshRSS + href: https://rss.alexlebens.dev + siteMonitor: https://rss.alexlebens.dev + statusStyle: dot + - Internal: + - Home Automation: + icon: sh-home-assistant.webp + description: Home Assistant + href: https://home-assistant.alexlebens.net + siteMonitor: http://home-assistant-main.home-assistant:80 + statusStyle: dot + - Budgeting: + icon: sh-actual-budget.webp + description: Actual + href: https://actual.alexlebens.net + siteMonitor:
http://actual.actual:80 + statusStyle: dot + - AI: + icon: sh-ollama.webp + description: Ollama + href: https://ollama.alexlebens.net + siteMonitor: http://ollama-web.ollama:80 + statusStyle: dot + - AI Image: + icon: https://user-images.githubusercontent.com/36368048/196280761-1535f413-a91e-4b6a-af6a-b890f8ae204c.png + description: Stable Diffusion + href: https://stable-diffusion-pd05wd.boreal-beaufort.ts.net + siteMonitor: https://stable-diffusion-pd05wd.boreal-beaufort.ts.net + statusStyle: dot + - Search: + icon: sh-searxng.webp + description: Searxng + href: https://searxng.alexlebens.net/ + siteMonitor: http://searxng-browser.searxng:80 + statusStyle: dot + - Email: + icon: sh-roundcube.webp + description: Roundcube + href: https://mail.alexlebens.net + siteMonitor: http://roundcube.roundcube:80 + statusStyle: dot + - Wiki: + icon: sh-kiwix-light.webp + description: Kiwix + href: https://kiwix.alexlebens.net + siteMonitor: http://kiwix.kiwix:80 + statusStyle: dot + - Code: + - Code (Public): + icon: sh-gitea.webp + description: Gitea + href: https://gitea.alexlebens.dev + siteMonitor: https://gitea.alexlebens.dev + statusStyle: dot + - Code (Local): + icon: sh-gitea.webp + description: Gitea + href: https://gitea.alexlebens.net + siteMonitor: https://gitea.alexlebens.net + statusStyle: dot + - Code (ps10rp): + icon: sh-gitea.webp + description: Gitea + href: https://gitea-ps10rp.boreal-beaufort.ts.net + siteMonitor: https://gitea-ps10rp.boreal-beaufort.ts.net + statusStyle: dot + - IDE (Public): + icon: sh-visual-studio-code.webp + description: VS Code + href: https://codeserver.alexlebens.dev + siteMonitor: https://codeserver.alexlebens.dev + statusStyle: dot + - IDE (Home Assistant): + icon: sh-visual-studio-code.webp + description: Edit config for Home Assistant + href: https://home-assistant-code-server.alexlebens.net + siteMonitor: http://home-assistant-code-server.home-assistant:8443 + statusStyle: dot + - Continuous Deployment: + icon: sh-argo-cd.webp + description: ArgoCD + href: https://argocd.alexlebens.net + siteMonitor: http://argocd-server.argocd:80 + statusStyle: dot + - Docker Deployment: + icon: sh-komodo-light.webp + description: Komodo + href: https://komodo.alexlebens.net + siteMonitor: http://komodo-main.komodo:80 + statusStyle: dot + - Automation: + - Deployment Workflows: + icon: sh-argo-cd.webp + description: Argo Workflows + href: https://argo-workflows.alexlebens.net + siteMonitor: http://argo-workflows-server.argo-workflows:2746 + statusStyle: dot + - API Workflows: + icon: sh-n8n.webp + description: n8n + href: https://n8n.alexlebens.net + siteMonitor: http://n8n-main.n8n:80 + statusStyle: dot + - Jobs: + icon: https://raw.githubusercontent.com/mshade/kronic/main/static/android-chrome-192x192.png + description: Kronic + href: https://kronic.alexlebens.net + siteMonitor: http://kronic.kronic:80 + statusStyle: dot + - Uptime: + icon: sh-gatus.webp + description: Gatus + href: https://gatus.alexlebens.net + siteMonitor: http://gatus.gatus:80 + statusStyle: dot + - Tools: + icon: sh-omnitools.webp + description: OmniTools + href: https://omni-tools.alexlebens.net + siteMonitor: http://omni-tools.omni-tools:80 + statusStyle: dot + - Monitoring: + - Kubernetes: + icon: sh-headlamp.webp + description: Headlamp + href: https://headlamp.alexlebens.net + siteMonitor: http://headlamp.headlamp:80 + statusStyle: dot + - Network Monitoring: + icon: sh-cilium.webp + description: Hubble for Cilium + href: https://hubble.alexlebens.net + siteMonitor: 
http://hubble-ui.kube-system:80 + statusStyle: dot + - Dashboard: + icon: sh-grafana.webp + description: Grafana + href: https://grafana.alexlebens.net + siteMonitor: http://grafana-main-service.grafana-operator:3000/api/health + statusStyle: dot + - Metrics: + icon: sh-prometheus.webp + description: Prometheus + href: https://prometheus.alexlebens.net + siteMonitor: http://kube-prometheus-stack-prometheus.kube-prometheus-stack:9090 + statusStyle: dot + widget: + type: prometheus + url: http://kube-prometheus-stack-prometheus.kube-prometheus-stack:9090 + - Alerting: + icon: sh-prometheus-light.webp + description: Alertmanager + href: https://alertmanager.alexlebens.net + siteMonitor: http://kube-prometheus-stack-alertmanager.kube-prometheus-stack:9093 + statusStyle: dot + widget: + type: prometheusmetric + url: http://kube-prometheus-stack-prometheus.kube-prometheus-stack:9090 + refreshInterval: 120s + metrics: + - label: Alerts Active + query: alertmanager_alerts{state="active"} + - label: Metric Database Size + query: prometheus_tsdb_storage_blocks_bytes + format: + type: bytes + - Tautulli: + icon: sh-tautulli.webp + description: Plex Monitoring + href: https://tautulli.alexlebens.net + siteMonitor: http://tautulli.tautulli:80 + statusStyle: dot + - Jellystat: + icon: sh-jellystat.webp + description: Jellyfin Monitoring + href: https://jellystat.alexlebens.net + siteMonitor: http://jellystat.jellystat:80 + statusStyle: dot + - Services: + - Auth (Public): + icon: sh-authentik.webp + description: Authentik + href: https://auth.alexlebens.dev + siteMonitor: https://auth.alexlebens.dev + statusStyle: dot + - Auth (Local): + icon: sh-authentik.webp + description: Authentik + href: https://authentik.alexlebens.net + siteMonitor: http://authentik-server.authentik:80 + statusStyle: dot + - Email: + icon: sh-stalwart.webp + description: Stalwart + href: https://stalwart.alexlebens.net + siteMonitor: http://stalwart.stalwart:80 + statusStyle: dot + - Notifications: + icon: sh-ntfy.webp + description: ntfy + href: https://ntfy.alexlebens.net + siteMonitor: http://ntfy.ntfy:80 + statusStyle: dot + - Reverse Proxy: + icon: sh-traefik.webp + description: Traefik + href: https://traefik-cl01tl.alexlebens.net/dashboard/#/ + siteMonitor: https://traefik-cl01tl.alexlebens.net/dashboard/#/ + statusStyle: dot + widget: + type: traefik + url: https://traefik-cl01tl.alexlebens.net + - Image Cache: + icon: sh-harbor.webp + description: Harbor + href: https://harbor.alexlebens.net + siteMonitor: http://harbor-portal.harbor:80 + statusStyle: dot + - Hardware: + - Network Management (alexlebens.net): + icon: sh-ubiquiti-unifi.webp + description: Unifi + href: https://unifi.alexlebens.net + siteMonitor: https://unifi.alexlebens.net + statusStyle: dot + - Network Attached Storage: + icon: sh-synology-light.webp + description: Synology + href: https://synology.alexlebens.net + siteMonitor: https://synology.alexlebens.net + statusStyle: dot + widget: + type: diskstation + url: https://synology.alexlebens.net + username: {{HOMEPAGE_VAR_SYNOLOGY_USER}} + password: {{HOMEPAGE_VAR_SYNOLOGY_PASSWORD}} + volume: volume_2 + - TV Tuner: + icon: sh-hdhomerun.webp + description: HD Homerun + href: http://hdhr.alexlebens.net + siteMonitor: http://hdhr.alexlebens.net + statusStyle: dot + widget: + type: hdhomerun + url: http://hdhr.alexlebens.net + tuner: 0 + fields: ["channels", "hd"] + - KVM: + icon: sh-pikvm-light.webp + description: Pi KVM + href: https://pikvm.alexlebens.net + siteMonitor: https://pikvm.alexlebens.net + 
statusStyle: dot + - Server Plug: + icon: sh-shelly.webp + description: Shelly + href: http://it05sp.alexlebens.net + siteMonitor: http://it05sp.alexlebens.net + statusStyle: dot + - Storage: + - Cluster Storage: + icon: sh-ceph.webp + description: Ceph + href: https://ceph.alexlebens.net + siteMonitor: http://rook-ceph-mgr-dashboard.rook-ceph:7000 + statusStyle: dot + - Object Storage (NAS): + icon: sh-garage.webp + description: Garage + href: https://garage-webui.alexlebens.net + siteMonitor: http://garage-webui.garage:3909 + statusStyle: dot + - Object Storage (ps10rp): + icon: sh-garage.webp + description: Garage + href: https://garage-ui-ps10rp.boreal-beaufort.ts.net + siteMonitor: https://garage-ui-ps10rp.boreal-beaufort.ts.net + statusStyle: dot + - Database: + icon: sh-pgadmin-light.webp + description: PGAdmin + href: https://pgadmin.alexlebens.net + siteMonitor: http://pgadmin.pgadmin:80 + statusStyle: dot + - Database: + icon: sh-whodb.webp + description: WhoDB + href: https://whodb.alexlebens.net + siteMonitor: http://whodb.whodb:80 + statusStyle: dot + - Secrets: + icon: sh-hashicorp-vault.webp + description: Vault + href: https://vault.alexlebens.net + siteMonitor: http://vault.vault:8200 + statusStyle: dot + - Backups: + icon: sh-backrest-light.webp + description: Backrest + href: https://backrest.alexlebens.net + siteMonitor: http://backrest.backrest:80 + statusStyle: dot + - Content: + - qUI: + icon: https://raw.githubusercontent.com/autobrr/qui/8487c818886df9abb2b1456f43b54e0ba180a2bd/web/public/icons.svg + description: qBittorrent + href: https://qui.alexlebens.net + siteMonitor: http://qbittorrent-qui.qbittorrent:80 + statusStyle: dot + widget: + type: qbittorrent + url: http://qbittorrent.qbittorrent:8080 + enableLeechProgress: true + - Prowlarr: + icon: sh-prowlarr.webp + description: Indexers + href: https://prowlarr.alexlebens.net + siteMonitor: http://prowlarr.prowlarr:80 + statusStyle: dot + - Huntarr: + icon: https://raw.githubusercontent.com/plexguide/Huntarr.io/main/frontend/static/logo/128.png + description: Content upgrader + href: https://huntarr.alexlebens.net + siteMonitor: http://huntarr.huntarr:80 + statusStyle: dot + - Bazarr: + icon: sh-bazarr.webp + description: Subtitles + href: https://bazarr.alexlebens.net + siteMonitor: http://bazarr.bazarr:80 + statusStyle: dot + - Tdarr: + icon: sh-tdarr.webp + description: Media transcoding and health checks + href: https://tdarr.alexlebens.net + siteMonitor: http://tdarr-web.tdarr:8265 + statusStyle: dot + widget: + type: tdarr + url: http://tdarr-web.tdarr:8265 + - TV Shows: + - Sonarr: + icon: sh-sonarr.webp + description: TV Shows + href: https://sonarr.alexlebens.net + siteMonitor: http://sonarr.sonarr:80 + statusStyle: dot + widget: + type: sonarr + url: http://sonarr.sonarr:80 + key: {{HOMEPAGE_VAR_SONARR_KEY}} + fields: ["wanted", "queued", "series"] + enableQueue: false + - Sonarr 4K: + icon: sh-sonarr.webp + description: TV Shows 4K + href: https://sonarr-4k.alexlebens.net + siteMonitor: http://sonarr-4k.sonarr-4k:80 + statusStyle: dot + widget: + type: sonarr + url: http://sonarr-4k.sonarr-4k:80 + key: {{HOMEPAGE_VAR_SONARR4K_KEY}} + fields: ["wanted", "queued", "series"] + enableQueue: false + - Sonarr Anime: + icon: sh-sonarr.webp + description: Anime Shows + href: https://sonarr-anime.alexlebens.net + siteMonitor: http://sonarr-anime.sonarr-anime:80 + statusStyle: dot + widget: + type: sonarr + url: http://sonarr-anime.sonarr-anime:80 + key: {{HOMEPAGE_VAR_SONARRANIME_KEY}} + fields: ["wanted",
"queued", "series"] + enableQueue: false + - Movies: + - Radarr: + icon: sh-radarr.webp + description: Movies + href: https://radarr.alexlebens.net + siteMonitor: http://radarr.radarr:80 + statusStyle: dot + widget: + type: radarr + url: http://radarr.radarr:80 + key: {{HOMEPAGE_VAR_RADARR_KEY}} + fields: ["wanted", "queued", "movies"] + enableQueue: false + - Radarr 4K: + icon: sh-radarr-4k.webp + description: Movies 4K + href: https://radarr-4k.alexlebens.net + siteMonitor: http://radarr-4k.radarr-4k:80 + statusStyle: dot + widget: + type: radarr + url: http://radarr-4k.radarr-4k:80 + key: {{HOMEPAGE_VAR_RADARR4K_KEY}} + fields: ["wanted", "queued", "movies"] + enableQueue: false + - Radarr Anime: + icon: sh-radarr-anime.webp + description: Anime Movies + href: https://radarr-anime.alexlebens.net + siteMonitor: http://radarr-anime.radarr-anime:80 + statusStyle: dot + widget: + type: radarr + url: http://radarr-anime.radarr-anime:80 + key: {{HOMEPAGE_VAR_RADARRANIME_KEY}} + fields: ["wanted", "queued", "movies"] + enableQueue: false + - Radarr Stand Up: + icon: sh-radarr-light-hybrid.webp + description: Stand Up + href: https://radarr-standup.alexlebens.net + siteMonitor: http://radarr-standup.radarr-standup:80 + statusStyle: dot + widget: + type: radarr + url: http://radarr-standup.radarr-standup:80 + key: {{HOMEPAGE_VAR_RADARRSTANDUP_KEY}} + fields: ["wanted", "queued", "movies"] + enableQueue: false + - Music: + - Lidarr: + icon: sh-lidarr.webp + description: Music + href: https://lidarr.alexlebens.net + siteMonitor: http://lidarr.lidarr:80 + statusStyle: dot + widget: + type: lidarr + url: http://lidarr.lidarr:80 + key: {{HOMEPAGE_VAR_LIDARR_KEY}} + fields: ["wanted", "queued", "artists"] + - LidaTube: + icon: sh-lidatube.webp + description: Searches for Music + href: https://lidatube.alexlebens.net + siteMonitor: http://lidatube.lidatube:80 + statusStyle: dot + - Soulseek: + icon: sh-slskd.webp + description: slskd + href: https://slskd.alexlebens.net + siteMonitor: http://slskd.slskd:5030 + statusStyle: dot + - Books: + - Ephemera: + icon: sh-ephemera.webp + description: Books + href: https://ephemera.alexlebens.net + siteMonitor: http://ephemera.ephemera:80 + statusStyle: dot + - Listenarr: + icon: sh-audiobookrequest.webp + description: Audiobooks + href: https://listenarr.alexlebens.net + siteMonitor: http://listenarr.listenarr:80 + statusStyle: dot + - Other Homes: + - Dev: + icon: sh-homepage.webp + description: Public Homepage + href: https://home.alexlebens.dev + siteMonitor: https://home.alexlebens.dev + statusStyle: dot + - Lebens Home: + icon: sh-homepage.webp + description: Lebens Homepage + href: https://home-ps10rp.boreal-beaufort.ts.net + siteMonitor: https://home-ps10rp.boreal-beaufort.ts.net + statusStyle: dot + settings.yaml: | + favicon: https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/logo-new-round.svg + headerStyle: clean + hideVersion: true + color: zinc + background: + image: https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/background.jpg + brightness: 50 + theme: dark + disableCollapse: true + layout: + - Media: + tab: Applications + icon: mdi-multimedia-#ffffff + - Public: + tab: Applications + icon: mdi-earth-#ffffff + - Internal: + tab: Applications + icon: mdi-security-network-#ffffff + - Code: + tab: Tools + icon: mdi-code-block-braces-#ffffff + - Automation: + tab: Tools + icon: mdi-wrench-#ffffff + - Monitoring: + tab: Tools + icon: mdi-chart-line-#ffffff + - Services: 
+ tab: Services + icon: mdi-toolbox-outline-#ffffff + - Hardware: + tab: Services + icon: mdi-server-network-#ffffff + - Storage: + tab: Services + icon: mdi-database-#ffffff + - Content: + tab: Services + icon: mdi-multimedia-#ffffff + - TV Shows: + tab: Content + icon: mdi-television-#ffffff + - Movies: + tab: Content + icon: mdi-filmstrip-#ffffff + - Music: + tab: Content + icon: mdi-music-box-multiple-#ffffff + - Books: + tab: Content + icon: mdi-book-open-variant-#ffffff + - External Services: + tab: Bookmarks + icon: mdi-cloud-#ffffff + - Other Homes: + tab: Bookmarks + icon: mdi-cloud-#ffffff + - Trackers: + tab: Bookmarks + icon: mdi-cloud-#ffffff + widgets.yaml: | + - logo: + icon: https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/logo-new-round.png + - kubernetes: + cluster: + show: true + cpu: true + memory: true + showLabel: false + label: "Cluster" + nodes: + show: false + - datetime: + text_size: xl + format: + dateStyle: long + timeStyle: short + hour12: false + - openmeteo: + label: St. Paul + latitude: 44.954445 + longitude: -93.091301 + timezone: America/Chicago + units: metric + cache: 5 + format: + maximumFractionDigits: 0 +--- +# Source: homepage/templates/cluster-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: homepage + namespace: homepage + labels: + app.kubernetes.io/name: homepage + app.kubernetes.io/instance: homepage + app.kubernetes.io/part-of: homepage +rules: + - apiGroups: + - "" + resources: + - namespaces + - pods + - nodes + verbs: + - get + - list + - apiGroups: + - extensions + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - traefik.io + resources: + - ingressroutes + verbs: + - get + - list + - apiGroups: + - gateway.networking.k8s.io + resources: + - httproutes + - gateways + verbs: + - get + - list + - apiGroups: + - metrics.k8s.io + resources: + - nodes + - pods + verbs: + - get + - list +--- +# Source: homepage/templates/cluster-role-binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: homepage + namespace: homepage + labels: + app.kubernetes.io/name: homepage + app.kubernetes.io/instance: homepage + app.kubernetes.io/part-of: homepage +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: homepage +subjects: + - kind: ServiceAccount + name: homepage + namespace: homepage +--- +# Source: homepage/charts/homepage/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: homepage + labels: + app.kubernetes.io/instance: homepage + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: homepage + app.kubernetes.io/service: homepage + helm.sh/chart: homepage-4.4.0 + namespace: homepage +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 3000 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: homepage + app.kubernetes.io/name: homepage +--- +# Source: homepage/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: gitea-ps10rp + namespace: homepage + labels: + app.kubernetes.io/name: gitea-ps10rp + app.kubernetes.io/instance: homepage + app.kubernetes.io/part-of: homepage + annotations: + tailscale.com/tailnet-fqdn: gitea-ps10rp.boreal-beaufort.ts.net +spec: + externalName: placeholder + type: ExternalName +--- +# Source: homepage/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: home-ps10rp + namespace: homepage + labels: + 
app.kubernetes.io/name: home-ps10rp + app.kubernetes.io/instance: homepage + app.kubernetes.io/part-of: homepage + annotations: + tailscale.com/tailnet-fqdn: home-ps10rp.boreal-beaufort.ts.net +spec: + externalName: placeholder + type: ExternalName +--- +# Source: homepage/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: garage-ui-ps10rp + namespace: homepage + labels: + app.kubernetes.io/name: garage-ps10rp + app.kubernetes.io/instance: homepage + app.kubernetes.io/part-of: homepage + annotations: + tailscale.com/tailnet-fqdn: garage-ui-ps10rp.boreal-beaufort.ts.net +spec: + externalName: placeholder + type: ExternalName +--- +# Source: homepage/charts/homepage/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: homepage + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: homepage + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: homepage + helm.sh/chart: homepage-4.4.0 + annotations: + reloader.stakater.com/auto: "true" + namespace: homepage +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: homepage + app.kubernetes.io/instance: homepage + template: + metadata: + annotations: + checksum/configMaps: 5b025903635dfc4abfcdb07fac7674f70d46a2d7bbeeeb1c7cd95e68e03f53ea + checksum/secrets: d3ba83f111cd32f92c909268c55ad8bbd4f9e299b74b35b33c1a011180d8b378 + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: homepage + app.kubernetes.io/name: homepage + spec: + enableServiceLinks: false + serviceAccountName: homepage + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: HOMEPAGE_ALLOWED_HOSTS + value: home.alexlebens.net + envFrom: + - secretRef: + name: homepage-keys-secret + image: ghcr.io/gethomepage/homepage:v1.7.0 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 256Mi + volumeMounts: + - mountPath: /app/config/bookmarks.yaml + mountPropagation: None + name: config + readOnly: true + subPath: bookmarks.yaml + - mountPath: /app/config/docker.yaml + mountPropagation: None + name: config + readOnly: true + subPath: docker.yaml + - mountPath: /app/config/kubernetes.yaml + mountPropagation: None + name: config + readOnly: true + subPath: kubernetes.yaml + - mountPath: /app/config/services.yaml + mountPropagation: None + name: config + readOnly: true + subPath: services.yaml + - mountPath: /app/config/settings.yaml + mountPropagation: None + name: config + readOnly: true + subPath: settings.yaml + - mountPath: /app/config/widgets.yaml + mountPropagation: None + name: config + readOnly: true + subPath: widgets.yaml + volumes: + - configMap: + name: homepage + name: config +--- +# Source: homepage/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: homepage-keys-secret + namespace: homepage + labels: + app.kubernetes.io/name: homepage-keys-secret + app.kubernetes.io/instance: homepage + app.kubernetes.io/part-of: homepage +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: HOMEPAGE_VAR_SYNOLOGY_USER + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /synology/auth/cl01tl + metadataPolicy: None + property: user + - secretKey: HOMEPAGE_VAR_SYNOLOGY_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + 
key: /synology/auth/cl01tl + metadataPolicy: None + property: password + - secretKey: HOMEPAGE_VAR_UNIFI_USER + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /unifi/auth/cl01tl + metadataPolicy: None + property: user + - secretKey: HOMEPAGE_VAR_UNIFI_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /unifi/auth/cl01tl + metadataPolicy: None + property: password + - secretKey: HOMEPAGE_VAR_SONARR_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/sonarr4/key + metadataPolicy: None + property: key + - secretKey: HOMEPAGE_VAR_SONARR4K_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/sonarr4-4k/key + metadataPolicy: None + property: key + - secretKey: HOMEPAGE_VAR_SONARRANIME_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/sonarr4-anime/key + metadataPolicy: None + property: key + - secretKey: HOMEPAGE_VAR_RADARR_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/radarr5/key + metadataPolicy: None + property: key + - secretKey: HOMEPAGE_VAR_RADARR4K_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/radarr5-4k/key + metadataPolicy: None + property: key + - secretKey: HOMEPAGE_VAR_RADARRANIME_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/radarr5-anime/key + metadataPolicy: None + property: key + - secretKey: HOMEPAGE_VAR_RADARRSTANDUP_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/radarr5-standup/key + metadataPolicy: None + property: key + - secretKey: HOMEPAGE_VAR_LIDARR_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/lidarr2/key + metadataPolicy: None + property: key + - secretKey: HOMEPAGE_VAR_PROWLARR_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/prowlarr/key + metadataPolicy: None + property: key +--- +# Source: homepage/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-homepage + namespace: homepage + labels: + app.kubernetes.io/name: http-route-homepage + app.kubernetes.io/instance: homepage + app.kubernetes.io/part-of: homepage +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - home.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: homepage + port: 80 + weight: 100 diff --git a/clusters/cl01tl/manifests/huntarr/huntarr.yaml b/clusters/cl01tl/manifests/huntarr/huntarr.yaml new file mode 100644 index 000000000..7b375e62a --- /dev/null +++ b/clusters/cl01tl/manifests/huntarr/huntarr.yaml @@ -0,0 +1,129 @@ +--- +# Source: huntarr/charts/huntarr/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: huntarr-config + labels: + app.kubernetes.io/instance: huntarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: huntarr + helm.sh/chart: huntarr-4.4.0 + namespace: huntarr +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" + storageClassName: "ceph-block" +--- +# Source: huntarr/charts/huntarr/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: huntarr + labels: + app.kubernetes.io/instance: huntarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: huntarr 
+ app.kubernetes.io/service: huntarr + helm.sh/chart: huntarr-4.4.0 + namespace: huntarr +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 9705 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: huntarr + app.kubernetes.io/name: huntarr +--- +# Source: huntarr/charts/huntarr/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: huntarr + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: huntarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: huntarr + helm.sh/chart: huntarr-4.4.0 + namespace: huntarr +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: huntarr + app.kubernetes.io/instance: huntarr + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: huntarr + app.kubernetes.io/name: huntarr + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + image: ghcr.io/plexguide/huntarr:8.2.10 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /config + name: config + volumes: + - name: config + persistentVolumeClaim: + claimName: huntarr-config +--- +# Source: huntarr/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-huntarr + namespace: huntarr + labels: + app.kubernetes.io/name: http-route-huntarr + app.kubernetes.io/instance: huntarr + app.kubernetes.io/part-of: huntarr +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - huntarr.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: huntarr + port: 80 + weight: 100 diff --git a/clusters/cl01tl/manifests/immich/immich.yaml b/clusters/cl01tl/manifests/immich/immich.yaml new file mode 100644 index 000000000..64bc6b6d4 --- /dev/null +++ b/clusters/cl01tl/manifests/immich/immich.yaml @@ -0,0 +1,1025 @@ +--- +# Source: immich/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: immich-nfs-storage + namespace: immich + labels: + app.kubernetes.io/name: immich-nfs-storage + app.kubernetes.io/instance: immich + app.kubernetes.io/part-of: immich +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage/Immich + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: immich/charts/immich/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: immich + labels: + app.kubernetes.io/instance: immich + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: immich + helm.sh/chart: immich-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: immich +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "10Gi" + storageClassName: "ceph-block" +--- +# Source: immich/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: immich-nfs-storage + namespace: immich + labels: + 
app.kubernetes.io/name: immich-nfs-storage + app.kubernetes.io/instance: immich + app.kubernetes.io/part-of: immich +spec: + volumeName: immich-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: immich/charts/immich/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: immich-machine-learning + labels: + app.kubernetes.io/instance: immich + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: immich + app.kubernetes.io/service: immich-machine-learning + helm.sh/chart: immich-4.4.0 + namespace: immich +spec: + type: ClusterIP + ports: + - port: 3003 + targetPort: 3003 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: machine-learning + app.kubernetes.io/instance: immich + app.kubernetes.io/name: immich +--- +# Source: immich/charts/immich/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: immich-main + labels: + app.kubernetes.io/instance: immich + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: immich + app.kubernetes.io/service: immich-main + helm.sh/chart: immich-4.4.0 + namespace: immich +spec: + type: ClusterIP + ports: + - port: 2283 + targetPort: 2283 + protocol: TCP + name: http + - port: 8081 + targetPort: 8081 + protocol: TCP + name: metrics-api + - port: 8082 + targetPort: 8082 + protocol: TCP + name: metrics-ms + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: immich + app.kubernetes.io/name: immich +--- +# Source: immich/charts/immich/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: immich-machine-learning + labels: + app.kubernetes.io/controller: machine-learning + app.kubernetes.io/instance: immich + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: immich + helm.sh/chart: immich-4.4.0 + namespace: immich +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: machine-learning + app.kubernetes.io/name: immich + app.kubernetes.io/instance: immich + template: + metadata: + labels: + app.kubernetes.io/controller: machine-learning + app.kubernetes.io/instance: immich + app.kubernetes.io/name: immich + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TRANSFORMERS_CACHE + value: /cache + image: ghcr.io/immich-app/immich-machine-learning:v2.3.1 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /ping + port: 3003 + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + name: main + readinessProbe: + failureThreshold: 3 + httpGet: + path: /ping + port: 3003 + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + resources: + limits: + gpu.intel.com/i915: 1 + requests: + cpu: 10m + gpu.intel.com/i915: 1 + memory: 256Mi + volumeMounts: + - mountPath: /cache + name: cache + volumes: + - name: cache + persistentVolumeClaim: + claimName: immich +--- +# Source: immich/charts/immich/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: immich-main + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: immich + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: immich + helm.sh/chart: immich-4.4.0 + namespace: immich +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + 
matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: immich + app.kubernetes.io/instance: immich + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: immich + app.kubernetes.io/name: immich + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: IMMICH_TELEMETRY_INCLUDE + value: all + - name: IMMICH_CONFIG_FILE + value: /config/immich.json + - name: IMMICH_MACHINE_LEARNING_URL + value: http://immich-machine-learning.immich:3003 + - name: REDIS_HOSTNAME + value: redis-replication-immich-master + - name: DB_VECTOR_EXTENSION + value: vectorchord + - name: DB_HOSTNAME + valueFrom: + secretKeyRef: + key: host + name: immich-postgresql-17-cluster-app + - name: DB_DATABASE_NAME + valueFrom: + secretKeyRef: + key: dbname + name: immich-postgresql-17-cluster-app + - name: DB_PORT + valueFrom: + secretKeyRef: + key: port + name: immich-postgresql-17-cluster-app + - name: DB_USERNAME + valueFrom: + secretKeyRef: + key: user + name: immich-postgresql-17-cluster-app + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + key: password + name: immich-postgresql-17-cluster-app + image: ghcr.io/immich-app/immich-server:v2.3.1 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /api/server/ping + port: 2283 + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + name: main + readinessProbe: + failureThreshold: 3 + httpGet: + path: /api/server/ping + port: 2283 + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + resources: + limits: + gpu.intel.com/i915: 1 + requests: + cpu: 10m + gpu.intel.com/i915: 1 + memory: 512Mi + startupProbe: + failureThreshold: 30 + httpGet: + path: /api/server/ping + port: 2283 + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + volumeMounts: + - mountPath: /config/immich.json + mountPropagation: None + name: config + readOnly: true + subPath: immich.json + - mountPath: /usr/src/app/upload + name: media + volumes: + - name: config + secret: + secretName: immich-config-secret + - name: media + persistentVolumeClaim: + claimName: immich-nfs-storage +--- +# Source: immich/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: immich-postgresql-17-cluster + namespace: immich + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: immich-postgresql-17 + app.kubernetes.io/instance: immich + app.kubernetes.io/part-of: immich + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/tensorchord/cloudnative-vectorchord:17.5-0.4.3" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "immich-postgresql-17-external-backup" + serverName: "immich-postgresql-17-backup-2" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "immich-postgresql-17-garage-local-backup" + serverName: "immich-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "immich-postgresql-17-recovery" + serverName: immich-postgresql-17-backup-1 + + 
storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 100m + memory: 256Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + shared_preload_libraries: + - vchord.so + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 256MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: immich-postgresql-17-backup-1 + + externalClusters: + - name: immich-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "immich-postgresql-17-recovery" + serverName: immich-postgresql-17-backup-1 +--- +# Source: immich/templates/external-secrets.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: immich-config-secret + namespace: immich + labels: + app.kubernetes.io/name: immich-config-secret + app.kubernetes.io/instance: immich + app.kubernetes.io/part-of: immich +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: immich.json + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/immich/config + metadataPolicy: None + property: immich.json +--- +# Source: immich/templates/external-secrets.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: immich-postgresql-17-cluster-backup-secret + namespace: immich + labels: + app.kubernetes.io/name: immich-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: immich + app.kubernetes.io/part-of: immich +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: immich/templates/external-secrets.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: immich-postgresql-17-cluster-backup-secret-garage + namespace: immich + labels: + app.kubernetes.io/name: immich-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: immich + app.kubernetes.io/part-of: immich +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: immich/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-immich + namespace: immich + labels: + app.kubernetes.io/name: 
http-route-immich + app.kubernetes.io/instance: immich + app.kubernetes.io/part-of: immich +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - immich.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: immich-main + port: 2283 + weight: 100 +--- +# Source: immich/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "immich-postgresql-17-external-backup" + namespace: immich + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: immich-postgresql-17 + app.kubernetes.io/instance: immich + app.kubernetes.io/part-of: immich + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/immich/immich-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: immich-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: immich-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: immich/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "immich-postgresql-17-garage-local-backup" + namespace: immich + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: immich-postgresql-17 + app.kubernetes.io/instance: immich + app.kubernetes.io/part-of: immich + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/immich/immich-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: immich-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: immich-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: immich-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: immich/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "immich-postgresql-17-recovery" + namespace: immich + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: immich-postgresql-17 + app.kubernetes.io/instance: immich + app.kubernetes.io/part-of: immich + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + configuration: + destinationPath: s3://postgres-backups/cl01tl/immich/immich-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + wal: + compression: snappy + maxParallel: 1 + data: + compression: snappy + jobs: 1 + s3Credentials: + accessKeyId: + name: immich-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: immich-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY +--- +# Source: immich/charts/postgres-17-cluster/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: immich-postgresql-17-alert-rules + namespace: immich + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: immich-postgresql-17 + app.kubernetes.io/instance: immich + app.kubernetes.io/part-of: immich + app.kubernetes.io/version: "6.16.0" + 
app.kubernetes.io/managed-by: Helm
+spec:
+  groups:
+    - name: cloudnative-pg/immich-postgresql-17
+      rules:
+        - alert: CNPGClusterBackendsWaitingWarning
+          annotations:
+            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
+            description: |-
+              Pod {{ $labels.pod }}
+              has been waiting for longer than 5 minutes
+          expr: |
+            cnpg_backends_waiting_total > 300
+          for: 1m
+          labels:
+            severity: warning
+            namespace: immich
+            cnpg_cluster: immich-postgresql-17-cluster
+        - alert: CNPGClusterDatabaseDeadlockConflictsWarning
+          annotations:
+            summary: CNPG Cluster has over 10 deadlock conflicts.
+            description: |-
+              There are over 10 deadlock conflicts in
+              {{ $labels.pod }}
+          expr: |
+            cnpg_pg_stat_database_deadlocks > 10
+          for: 1m
+          labels:
+            severity: warning
+            namespace: immich
+            cnpg_cluster: immich-postgresql-17-cluster
+        - alert: CNPGClusterHACritical
+          annotations:
+            summary: CNPG Cluster has no standby replicas!
+            description: |-
+              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
+              risk of data loss and downtime if the primary instance fails.
+
+              The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
+              will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
+
+              This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
+              instances. The replaced instance may need some time to catch up with the cluster primary instance.
+
+              This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
+              case you may want to silence it.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
+          expr: |
+            max by (job) (cnpg_pg_replication_streaming_replicas{namespace="immich"} - cnpg_pg_replication_is_wal_receiver_up{namespace="immich"}) < 1
+          for: 5m
+          labels:
+            severity: critical
+            namespace: immich
+            cnpg_cluster: immich-postgresql-17-cluster
+        - alert: CNPGClusterHAWarning
+          annotations:
+            summary: CNPG Cluster has less than 2 standby replicas.
+            description: |-
+              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
+              your cluster at risk if another instance fails. The cluster is still able to operate normally, although
+              the `-ro` and `-r` endpoints operate at reduced capacity.
+
+              This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
+              need some time to catch up with the cluster primary instance.
+
+              This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
+              In this case you may want to silence it.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
+          expr: |
+            max by (job) (cnpg_pg_replication_streaming_replicas{namespace="immich"} - cnpg_pg_replication_is_wal_receiver_up{namespace="immich"}) < 2
+          for: 5m
+          labels:
+            severity: warning
+            namespace: immich
+            cnpg_cluster: immich-postgresql-17-cluster
+        - alert: CNPGClusterHighConnectionsCritical
+          annotations:
+            summary: CNPG Instance connection usage is critical!
+            description: |-
+              CloudNativePG Cluster "immich/immich-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
+              the maximum number of connections.
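+            # The expr below compares each pod's backend count
+            # (cnpg_backends_total) against its max_connections setting;
+            # this critical alert fires above 95% usage and the warning
+            # variant that follows fires above 80%.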
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
+          expr: |
+            sum by (pod) (cnpg_backends_total{namespace="immich", pod=~"immich-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="immich", pod=~"immich-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
+          for: 5m
+          labels:
+            severity: critical
+            namespace: immich
+            cnpg_cluster: immich-postgresql-17-cluster
+        - alert: CNPGClusterHighConnectionsWarning
+          annotations:
+            summary: CNPG Instance is approaching the maximum number of connections.
+            description: |-
+              CloudNativePG Cluster "immich/immich-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
+              the maximum number of connections.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
+          expr: |
+            sum by (pod) (cnpg_backends_total{namespace="immich", pod=~"immich-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="immich", pod=~"immich-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
+          for: 5m
+          labels:
+            severity: warning
+            namespace: immich
+            cnpg_cluster: immich-postgresql-17-cluster
+        - alert: CNPGClusterHighReplicationLag
+          annotations:
+            summary: CNPG Cluster high replication lag.
+            description: |-
+              CloudNativePG Cluster "immich/immich-postgresql-17-cluster" is experiencing a high replication lag of
+              {{`{{`}} $value {{`}}`}}ms.
+
+              High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
+          expr: |
+            max(cnpg_pg_replication_lag{namespace="immich",pod=~"immich-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
+          for: 5m
+          labels:
+            severity: warning
+            namespace: immich
+            cnpg_cluster: immich-postgresql-17-cluster
+        - alert: CNPGClusterInstancesOnSameNode
+          annotations:
+            summary: CNPG Cluster instances are located on the same node.
+            description: |-
+              CloudNativePG Cluster "immich/immich-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
+              instances on the same node {{`{{`}} $labels.node {{`}}`}}.
+
+              A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
+          expr: |
+            count by (node) (kube_pod_info{namespace="immich", pod=~"immich-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
+          for: 5m
+          labels:
+            severity: warning
+            namespace: immich
+            cnpg_cluster: immich-postgresql-17-cluster
+        - alert: CNPGClusterLongRunningTransactionWarning
+          annotations:
+            summary: CNPG Cluster has a query taking longer than 5 minutes.
+            description: |-
+              CloudNativePG Cluster Pod {{ $labels.pod }}
+              has a query that has been running for more than 5 minutes (300 seconds).
+          expr: |-
+            cnpg_backends_max_tx_duration_seconds > 300
+          for: 1m
+          labels:
+            severity: warning
+            namespace: immich
+            cnpg_cluster: immich-postgresql-17-cluster
+        - alert: CNPGClusterLowDiskSpaceCritical
+          annotations:
+            summary: CNPG Instance is running out of disk space!
+            description: |-
+              CloudNativePG Cluster "immich/immich-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
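+            # The expr below derives usage from kubelet volume stats for
+            # three PVC families: the data PVC, the -wal PVC, and any -tbs*
+            # tablespace PVCs; this critical alert fires above 90% usage
+            # and the warning variant that follows fires above 70%.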
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="immich", persistentvolumeclaim=~"immich-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="immich", persistentvolumeclaim=~"immich-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="immich", persistentvolumeclaim=~"immich-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="immich", persistentvolumeclaim=~"immich-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="immich", persistentvolumeclaim=~"immich-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="immich", persistentvolumeclaim=~"immich-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"immich-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: immich + cnpg_cluster: immich-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "immich/immich-postgresql-17-cluster" is running low on disk space. Check attached PVCs. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="immich", persistentvolumeclaim=~"immich-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="immich", persistentvolumeclaim=~"immich-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="immich", persistentvolumeclaim=~"immich-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="immich", persistentvolumeclaim=~"immich-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="immich", persistentvolumeclaim=~"immich-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="immich", persistentvolumeclaim=~"immich-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"immich-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.7 + for: 5m + labels: + severity: warning + namespace: immich + cnpg_cluster: immich-postgresql-17-cluster + - alert: CNPGClusterOffline + annotations: + summary: CNPG Cluster has no running instances! + description: |- + CloudNativePG Cluster "immich/immich-postgresql-17-cluster" has no ready instances. + + Having an offline cluster means your applications will not be able to access the database, leading to + potential service disruption and/or data loss. 
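+            # In the expr below, `OR on() vector(0)` substitutes a zero
+            # sample when no cnpg_collector_up series exist at all, so the
+            # alert still fires when every instance pod has disappeared.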
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
+          expr: |
+            (count(cnpg_collector_up{namespace="immich",pod=~"immich-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
+          for: 5m
+          labels:
+            severity: critical
+            namespace: immich
+            cnpg_cluster: immich-postgresql-17-cluster
+        - alert: CNPGClusterPGDatabaseXidAgeWarning
+          annotations:
+            summary: CNPG Cluster has a high number of transactions since the last frozen XID.
+            description: |-
+              Over 300,000,000 transactions from frozen xid
+              on pod {{ $labels.pod }}
+          expr: |
+            cnpg_pg_database_xid_age > 300000000
+          for: 1m
+          labels:
+            severity: warning
+            namespace: immich
+            cnpg_cluster: immich-postgresql-17-cluster
+        - alert: CNPGClusterPGReplicationWarning
+          annotations:
+            summary: CNPG Cluster standby is lagging behind the primary.
+            description: |-
+              Standby is lagging behind by over 300 seconds (5 minutes)
+          expr: |
+            cnpg_pg_replication_lag > 300
+          for: 1m
+          labels:
+            severity: warning
+            namespace: immich
+            cnpg_cluster: immich-postgresql-17-cluster
+        - alert: CNPGClusterReplicaFailingReplicationWarning
+          annotations:
+            summary: CNPG Cluster has a replica that is failing to replicate.
+            description: |-
+              Replica {{ $labels.pod }}
+              is failing to replicate
+          expr: |
+            cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
+          for: 1m
+          labels:
+            severity: warning
+            namespace: immich
+            cnpg_cluster: immich-postgresql-17-cluster
+        - alert: CNPGClusterZoneSpreadWarning
+          annotations:
+            summary: CNPG Cluster has instances in the same availability zone.
+            description: |-
+              CloudNativePG Cluster "immich/immich-postgresql-17-cluster" has instances in the same availability zone.
+
+              A disaster in one availability zone will lead to a potential service disruption and/or data loss.
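+            # The expr below joins kube_pod_info with kube_node_labels to
+            # count the distinct topology zones hosting instances; on a
+            # single-zone cluster it will always fire and is a likely
+            # candidate for silencing.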
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="immich", pod=~"immich-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: immich + cnpg_cluster: immich-postgresql-17-cluster +--- +# Source: immich/templates/redis-replication.yaml +apiVersion: redis.redis.opstreelabs.in/v1beta2 +kind: RedisReplication +metadata: + name: redis-replication-immich + namespace: immich + labels: + app.kubernetes.io/name: redis-replication-immich + app.kubernetes.io/instance: immich + app.kubernetes.io/part-of: immich +spec: + clusterSize: 3 + podSecurityContext: + runAsUser: 1000 + fsGroup: 1000 + kubernetesConfig: + image: quay.io/opstree/redis:v8.0.3 + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 50m + memory: 128Mi + storage: + volumeClaimTemplate: + spec: + storageClassName: ceph-block + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 1Gi + redisExporter: + enabled: true + image: quay.io/opstree/redis-exporter:v1.48.0 +--- +# Source: immich/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "immich-postgresql-17-daily-backup-scheduled-backup" + namespace: immich + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: immich-postgresql-17 + app.kubernetes.io/instance: immich + app.kubernetes.io/part-of: immich + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: immich-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "immich-postgresql-17-external-backup" +--- +# Source: immich/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "immich-postgresql-17-live-backup-scheduled-backup" + namespace: immich + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: immich-postgresql-17 + app.kubernetes.io/instance: immich + app.kubernetes.io/part-of: immich + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: immich-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "immich-postgresql-17-garage-local-backup" +--- +# Source: immich/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: immich + namespace: immich + labels: + app.kubernetes.io/name: immich + app.kubernetes.io/instance: immich + app.kubernetes.io/part-of: immich +spec: + selector: + matchLabels: + app.kubernetes.io/name: immich + app.kubernetes.io/instance: immich + endpoints: + - port: metrics-api + interval: 3m + scrapeTimeout: 1m + path: /metrics + - port: metrics-ms + interval: 3m + scrapeTimeout: 1m + path: /metrics +--- +# Source: immich/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: redis-replication-immich + namespace: immich + labels: + 
app.kubernetes.io/name: redis-replication-immich + app.kubernetes.io/instance: immich + app.kubernetes.io/part-of: immich + redis-operator: "true" + env: production +spec: + selector: + matchLabels: + redis_setup_type: replication + endpoints: + - port: redis-exporter + interval: 30s + scrapeTimeout: 10s diff --git a/clusters/cl01tl/manifests/jellyfin/jellyfin.yaml b/clusters/cl01tl/manifests/jellyfin/jellyfin.yaml new file mode 100644 index 000000000..ff366abed --- /dev/null +++ b/clusters/cl01tl/manifests/jellyfin/jellyfin.yaml @@ -0,0 +1,326 @@ +--- +# Source: jellyfin/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: jellyfin-nfs-storage + namespace: jellyfin + labels: + app.kubernetes.io/name: jellyfin-nfs-storage + app.kubernetes.io/instance: jellyfin + app.kubernetes.io/part-of: jellyfin +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: jellyfin/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: jellyfin-youtube-nfs-storage + namespace: jellyfin + labels: + app.kubernetes.io/name: jellyfin-youtube-nfs-storage + app.kubernetes.io/instance: jellyfin + app.kubernetes.io/part-of: jellyfin +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadOnlyMany + nfs: + path: /volume2/Storage/YouTube + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: jellyfin/charts/jellyfin/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: jellyfin-config + labels: + app.kubernetes.io/instance: jellyfin + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: jellyfin + helm.sh/chart: jellyfin-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: jellyfin +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "100Gi" + storageClassName: "ceph-block" +--- +# Source: jellyfin/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: jellyfin-nfs-storage + namespace: jellyfin + labels: + app.kubernetes.io/name: jellyfin-nfs-storage + app.kubernetes.io/instance: jellyfin + app.kubernetes.io/part-of: jellyfin +spec: + volumeName: jellyfin-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: jellyfin/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: jellyfin-youtube-nfs-storage + namespace: jellyfin + labels: + app.kubernetes.io/name: jellyfin-youtube-nfs-storage + app.kubernetes.io/instance: jellyfin + app.kubernetes.io/part-of: jellyfin +spec: + volumeName: jellyfin-youtube-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadOnlyMany + resources: + requests: + storage: 1Gi +--- +# Source: jellyfin/charts/jellyfin/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: jellyfin + labels: + app.kubernetes.io/instance: jellyfin + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: jellyfin + app.kubernetes.io/service: jellyfin + helm.sh/chart: jellyfin-4.4.0 + namespace: jellyfin +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 8096 + protocol: TCP + name: http + selector: + 
app.kubernetes.io/controller: main + app.kubernetes.io/instance: jellyfin + app.kubernetes.io/name: jellyfin +--- +# Source: jellyfin/charts/jellyfin/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: jellyfin + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: jellyfin + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: jellyfin + helm.sh/chart: jellyfin-4.4.0 + namespace: jellyfin +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: jellyfin + app.kubernetes.io/instance: jellyfin + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: jellyfin + app.kubernetes.io/name: jellyfin + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: JELLYFIN_hostwebclient + value: "true" + - name: JELLYFIN_PublishedServerUrl + value: https://jellyfin.alexlebens.net/ + image: ghcr.io/jellyfin/jellyfin:10.11.3 + imagePullPolicy: IfNotPresent + name: main + resources: + limits: + gpu.intel.com/i915: 1 + requests: + cpu: 1 + gpu.intel.com/i915: 1 + memory: 2Gi + volumeMounts: + - mountPath: /cache + name: cache + - mountPath: /config + name: config + - mountPath: /mnt/store + name: media + - mountPath: /mnt/youtube + name: youtube + readOnly: true + volumes: + - emptyDir: {} + name: cache + - name: config + persistentVolumeClaim: + claimName: jellyfin-config + - name: media + persistentVolumeClaim: + claimName: jellyfin-nfs-storage + - name: youtube + persistentVolumeClaim: + claimName: jellyfin-youtube-nfs-storage +--- +# Source: jellyfin/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: jellyfin-config-backup-secret + namespace: jellyfin + labels: + app.kubernetes.io/name: jellyfin-config-backup-secret + app.kubernetes.io/instance: jellyfin + app.kubernetes.io/part-of: jellyfin +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/jellyfin/jellyfin-config" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: jellyfin/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-jellyfin + namespace: jellyfin + labels: + app.kubernetes.io/name: 
http-route-jellyfin + app.kubernetes.io/instance: jellyfin + app.kubernetes.io/part-of: jellyfin +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - jellyfin.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: jellyfin + port: 80 + weight: 100 +--- +# Source: jellyfin/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: jellyfin-config-backup-source + namespace: jellyfin + labels: + app.kubernetes.io/name: jellyfin-config-backup-source + app.kubernetes.io/instance: jellyfin + app.kubernetes.io/part-of: jellyfin +spec: + sourcePVC: jellyfin-config + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: jellyfin-config-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot + cacheCapacity: 10Gi diff --git a/clusters/cl01tl/manifests/jellystat/jellystat.yaml b/clusters/cl01tl/manifests/jellystat/jellystat.yaml new file mode 100644 index 000000000..96df352ba --- /dev/null +++ b/clusters/cl01tl/manifests/jellystat/jellystat.yaml @@ -0,0 +1,861 @@ +--- +# Source: jellystat/charts/jellystat/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: jellystat-data + labels: + app.kubernetes.io/instance: jellystat + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: jellystat + helm.sh/chart: jellystat-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: jellystat +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" + storageClassName: "ceph-block" +--- +# Source: jellystat/charts/jellystat/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: jellystat + labels: + app.kubernetes.io/instance: jellystat + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: jellystat + app.kubernetes.io/service: jellystat + helm.sh/chart: jellystat-4.4.0 + namespace: jellystat +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 3000 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: jellystat + app.kubernetes.io/name: jellystat +--- +# Source: jellystat/charts/jellystat/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: jellystat + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: jellystat + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: jellystat + helm.sh/chart: jellystat-4.4.0 + namespace: jellystat +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: jellystat + app.kubernetes.io/instance: jellystat + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: jellystat + app.kubernetes.io/name: jellystat + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: JWT_SECRET + valueFrom: + secretKeyRef: + key: secret-key + name: jellystat-secret + - name: JS_USER + valueFrom: + secretKeyRef: + key: user + name: jellystat-secret + - name: JS_PASSWORD + 
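+              # JWT_SECRET, JS_USER, and JS_PASSWORD are pulled from the
+              # jellystat-secret ExternalSecret defined later in this file;
+              # JS_USER/JS_PASSWORD are assumed to seed the initial
+              # Jellystat admin login.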
valueFrom: + secretKeyRef: + key: password + name: jellystat-secret + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + key: username + name: jellystat-postgresql-17-cluster-app + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + key: password + name: jellystat-postgresql-17-cluster-app + - name: POSTGRES_DB + valueFrom: + secretKeyRef: + key: dbname + name: jellystat-postgresql-17-cluster-app + - name: POSTGRES_IP + valueFrom: + secretKeyRef: + key: host + name: jellystat-postgresql-17-cluster-app + - name: POSTGRES_PORT + valueFrom: + secretKeyRef: + key: port + name: jellystat-postgresql-17-cluster-app + image: cyfershepard/jellystat:1.1.6 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 256Mi + volumeMounts: + - mountPath: /app/backend/backup-data + name: data + volumes: + - name: data + persistentVolumeClaim: + claimName: jellystat-data +--- +# Source: jellystat/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: jellystat-postgresql-17-cluster + namespace: jellystat + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: jellystat-postgresql-17 + app.kubernetes.io/instance: jellystat + app.kubernetes.io/part-of: jellystat + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "jellystat-postgresql-17-external-backup" + serverName: "jellystat-postgresql-17-backup-1" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "jellystat-postgresql-17-garage-local-backup" + serverName: "jellystat-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "jellystat-postgresql-17-recovery" + serverName: jellystat-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 100m + memory: 256Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: jellystat-postgresql-17-backup-1 + + externalClusters: + - name: jellystat-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "jellystat-postgresql-17-recovery" + serverName: jellystat-postgresql-17-backup-1 +--- +# Source: jellystat/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: jellystat-secret + namespace: jellystat + labels: + app.kubernetes.io/name: jellystat-secret + app.kubernetes.io/instance: jellystat + app.kubernetes.io/part-of: jellystat +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: secret-key + remoteRef: + 
conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/jellystat/auth + metadataPolicy: None + property: secret-key + - secretKey: user + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/jellystat/auth + metadataPolicy: None + property: user + - secretKey: password + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/jellystat/auth + metadataPolicy: None + property: password +--- +# Source: jellystat/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: jellystat-data-backup-secret + namespace: jellystat + labels: + app.kubernetes.io/name: jellystat-data-backup-secret + app.kubernetes.io/instance: jellystat + app.kubernetes.io/part-of: jellystat +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/jellystat/jellystat-data" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: jellystat/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: jellystat-postgresql-17-cluster-backup-secret + namespace: jellystat + labels: + app.kubernetes.io/name: jellystat-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: jellystat + app.kubernetes.io/part-of: jellystat +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: jellystat/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: jellystat-postgresql-17-cluster-backup-secret-garage + namespace: jellystat + labels: + app.kubernetes.io/name: jellystat-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: jellystat + app.kubernetes.io/part-of: jellystat +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: 
/garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: jellystat/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-jellystat + namespace: jellystat + labels: + app.kubernetes.io/name: http-route-jellystat + app.kubernetes.io/instance: jellystat + app.kubernetes.io/part-of: jellystat +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - jellystat.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: jellystat + port: 80 + weight: 100 +--- +# Source: jellystat/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "jellystat-postgresql-17-external-backup" + namespace: jellystat + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: jellystat-postgresql-17 + app.kubernetes.io/instance: jellystat + app.kubernetes.io/part-of: jellystat + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/jellystat/jellystat-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: jellystat-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: jellystat-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: jellystat/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "jellystat-postgresql-17-garage-local-backup" + namespace: jellystat + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: jellystat-postgresql-17 + app.kubernetes.io/instance: jellystat + app.kubernetes.io/part-of: jellystat + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/jellystat/jellystat-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: jellystat-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: jellystat-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: jellystat-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: jellystat/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "jellystat-postgresql-17-recovery" + namespace: jellystat + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: jellystat-postgresql-17 + app.kubernetes.io/instance: jellystat + app.kubernetes.io/part-of: jellystat + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + configuration: + destinationPath: s3://postgres-backups/cl01tl/jellystat/jellystat-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + wal: + compression: snappy + maxParallel: 1 + data: + compression: snappy + jobs: 1 + s3Credentials: + accessKeyId: + name: 
jellystat-postgresql-17-cluster-backup-secret-garage
+        key: ACCESS_KEY_ID
+      secretAccessKey:
+        name: jellystat-postgresql-17-cluster-backup-secret-garage
+        key: ACCESS_SECRET_KEY
+---
+# Source: jellystat/charts/postgres-17-cluster/templates/prometheus-rule.yaml
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: jellystat-postgresql-17-alert-rules
+  namespace: jellystat
+  labels:
+    helm.sh/chart: postgres-17-cluster-6.16.0
+    app.kubernetes.io/name: jellystat-postgresql-17
+    app.kubernetes.io/instance: jellystat
+    app.kubernetes.io/part-of: jellystat
+    app.kubernetes.io/version: "6.16.0"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  groups:
+    - name: cloudnative-pg/jellystat-postgresql-17
+      rules:
+        - alert: CNPGClusterBackendsWaitingWarning
+          annotations:
+            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
+            description: |-
+              Pod {{ $labels.pod }}
+              has been waiting for longer than 5 minutes
+          expr: |
+            cnpg_backends_waiting_total > 300
+          for: 1m
+          labels:
+            severity: warning
+            namespace: jellystat
+            cnpg_cluster: jellystat-postgresql-17-cluster
+        - alert: CNPGClusterDatabaseDeadlockConflictsWarning
+          annotations:
+            summary: CNPG Cluster has over 10 deadlock conflicts.
+            description: |-
+              There are over 10 deadlock conflicts in
+              {{ $labels.pod }}
+          expr: |
+            cnpg_pg_stat_database_deadlocks > 10
+          for: 1m
+          labels:
+            severity: warning
+            namespace: jellystat
+            cnpg_cluster: jellystat-postgresql-17-cluster
+        - alert: CNPGClusterHACritical
+          annotations:
+            summary: CNPG Cluster has no standby replicas!
+            description: |-
+              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
+              risk of data loss and downtime if the primary instance fails.
+
+              The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
+              will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
+
+              This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
+              instances. The replaced instance may need some time to catch up with the cluster primary instance.
+
+              This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
+              case you may want to silence it.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
+          expr: |
+            max by (job) (cnpg_pg_replication_streaming_replicas{namespace="jellystat"} - cnpg_pg_replication_is_wal_receiver_up{namespace="jellystat"}) < 1
+          for: 5m
+          labels:
+            severity: critical
+            namespace: jellystat
+            cnpg_cluster: jellystat-postgresql-17-cluster
+        - alert: CNPGClusterHAWarning
+          annotations:
+            summary: CNPG Cluster has fewer than 2 standby replicas.
+            description: |-
+              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
+              your cluster at risk if another instance fails. The cluster is still able to operate normally, although
+              the `-ro` and `-r` endpoints operate at reduced capacity.
+
+              This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
+              need some time to catch up with the cluster primary instance.
+
+              This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
+              In this case you may want to silence it.
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="jellystat"} - cnpg_pg_replication_is_wal_receiver_up{namespace="jellystat"}) < 2 + for: 5m + labels: + severity: warning + namespace: jellystat + cnpg_cluster: jellystat-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsCritical + annotations: + summary: CNPG Instance maximum number of connections critical! + description: |- + CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95 + for: 5m + labels: + severity: critical + namespace: jellystat + cnpg_cluster: jellystat-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsWarning + annotations: + summary: CNPG Instance is approaching the maximum number of connections. + description: |- + CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: jellystat + cnpg_cluster: jellystat-postgresql-17-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. + + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="jellystat",pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: jellystat + cnpg_cluster: jellystat-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: jellystat + cnpg_cluster: jellystat-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: jellystat + cnpg_cluster: jellystat-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! + description: |- + CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: jellystat + cnpg_cluster: jellystat-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" is running low on disk space. Check attached PVCs. 
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
+          expr: |
+            max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
+            max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
+            max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
+            /
+            sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
+            *
+            on(namespace, persistentvolumeclaim) group_left(volume)
+            kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}
+            ) > 0.7
+          for: 5m
+          labels:
+            severity: warning
+            namespace: jellystat
+            cnpg_cluster: jellystat-postgresql-17-cluster
+        - alert: CNPGClusterOffline
+          annotations:
+            summary: CNPG Cluster has no running instances!
+            description: |-
+              CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" has no ready instances.
+
+              Having an offline cluster means your applications will not be able to access the database, leading to
+              potential service disruption and/or data loss.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
+          expr: |
+            (count(cnpg_collector_up{namespace="jellystat",pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
+          for: 5m
+          labels:
+            severity: critical
+            namespace: jellystat
+            cnpg_cluster: jellystat-postgresql-17-cluster
+        - alert: CNPGClusterPGDatabaseXidAgeWarning
+          annotations:
+            summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
+            description: |-
+              Over 300,000,000 transactions from frozen xid
+              on pod {{ $labels.pod }}
+          expr: |
+            cnpg_pg_database_xid_age > 300000000
+          for: 1m
+          labels:
+            severity: warning
+            namespace: jellystat
+            cnpg_cluster: jellystat-postgresql-17-cluster
+        - alert: CNPGClusterPGReplicationWarning
+          annotations:
+            summary: CNPG Cluster standby is lagging behind the primary.
+            description: |-
+              Standby is lagging behind by over 300 seconds (5 minutes)
+          expr: |
+            cnpg_pg_replication_lag > 300
+          for: 1m
+          labels:
+            severity: warning
+            namespace: jellystat
+            cnpg_cluster: jellystat-postgresql-17-cluster
+        - alert: CNPGClusterReplicaFailingReplicationWarning
+          annotations:
+            summary: CNPG Cluster has a replica that is failing to replicate.
+            description: |-
+              Replica {{ $labels.pod }}
+              is failing to replicate
+          expr: |
+            cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
+          for: 1m
+          labels:
+            severity: warning
+            namespace: jellystat
+            cnpg_cluster: jellystat-postgresql-17-cluster
+        - alert: CNPGClusterZoneSpreadWarning
+          annotations:
+            summary: CNPG Cluster has instances in the same zone.
+ description: |- + CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" has instances in the same availability zone. + + A disaster in one availability zone will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: jellystat + cnpg_cluster: jellystat-postgresql-17-cluster +--- +# Source: jellystat/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: jellystat-data-backup-source + namespace: jellystat + labels: + app.kubernetes.io/name: jellystat-data-backup-source + app.kubernetes.io/instance: jellystat + app.kubernetes.io/part-of: jellystat +spec: + sourcePVC: jellystat-data + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: jellystat-data-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: jellystat/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "jellystat-postgresql-17-daily-backup-scheduled-backup" + namespace: jellystat + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: jellystat-postgresql-17 + app.kubernetes.io/instance: jellystat + app.kubernetes.io/part-of: jellystat + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: jellystat-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "jellystat-postgresql-17-external-backup" +--- +# Source: jellystat/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "jellystat-postgresql-17-live-backup-scheduled-backup" + namespace: jellystat + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: jellystat-postgresql-17 + app.kubernetes.io/instance: jellystat + app.kubernetes.io/part-of: jellystat + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: jellystat-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "jellystat-postgresql-17-garage-local-backup" diff --git a/clusters/cl01tl/manifests/karakeep/karakeep.yaml b/clusters/cl01tl/manifests/karakeep/karakeep.yaml new file mode 100644 index 000000000..6d18cfc17 --- /dev/null +++ b/clusters/cl01tl/manifests/karakeep/karakeep.yaml @@ -0,0 +1,711 @@ +--- +# Source: karakeep/charts/meilisearch/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: karakeep-meilisearch + labels: + helm.sh/chart: meilisearch-0.17.1 + app.kubernetes.io/name: meilisearch + app.kubernetes.io/instance: karakeep + app.kubernetes.io/version: "v1.18.0" + 
app.kubernetes.io/component: search-engine + app.kubernetes.io/part-of: meilisearch + app.kubernetes.io/managed-by: Helm +--- +# Source: karakeep/charts/meilisearch/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: karakeep-meilisearch-environment + labels: + helm.sh/chart: meilisearch-0.17.1 + app.kubernetes.io/name: meilisearch + app.kubernetes.io/instance: karakeep + app.kubernetes.io/version: "v1.18.0" + app.kubernetes.io/component: search-engine + app.kubernetes.io/part-of: meilisearch + app.kubernetes.io/managed-by: Helm +data: + MEILI_ENV: "production" + MEILI_EXPERIMENTAL_DUMPLESS_UPGRADE: "true" + MEILI_NO_ANALYTICS: "true" + MEILI_EXPERIMENTAL_ENABLE_METRICS: "true" +--- +# Source: karakeep/charts/karakeep/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: karakeep + labels: + app.kubernetes.io/instance: karakeep + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: karakeep + helm.sh/chart: karakeep-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: karakeep +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "10Gi" + storageClassName: "ceph-block" +--- +# Source: karakeep/charts/meilisearch/templates/pvc.yaml +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: karakeep-meilisearch + labels: + helm.sh/chart: meilisearch-0.17.1 + app.kubernetes.io/name: meilisearch + app.kubernetes.io/instance: karakeep + app.kubernetes.io/version: "v1.18.0" + app.kubernetes.io/component: search-engine + app.kubernetes.io/part-of: meilisearch + app.kubernetes.io/managed-by: Helm +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "10Gi" + storageClassName: "ceph-block" +--- +# Source: karakeep/charts/karakeep/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: karakeep + labels: + app.kubernetes.io/instance: karakeep + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: karakeep + app.kubernetes.io/service: karakeep + helm.sh/chart: karakeep-4.4.0 + namespace: karakeep +spec: + type: ClusterIP + ports: + - port: 9222 + targetPort: 9222 + protocol: TCP + name: chrome + - port: 3000 + targetPort: 3000 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: karakeep + app.kubernetes.io/name: karakeep +--- +# Source: karakeep/charts/meilisearch/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: karakeep-meilisearch + labels: + helm.sh/chart: meilisearch-0.17.1 + app.kubernetes.io/name: meilisearch + app.kubernetes.io/instance: karakeep + app.kubernetes.io/version: "v1.18.0" + app.kubernetes.io/component: search-engine + app.kubernetes.io/part-of: meilisearch + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + ports: + - port: 7700 + targetPort: http + protocol: TCP + name: http + selector: + app.kubernetes.io/name: meilisearch + app.kubernetes.io/instance: karakeep +--- +# Source: karakeep/charts/cloudflared/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: karakeep-cloudflared + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: karakeep + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cloudflared + app.kubernetes.io/version: 2025.10.0 + helm.sh/chart: cloudflared-1.23.0 + namespace: karakeep +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: cloudflared 
+ app.kubernetes.io/instance: karakeep + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: karakeep + app.kubernetes.io/name: cloudflared + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - args: + - tunnel + - --protocol + - http2 + - --no-autoupdate + - run + - --token + - $(CF_MANAGED_TUNNEL_TOKEN) + env: + - name: CF_MANAGED_TUNNEL_TOKEN + valueFrom: + secretKeyRef: + key: cf-tunnel-token + name: karakeep-cloudflared-secret + image: cloudflare/cloudflared:2025.11.1 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi +--- +# Source: karakeep/charts/karakeep/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: karakeep + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: karakeep + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: karakeep + helm.sh/chart: karakeep-4.4.0 + namespace: karakeep +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: karakeep + app.kubernetes.io/instance: karakeep + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: karakeep + app.kubernetes.io/name: karakeep + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - args: + - --no-sandbox + - --disable-gpu + - --disable-dev-shm-usage + - --remote-debugging-address=0.0.0.0 + - --remote-debugging-port=9222 + - --hide-scrollbars + image: gcr.io/zenika-hub/alpine-chrome:124 + imagePullPolicy: IfNotPresent + name: chrome + resources: + requests: + cpu: 10m + memory: 128Mi + - env: + - name: DATA_DIR + value: /data + - name: DB_WAL_MODE + value: "true" + - name: NEXTAUTH_URL + value: https://karakeep.alexlebens.dev/ + - name: NEXTAUTH_SECRET + valueFrom: + secretKeyRef: + key: key + name: karakeep-key-secret + - name: PROMETHEUS_AUTH_TOKEN + valueFrom: + secretKeyRef: + key: prometheus-token + name: karakeep-key-secret + - name: ASSET_STORE_S3_ENDPOINT + value: http://rook-ceph-rgw-ceph-objectstore.rook-ceph.svc:80 + - name: ASSET_STORE_S3_REGION + value: us-east-1 + - name: ASSET_STORE_S3_BUCKET + valueFrom: + configMapKeyRef: + key: BUCKET_NAME + name: ceph-bucket-karakeep + - name: ASSET_STORE_S3_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + key: AWS_ACCESS_KEY_ID + name: ceph-bucket-karakeep + - name: ASSET_STORE_S3_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: AWS_SECRET_ACCESS_KEY + name: ceph-bucket-karakeep + - name: ASSET_STORE_S3_FORCE_PATH_STYLE + value: "true" + - name: MEILI_ADDR + value: http://karakeep-meilisearch.karakeep:7700 + - name: MEILI_MASTER_KEY + valueFrom: + secretKeyRef: + key: MEILI_MASTER_KEY + name: karakeep-meilisearch-master-key-secret + - name: BROWSER_WEB_URL + value: http://karakeep.karakeep:9222 + - name: DISABLE_SIGNUPS + value: "false" + - name: OAUTH_PROVIDER_NAME + value: Authentik + - name: OAUTH_WELLKNOWN_URL + value: https://auth.alexlebens.dev/application/o/karakeep/.well-known/openid-configuration + - name: OAUTH_SCOPE + value: openid email profile + - name: OAUTH_CLIENT_ID + valueFrom: + secretKeyRef: + key: AUTHENTIK_CLIENT_ID + name: karakeep-oidc-secret + - 
name: OAUTH_CLIENT_SECRET + valueFrom: + secretKeyRef: + key: AUTHENTIK_CLIENT_SECRET + name: karakeep-oidc-secret + - name: OLLAMA_BASE_URL + value: http://ollama-server-3.ollama:11434 + - name: OLLAMA_KEEP_ALIVE + value: 5m + - name: INFERENCE_TEXT_MODEL + value: gemma3:4b + - name: INFERENCE_IMAGE_MODEL + value: granite3.2-vision:2b + - name: EMBEDDING_TEXT_MODEL + value: mxbai-embed-large + - name: INFERENCE_JOB_TIMEOUT_SEC + value: "720" + image: ghcr.io/karakeep-app/karakeep:0.28.0 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 256Mi + volumeMounts: + - mountPath: /data + name: data + volumes: + - name: data + persistentVolumeClaim: + claimName: karakeep +--- +# Source: karakeep/charts/meilisearch/templates/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: karakeep-meilisearch + labels: + helm.sh/chart: meilisearch-0.17.1 + app.kubernetes.io/name: meilisearch + app.kubernetes.io/instance: karakeep + app.kubernetes.io/version: "v1.18.0" + app.kubernetes.io/component: search-engine + app.kubernetes.io/part-of: meilisearch + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + serviceName: karakeep-meilisearch + selector: + matchLabels: + app.kubernetes.io/name: meilisearch + app.kubernetes.io/instance: karakeep + template: + metadata: + labels: + helm.sh/chart: meilisearch-0.17.1 + app.kubernetes.io/name: meilisearch + app.kubernetes.io/instance: karakeep + app.kubernetes.io/version: "v1.18.0" + app.kubernetes.io/component: search-engine + app.kubernetes.io/part-of: meilisearch + app.kubernetes.io/managed-by: Helm + annotations: + checksum/config: e3114e6f2910e1678611b9df77ee9eb63744c6e143f716dd8aa5f015391a2ef3 + spec: + serviceAccountName: karakeep-meilisearch + securityContext: + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + runAsGroup: 1000 + runAsNonRoot: true + runAsUser: 1000 + volumes: + - name: tmp + emptyDir: {} + - name: data + persistentVolumeClaim: + claimName: karakeep-meilisearch + + + + containers: + - name: meilisearch + image: "getmeili/meilisearch:v1.18.0" + imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + volumeMounts: + - name: tmp + mountPath: /tmp + - name: data + mountPath: /meili_data + envFrom: + - configMapRef: + name: karakeep-meilisearch-environment + - secretRef: + name: karakeep-meilisearch-master-key-secret + ports: + - name: http + containerPort: 7700 + protocol: TCP + startupProbe: + httpGet: + path: /health + port: http + periodSeconds: 1 + initialDelaySeconds: 1 + failureThreshold: 60 + timeoutSeconds: 1 + livenessProbe: + httpGet: + path: /health + port: http + periodSeconds: 10 + initialDelaySeconds: 0 + timeoutSeconds: 10 + readinessProbe: + httpGet: + path: /health + port: http + periodSeconds: 10 + initialDelaySeconds: 0 + timeoutSeconds: 10 + resources: + requests: + cpu: 10m + memory: 128Mi +--- +# Source: karakeep/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: karakeep-key-secret + namespace: karakeep + labels: + app.kubernetes.io/name: karakeep-key-secret + app.kubernetes.io/instance: karakeep + app.kubernetes.io/part-of: karakeep +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: key + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/karakeep/key + metadataPolicy: None + property: key + - secretKey: prometheus-token + remoteRef: + 
conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/karakeep/key + metadataPolicy: None + property: prometheus-token +--- +# Source: karakeep/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: karakeep-oidc-secret + namespace: karakeep + labels: + app.kubernetes.io/name: karakeep-oidc-secret + app.kubernetes.io/instance: karakeep + app.kubernetes.io/part-of: karakeep +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: AUTHENTIK_CLIENT_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/karakeep + metadataPolicy: None + property: client + - secretKey: AUTHENTIK_CLIENT_SECRET + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/karakeep + metadataPolicy: None + property: secret +--- +# Source: karakeep/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: karakeep-meilisearch-master-key-secret + namespace: karakeep + labels: + app.kubernetes.io/name: karakeep-meilisearch-master-key-secret + app.kubernetes.io/instance: karakeep + app.kubernetes.io/part-of: karakeep +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: MEILI_MASTER_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/karakeep/meilisearch + metadataPolicy: None + property: MEILI_MASTER_KEY +--- +# Source: karakeep/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: karakeep-cloudflared-secret + namespace: karakeep + labels: + app.kubernetes.io/name: karakeep-cloudflared-secret + app.kubernetes.io/instance: karakeep + app.kubernetes.io/part-of: karakeep +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: cf-tunnel-token + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cloudflare/tunnels/karakeep + metadataPolicy: None + property: token +--- +# Source: karakeep/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: karakeep-data-backup-secret + namespace: karakeep + labels: + app.kubernetes.io/name: karakeep-data-backup-secret + app.kubernetes.io/instance: karakeep + app.kubernetes.io/part-of: karakeep +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/karakeep/karakeep-data" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: 
secret_key
+---
+# Source: karakeep/templates/object-bucket-claim.yaml
+apiVersion: objectbucket.io/v1alpha1
+kind: ObjectBucketClaim
+metadata:
+  name: ceph-bucket-karakeep
+  labels:
+    app.kubernetes.io/name: ceph-bucket-karakeep
+    app.kubernetes.io/instance: karakeep
+    app.kubernetes.io/part-of: karakeep
+spec:
+  generateBucketName: bucket-karakeep
+  storageClassName: ceph-bucket
+---
+# Source: karakeep/templates/replication-source.yaml
+apiVersion: volsync.backube/v1alpha1
+kind: ReplicationSource
+metadata:
+  name: karakeep-data-backup-source
+  namespace: karakeep
+  labels:
+    app.kubernetes.io/name: karakeep-data-backup-source
+    app.kubernetes.io/instance: karakeep
+    app.kubernetes.io/part-of: karakeep
+spec:
+  sourcePVC: karakeep
+  trigger:
+    schedule: 0 4 * * *
+  restic:
+    pruneIntervalDays: 7
+    repository: karakeep-data-backup-secret
+    retain:
+      hourly: 1
+      daily: 3
+      weekly: 2
+      monthly: 2
+      yearly: 4
+    copyMethod: Snapshot
+    storageClassName: ceph-block
+    volumeSnapshotClassName: ceph-blockpool-snapshot
+---
+# Source: karakeep/charts/meilisearch/templates/serviceMonitor.yaml
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: karakeep-meilisearch
+  namespace: karakeep
+  labels:
+    helm.sh/chart: meilisearch-0.17.1
+    app.kubernetes.io/name: meilisearch
+    app.kubernetes.io/instance: karakeep
+    app.kubernetes.io/version: "v1.18.0"
+    app.kubernetes.io/component: search-engine
+    app.kubernetes.io/part-of: meilisearch
+    app.kubernetes.io/managed-by: Helm
+spec:
+  jobLabel: karakeep
+  namespaceSelector:
+    matchNames:
+      - karakeep
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: meilisearch
+      app.kubernetes.io/instance: karakeep
+  endpoints:
+    - port: http
+      path: /metrics
+      interval: 1m
+      scrapeTimeout: 10s
+      bearerTokenSecret:
+        name: karakeep-meilisearch-master-key-secret
+        key: MEILI_MASTER_KEY
+---
+# Source: karakeep/templates/service-monitor.yaml
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: karakeep
+  namespace: karakeep
+  labels:
+    app.kubernetes.io/name: karakeep
+    app.kubernetes.io/instance: karakeep
+    app.kubernetes.io/part-of: karakeep
+spec:
+  endpoints:
+    - port: http
+      interval: 30s
+      scrapeTimeout: 15s
+      path: /api/metrics
+      authorization:
+        credentials:
+          key: prometheus-token
+          name: karakeep-key-secret
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: karakeep
+      app.kubernetes.io/instance: karakeep
+---
+# Source: karakeep/charts/meilisearch/templates/tests/test-connection.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: karakeep-meilisearch-test-connection
+  labels:
+    app.kubernetes.io/name: meilisearch
+    helm.sh/chart: meilisearch-0.17.1
+    app.kubernetes.io/instance: karakeep
+    app.kubernetes.io/managed-by: Helm
+  annotations:
+    "helm.sh/hook": test-success
+spec:
+  containers:
+    - name: wget
+      image: busybox
+      command: ['wget']
+      args: ['karakeep-meilisearch:7700']
+  restartPolicy: Never
diff --git a/clusters/cl01tl/manifests/kiwix/kiwix.yaml b/clusters/cl01tl/manifests/kiwix/kiwix.yaml
new file mode 100644
index 000000000..dff91e214
--- /dev/null
+++ b/clusters/cl01tl/manifests/kiwix/kiwix.yaml
@@ -0,0 +1,157 @@
+---
+# Source: kiwix/templates/persistent-volume.yaml
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: kiwix-nfs-storage
+  namespace: kiwix
+  labels:
+    app.kubernetes.io/name: kiwix-nfs-storage
+    app.kubernetes.io/instance: kiwix
+    app.kubernetes.io/part-of: kiwix
+spec:
+  persistentVolumeReclaimPolicy: Retain
+  storageClassName: nfs-client
+  capacity:
+    storage: 1Gi
+
accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage/Kiwix + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: kiwix/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: kiwix-nfs-storage + namespace: kiwix + labels: + app.kubernetes.io/name: kiwix-nfs-storage + app.kubernetes.io/instance: kiwix + app.kubernetes.io/part-of: kiwix +spec: + volumeName: kiwix-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: kiwix/charts/kiwix/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: kiwix + labels: + app.kubernetes.io/instance: kiwix + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: kiwix + app.kubernetes.io/service: kiwix + helm.sh/chart: kiwix-4.4.0 + namespace: kiwix +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: kiwix + app.kubernetes.io/name: kiwix +--- +# Source: kiwix/charts/kiwix/templates/common.yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kiwix + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: kiwix + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: kiwix + helm.sh/chart: kiwix-4.4.0 + namespace: kiwix +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: kiwix + app.kubernetes.io/instance: kiwix + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: kiwix + app.kubernetes.io/name: kiwix + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - args: + - '*.zim' + env: + - name: PORT + value: "8080" + image: ghcr.io/kiwix/kiwix-serve:3.8.0 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 50m + memory: 512Mi + volumeMounts: + - mountPath: /data + name: media + readOnly: true + volumes: + - name: media + persistentVolumeClaim: + claimName: kiwix-nfs-storage +--- +# Source: kiwix/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-kiwix + namespace: kiwix + labels: + app.kubernetes.io/name: http-route-kiwix + app.kubernetes.io/instance: kiwix + app.kubernetes.io/part-of: kiwix +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - kiwix.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: kiwix + port: 80 + weight: 100 diff --git a/clusters/cl01tl/manifests/libation/libation.yaml b/clusters/cl01tl/manifests/libation/libation.yaml new file mode 100644 index 000000000..dbc1d354e --- /dev/null +++ b/clusters/cl01tl/manifests/libation/libation.yaml @@ -0,0 +1,129 @@ +--- +# Source: libation/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: libation-nfs-storage + namespace: libation + labels: + app.kubernetes.io/name: libation-nfs-storage + app.kubernetes.io/instance: libation + app.kubernetes.io/part-of: libation +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + 
capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage/Audiobooks/ + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: libation/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: libation-config + namespace: libation + labels: + app.kubernetes.io/name: libation-config + app.kubernetes.io/instance: libation + app.kubernetes.io/part-of: libation +spec: + storageClassName: nfs-client + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + volumeMode: Filesystem +--- +# Source: libation/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: libation-nfs-storage + namespace: libation + labels: + app.kubernetes.io/name: libation-nfs-storage + app.kubernetes.io/instance: libation + app.kubernetes.io/part-of: libation +spec: + volumeName: libation-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: libation/charts/libation/templates/common.yaml +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: libation + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: libation + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: libation + helm.sh/chart: libation-4.4.0 + namespace: libation +spec: + suspend: false + concurrencyPolicy: Forbid + startingDeadlineSeconds: 90 + timeZone: US/Central + schedule: "30 4 * * *" + successfulJobsHistoryLimit: 3 + failedJobsHistoryLimit: 3 + jobTemplate: + spec: + parallelism: 1 + backoffLimit: 3 + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: libation + app.kubernetes.io/name: libation + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + restartPolicy: Never + containers: + - env: + - name: SLEEP_TIME + value: "-1" + - name: LIBATION_BOOKS_DIR + value: /data + image: rmcrackan/libation:12.7.4 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - mountPath: /config + name: config + - mountPath: /data + name: data + volumes: + - name: config + persistentVolumeClaim: + claimName: libation-config + - name: data + persistentVolumeClaim: + claimName: libation-nfs-storage diff --git a/clusters/cl01tl/manifests/lidarr/lidarr.yaml b/clusters/cl01tl/manifests/lidarr/lidarr.yaml new file mode 100644 index 000000000..ee12d7219 --- /dev/null +++ b/clusters/cl01tl/manifests/lidarr/lidarr.yaml @@ -0,0 +1,928 @@ +--- +# Source: lidarr/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: lidarr-nfs-storage + namespace: lidarr + labels: + app.kubernetes.io/name: lidarr-nfs-storage + app.kubernetes.io/instance: lidarr + app.kubernetes.io/part-of: lidarr +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: lidarr/charts/lidarr/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: lidarr-config + labels: + app.kubernetes.io/instance: lidarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: lidarr + 
helm.sh/chart: lidarr-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: lidarr +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "10Gi" + storageClassName: "ceph-block" +--- +# Source: lidarr/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: lidarr-nfs-storage + namespace: lidarr + labels: + app.kubernetes.io/name: lidarr-nfs-storage + app.kubernetes.io/instance: lidarr + app.kubernetes.io/part-of: lidarr +spec: + volumeName: lidarr-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: lidarr/charts/lidarr/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: lidarr + labels: + app.kubernetes.io/instance: lidarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: lidarr + app.kubernetes.io/service: lidarr + helm.sh/chart: lidarr-4.4.0 + namespace: lidarr +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 8686 + protocol: TCP + name: http + - port: 9792 + targetPort: 9792 + protocol: TCP + name: metrics + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: lidarr + app.kubernetes.io/name: lidarr +--- +# Source: lidarr/charts/lidarr/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: lidarr + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: lidarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: lidarr + helm.sh/chart: lidarr-4.4.0 + namespace: lidarr +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: lidarr + app.kubernetes.io/instance: lidarr + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: lidarr + app.kubernetes.io/name: lidarr + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + securityContext: + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + runAsGroup: 1000 + runAsUser: 1000 + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: PUID + value: "1000" + - name: PGID + value: "1000" + image: ghcr.io/linuxserver/lidarr:2.14.5@sha256:5e1235d00b5d1c1f60ca0d472e554a6611aef41aa7b5b6d88260214bf4809af0 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /config + name: config + - mountPath: /mnt/store + name: media + - args: + - lidarr + env: + - name: URL + value: http://localhost + - name: CONFIG + value: /config/config.xml + - name: PORT + value: "9792" + - name: ENABLE_ADDITIONAL_METRICS + value: "false" + - name: ENABLE_UNKNOWN_QUEUE_ITEMS + value: "false" + image: ghcr.io/onedr0p/exportarr:v2.3.0 + imagePullPolicy: IfNotPresent + name: metrics + resources: + requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - mountPath: /config + name: config + readOnly: true + volumes: + - name: config + persistentVolumeClaim: + claimName: lidarr-config + - name: media + persistentVolumeClaim: + claimName: lidarr-nfs-storage +--- +# Source: lidarr/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: lidarr2-postgresql-17-cluster + namespace: lidarr + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: 
lidarr2-postgresql-17 + app.kubernetes.io/instance: lidarr + app.kubernetes.io/part-of: lidarr + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "lidarr2-postgresql-17-external-backup" + serverName: "lidarr2-postgresql-17-backup-1" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "lidarr2-postgresql-17-garage-local-backup" + serverName: "lidarr2-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "lidarr2-postgresql-17-recovery" + serverName: lidarr2-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 200m + memory: 1Gi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: lidarr2-postgresql-17-backup-1 + + externalClusters: + - name: lidarr2-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "lidarr2-postgresql-17-recovery" + serverName: lidarr2-postgresql-17-backup-1 +--- +# Source: lidarr/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: lidarr-config-backup-secret + namespace: lidarr + labels: + app.kubernetes.io/name: lidarr-config-backup-secret + app.kubernetes.io/instance: lidarr + app.kubernetes.io/part-of: lidarr +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/lidarr2/lidarr2-config" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: lidarr/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: 
lidarr-postgresql-17-cluster-backup-secret + namespace: lidarr + labels: + app.kubernetes.io/name: lidarr-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: lidarr + app.kubernetes.io/part-of: lidarr +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: lidarr/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: lidarr-postgresql-17-cluster-backup-secret-garage + namespace: lidarr + labels: + app.kubernetes.io/name: lidarr-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: lidarr + app.kubernetes.io/part-of: lidarr +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: lidarr/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-lidarr + namespace: lidarr + labels: + app.kubernetes.io/name: http-route-lidarr + app.kubernetes.io/instance: lidarr + app.kubernetes.io/part-of: lidarr +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - lidarr.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: lidarr + port: 80 + weight: 100 +--- +# Source: lidarr/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "lidarr2-postgresql-17-external-backup" + namespace: lidarr + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: lidarr2-postgresql-17 + app.kubernetes.io/instance: lidarr + app.kubernetes.io/part-of: lidarr + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/lidarr2/lidarr2-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: lidarr-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: lidarr-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: lidarr/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "lidarr2-postgresql-17-garage-local-backup" + namespace: lidarr + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: lidarr2-postgresql-17 + app.kubernetes.io/instance: lidarr + app.kubernetes.io/part-of: lidarr + 
app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/lidarr/lidarr2-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: lidarr-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: lidarr-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: lidarr-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: lidarr/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "lidarr2-postgresql-17-recovery" + namespace: lidarr + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: lidarr2-postgresql-17 + app.kubernetes.io/instance: lidarr + app.kubernetes.io/part-of: lidarr + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + configuration: + destinationPath: s3://postgres-backups/cl01tl/lidarr/lidarr2-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + wal: + compression: snappy + maxParallel: 1 + data: + compression: snappy + jobs: 1 + s3Credentials: + accessKeyId: + name: lidarr-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: lidarr-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: lidarr/charts/postgres-17-cluster/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: lidarr2-postgresql-17-alert-rules + namespace: lidarr + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: lidarr2-postgresql-17 + app.kubernetes.io/instance: lidarr + app.kubernetes.io/part-of: lidarr + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + groups: + - name: cloudnative-pg/lidarr2-postgresql-17 + rules: + - alert: CNPGClusterBackendsWaitingWarning + annotations: + summary: CNPG Cluster a backend is waiting for longer than 5 minutes. + description: |- + Pod {{ $labels.pod }} + has been waiting for longer than 5 minutes + expr: | + cnpg_backends_waiting_total > 300 + for: 1m + labels: + severity: warning + namespace: lidarr + cnpg_cluster: lidarr2-postgresql-17-cluster + - alert: CNPGClusterDatabaseDeadlockConflictsWarning + annotations: + summary: CNPG Cluster has over 10 deadlock conflicts. + description: |- + There are over 10 deadlock conflicts in + {{ $labels.pod }} + expr: | + cnpg_pg_stat_database_deadlocks > 10 + for: 1m + labels: + severity: warning + namespace: lidarr + cnpg_cluster: lidarr2-postgresql-17-cluster + - alert: CNPGClusterHACritical + annotations: + summary: CNPG Cluster has no standby replicas! + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe + risk of data loss and downtime if the primary instance fails. + + The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint + will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main. + + This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less + instances. The replaced instance may need some time to catch-up with the cluster primary instance. + + This alarm will be always trigger if your cluster is configured to run with only 1 instance. 
In this case you may want to silence it.
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
+ expr: |
+ max by (job) (cnpg_pg_replication_streaming_replicas{namespace="lidarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="lidarr"}) < 1
+ for: 5m
+ labels:
+ severity: critical
+ namespace: lidarr
+ cnpg_cluster: lidarr2-postgresql-17-cluster
+ - alert: CNPGClusterHAWarning
+ annotations:
+ summary: CNPG Cluster has fewer than 2 standby replicas.
+ description: |-
+ CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
+ your cluster at risk if another instance fails. The cluster is still able to operate normally, although
+ the `-ro` and `-r` endpoints operate at reduced capacity.
+
+ This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
+ need some time to catch up with the cluster primary instance.
+
+ This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
+ In this case you may want to silence it.
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
+ expr: |
+ max by (job) (cnpg_pg_replication_streaming_replicas{namespace="lidarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="lidarr"}) < 2
+ for: 5m
+ labels:
+ severity: warning
+ namespace: lidarr
+ cnpg_cluster: lidarr2-postgresql-17-cluster
+ - alert: CNPGClusterHighConnectionsCritical
+ annotations:
+ summary: CNPG Instance has reached a critical number of connections!
+ description: |-
+ CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
+ the maximum number of connections.
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
+ expr: |
+ sum by (pod) (cnpg_backends_total{namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
+ for: 5m
+ labels:
+ severity: critical
+ namespace: lidarr
+ cnpg_cluster: lidarr2-postgresql-17-cluster
+ - alert: CNPGClusterHighConnectionsWarning
+ annotations:
+ summary: CNPG Instance is approaching the maximum number of connections.
+ description: |-
+ CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
+ the maximum number of connections.
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
+ expr: |
+ sum by (pod) (cnpg_backends_total{namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
+ for: 5m
+ labels:
+ severity: warning
+ namespace: lidarr
+ cnpg_cluster: lidarr2-postgresql-17-cluster
+ - alert: CNPGClusterHighReplicationLag
+ annotations:
+ summary: CNPG Cluster high replication lag
+ description: |-
+ CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" is experiencing a high replication lag of
+ {{`{{`}} $value {{`}}`}}ms.
+
+ High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
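+# Note: cnpg_pg_replication_lag is reported in seconds, so the expression
+# below multiplies by 1000 to express the threshold in milliseconds; the
+# rule fires once a standby lags the primary by more than one second. A
+# stripped-down sketch of the same check (instance-pod regex omitted):
+#
+#   max(cnpg_pg_replication_lag{namespace="lidarr"}) * 1000 > 1000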
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="lidarr",pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: lidarr + cnpg_cluster: lidarr2-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: lidarr + cnpg_cluster: lidarr2-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: lidarr + cnpg_cluster: lidarr2-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! + description: |- + CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: lidarr + cnpg_cluster: lidarr2-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" is running low on disk space. Check attached PVCs. 
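+# Note: like the critical rule above, the expression below checks three
+# volume families in a single OR: the data PVC, the "-wal" PVC, and any
+# "-tbs*" tablespace PVCs, each against a 70% used-space threshold (90%
+# for the critical rule). A reduced sketch of one term, for the data
+# volume only:
+#
+#   1 - kubelet_volume_stats_available_bytes{namespace="lidarr"}
+#     / kubelet_volume_stats_capacity_bytes{namespace="lidarr"} > 0.7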
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
+ expr: |
+ max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
+ max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
+ max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
+ /
+ sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
+ *
+ on(namespace, persistentvolumeclaim) group_left(volume)
+ kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}
+ ) > 0.7
+ for: 5m
+ labels:
+ severity: warning
+ namespace: lidarr
+ cnpg_cluster: lidarr2-postgresql-17-cluster
+ - alert: CNPGClusterOffline
+ annotations:
+ summary: CNPG Cluster has no running instances!
+ description: |-
+ CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" has no ready instances.
+
+ Having an offline cluster means your applications will not be able to access the database, leading to
+ potential service disruption and/or data loss.
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
+ expr: |
+ (count(cnpg_collector_up{namespace="lidarr",pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
+ for: 5m
+ labels:
+ severity: critical
+ namespace: lidarr
+ cnpg_cluster: lidarr2-postgresql-17-cluster
+ - alert: CNPGClusterPGDatabaseXidAgeWarning
+ annotations:
+ summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
+ description: |-
+ Over 300,000,000 transactions from frozen XID
+ on pod {{ $labels.pod }}
+ expr: |
+ cnpg_pg_database_xid_age > 300000000
+ for: 1m
+ labels:
+ severity: warning
+ namespace: lidarr
+ cnpg_cluster: lidarr2-postgresql-17-cluster
+ - alert: CNPGClusterPGReplicationWarning
+ annotations:
+ summary: CNPG Cluster standby is lagging behind the primary.
+ description: |-
+ Standby is lagging behind by over 300 seconds (5 minutes)
+ expr: |
+ cnpg_pg_replication_lag > 300
+ for: 1m
+ labels:
+ severity: warning
+ namespace: lidarr
+ cnpg_cluster: lidarr2-postgresql-17-cluster
+ - alert: CNPGClusterReplicaFailingReplicationWarning
+ annotations:
+ summary: CNPG Cluster has a replica that is failing to replicate.
+ description: |-
+ Replica {{ $labels.pod }}
+ is failing to replicate
+ expr: |
+ cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
+ for: 1m
+ labels:
+ severity: warning
+ namespace: lidarr
+ cnpg_cluster: lidarr2-postgresql-17-cluster
+ - alert: CNPGClusterZoneSpreadWarning
+ annotations:
+ summary: CNPG Cluster has instances in the same zone.
+ description: |-
+ CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" has instances in the same availability zone.
+ + A disaster in one availability zone will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: lidarr + cnpg_cluster: lidarr2-postgresql-17-cluster +--- +# Source: lidarr/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: lidarr + namespace: lidarr + labels: + app.kubernetes.io/name: lidarr + app.kubernetes.io/instance: lidarr + app.kubernetes.io/part-of: lidarr +spec: + groups: + - name: lidarr + rules: + - alert: ExportarrAbsent + annotations: + description: Lidarr Exportarr has disappeared from Prometheus + service discovery. + summary: Exportarr is down. + expr: | + absent(up{job=~".*lidarr.*"} == 1) + for: 5m + labels: + severity: critical + - alert: LidarrDown + annotations: + description: Lidarr service is down. + summary: Lidarr is down. + expr: | + lidarr_system_status{job=~".*lidarr.*"} == 0 + for: 5m + labels: + severity: critical +--- +# Source: lidarr/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: lidarr-config-backup-source + namespace: lidarr + labels: + app.kubernetes.io/name: lidarr-config-backup-source + app.kubernetes.io/instance: lidarr + app.kubernetes.io/part-of: lidarr +spec: + sourcePVC: lidarr-config + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: lidarr-config-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + moverSecurityContext: + runAsUser: 1000 + runAsGroup: 1000 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: lidarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "lidarr2-postgresql-17-daily-backup-scheduled-backup" + namespace: lidarr + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: lidarr2-postgresql-17 + app.kubernetes.io/instance: lidarr + app.kubernetes.io/part-of: lidarr + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: lidarr2-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "lidarr2-postgresql-17-external-backup" +--- +# Source: lidarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "lidarr2-postgresql-17-live-backup-scheduled-backup" + namespace: lidarr + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: lidarr2-postgresql-17 + app.kubernetes.io/instance: lidarr + app.kubernetes.io/part-of: lidarr + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: lidarr2-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + 
barmanObjectName: "lidarr2-postgresql-17-garage-local-backup" +--- +# Source: lidarr/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: lidarr + namespace: lidarr + labels: + app.kubernetes.io/name: lidarr + app.kubernetes.io/instance: lidarr + app.kubernetes.io/part-of: lidarr +spec: + selector: + matchLabels: + app.kubernetes.io/name: lidarr + app.kubernetes.io/instance: lidarr + endpoints: + - port: metrics + interval: 3m + scrapeTimeout: 1m + path: /metrics diff --git a/clusters/cl01tl/manifests/lidatube/lidatube.yaml b/clusters/cl01tl/manifests/lidatube/lidatube.yaml new file mode 100644 index 000000000..d9eb9fdc7 --- /dev/null +++ b/clusters/cl01tl/manifests/lidatube/lidatube.yaml @@ -0,0 +1,221 @@ +--- +# Source: lidatube/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: lidatube-nfs-storage + namespace: lidatube + labels: + app.kubernetes.io/name: lidatube-nfs-storage + app.kubernetes.io/instance: lidatube + app.kubernetes.io/part-of: lidatube +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage/Music + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: lidatube/charts/lidatube/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: lidatube-config + labels: + app.kubernetes.io/instance: lidatube + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: lidatube + helm.sh/chart: lidatube-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: lidatube +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" + storageClassName: "ceph-block" +--- +# Source: lidatube/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: lidatube-nfs-storage + namespace: lidatube + labels: + app.kubernetes.io/name: lidatube-nfs-storage + app.kubernetes.io/instance: lidatube + app.kubernetes.io/part-of: lidatube +spec: + volumeName: lidatube-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: lidatube/charts/lidatube/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: lidatube + labels: + app.kubernetes.io/instance: lidatube + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: lidatube + app.kubernetes.io/service: lidatube + helm.sh/chart: lidatube-4.4.0 + namespace: lidatube +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 5000 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: lidatube + app.kubernetes.io/name: lidatube +--- +# Source: lidatube/charts/lidatube/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: lidatube + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: lidatube + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: lidatube + helm.sh/chart: lidatube-4.4.0 + namespace: lidatube +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: lidatube + app.kubernetes.io/instance: lidatube + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: lidatube + app.kubernetes.io/name: lidatube + spec: + 
enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + securityContext: + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: PUID + value: "1000" + - name: PGID + value: "1000" + - name: lidarr_address + value: http://lidarr.lidarr:80 + - name: lidarr_api_key + valueFrom: + secretKeyRef: + key: lidarr_api_key + name: lidatube-secret + - name: sleep_interval + value: "360" + - name: sync_schedule + value: "4" + - name: attempt_lidarr_import + value: "true" + image: thewicklowwolf/lidatube:0.2.41 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - mountPath: /lidatube/config + name: config + - mountPath: /lidatube/downloads + name: music + volumes: + - name: config + persistentVolumeClaim: + claimName: lidatube-config + - name: music + persistentVolumeClaim: + claimName: lidatube-nfs-storage +--- +# Source: lidatube/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: lidatube-secret + namespace: lidatube + labels: + app.kubernetes.io/name: lidatube-secret + app.kubernetes.io/instance: lidatube + app.kubernetes.io/part-of: lidatube +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: lidarr_api_key + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/lidarr2/key + metadataPolicy: None + property: key +--- +# Source: lidatube/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-lidatube + namespace: lidatube + labels: + app.kubernetes.io/name: http-route-lidatube + app.kubernetes.io/instance: lidatube + app.kubernetes.io/part-of: lidatube +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - lidatube.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: lidatube + port: 80 + weight: 100 diff --git a/clusters/cl01tl/manifests/listenarr/listenarr.yaml b/clusters/cl01tl/manifests/listenarr/listenarr.yaml new file mode 100644 index 000000000..001012d3a --- /dev/null +++ b/clusters/cl01tl/manifests/listenarr/listenarr.yaml @@ -0,0 +1,180 @@ +--- +# Source: listenarr/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: listenarr-nfs-storage + namespace: listenarr + labels: + app.kubernetes.io/name: listenarr-nfs-storage + app.kubernetes.io/instance: listenarr + app.kubernetes.io/part-of: listenarr +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage/Audiobooks + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: listenarr/charts/listenarr/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: listenarr + labels: + app.kubernetes.io/instance: listenarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: listenarr + helm.sh/chart: listenarr-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: listenarr +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" + storageClassName: "ceph-block" +--- +# Source: listenarr/templates/persistent-volume-claim.yaml 
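+# Note: the NFS media volumes in this file follow a static-binding pattern:
+# a pre-created PersistentVolume with storageClassName nfs-client, and a
+# claim that pins it via spec.volumeName, as in the listenarr claim that
+# follows. The 1Gi capacity is nominal; for NFS it is a bookkeeping value,
+# not an enforced quota. Reduced to its essentials, the pairing is:
+#
+#   kind: PersistentVolumeClaim
+#   spec:
+#     volumeName: listenarr-nfs-storage   # binds to the PV of that name
+#     storageClassName: nfs-client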
+apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: listenarr-nfs-storage + namespace: listenarr + labels: + app.kubernetes.io/name: listenarr-nfs-storage + app.kubernetes.io/instance: listenarr + app.kubernetes.io/part-of: listenarr +spec: + volumeName: listenarr-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: listenarr/charts/listenarr/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: listenarr + labels: + app.kubernetes.io/instance: listenarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: listenarr + app.kubernetes.io/service: listenarr + helm.sh/chart: listenarr-4.4.0 + namespace: listenarr +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 5000 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: listenarr + app.kubernetes.io/name: listenarr +--- +# Source: listenarr/charts/listenarr/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: listenarr + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: listenarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: listenarr + helm.sh/chart: listenarr-4.4.0 + namespace: listenarr +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: listenarr + app.kubernetes.io/instance: listenarr + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: listenarr + app.kubernetes.io/name: listenarr + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: LISTENARR_PUBLIC_URL + value: https://listenarr.alexlebens.net + image: therobbiedavis/listenarr:canary-0.2.35 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 50m + memory: 128Mi + volumeMounts: + - mountPath: /app/config + name: config + - mountPath: /data + name: media + volumes: + - name: config + persistentVolumeClaim: + claimName: listenarr + - name: media + persistentVolumeClaim: + claimName: listenarr-nfs-storage +--- +# Source: listenarr/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-listenarr + namespace: listenarr + labels: + app.kubernetes.io/name: http-route-listenarr + app.kubernetes.io/instance: listenarr + app.kubernetes.io/part-of: listenarr +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - listenarr.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: listenarr + port: 80 + weight: 100 diff --git a/clusters/cl01tl/manifests/omni-tools/omni-tools.yaml b/clusters/cl01tl/manifests/omni-tools/omni-tools.yaml new file mode 100644 index 000000000..03b280a74 --- /dev/null +++ b/clusters/cl01tl/manifests/omni-tools/omni-tools.yaml @@ -0,0 +1,100 @@ +--- +# Source: omni-tools/charts/omni-tools/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: omni-tools + labels: + app.kubernetes.io/instance: omni-tools + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: omni-tools + app.kubernetes.io/service: omni-tools + helm.sh/chart: omni-tools-4.4.0 + 
namespace: omni-tools +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: omni-tools + app.kubernetes.io/name: omni-tools +--- +# Source: omni-tools/charts/omni-tools/templates/common.yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: omni-tools + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: omni-tools + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: omni-tools + helm.sh/chart: omni-tools-4.4.0 + namespace: omni-tools +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: omni-tools + app.kubernetes.io/instance: omni-tools + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: omni-tools + app.kubernetes.io/name: omni-tools + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - image: iib0011/omni-tools:0.6.0 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 50m + memory: 512Mi +--- +# Source: omni-tools/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-omni-tools + namespace: omni-tools + labels: + app.kubernetes.io/name: http-route-omni-tools + app.kubernetes.io/instance: omni-tools + app.kubernetes.io/part-of: omni-tools +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - omni-tools.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: omni-tools + port: 80 + weight: 100 diff --git a/clusters/cl01tl/manifests/outline/outline.yaml b/clusters/cl01tl/manifests/outline/outline.yaml new file mode 100644 index 000000000..8334e69b2 --- /dev/null +++ b/clusters/cl01tl/manifests/outline/outline.yaml @@ -0,0 +1,988 @@ +--- +# Source: outline/charts/outline/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: outline + labels: + app.kubernetes.io/instance: outline + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: outline + app.kubernetes.io/service: outline + helm.sh/chart: outline-4.4.0 + namespace: outline +spec: + type: ClusterIP + ports: + - port: 3000 + targetPort: 3000 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: outline + app.kubernetes.io/name: outline +--- +# Source: outline/charts/cloudflared-outline/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: outline-cloudflared-outline + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: outline + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cloudflared-outline + app.kubernetes.io/version: 2025.10.0 + helm.sh/chart: cloudflared-outline-1.23.0 + namespace: outline +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: cloudflared-outline + app.kubernetes.io/instance: outline + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: outline + app.kubernetes.io/name: cloudflared-outline + spec: + 
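+# Note: the container below passes the tunnel token as
+# "--token $(CF_MANAGED_TUNNEL_TOKEN)". The $(VAR) form is Kubernetes
+# dependent environment expansion: the kubelet substitutes the value of an
+# env var declared earlier in the same container, so the secret value never
+# appears in the manifest itself. A minimal sketch of the mechanism, with a
+# hypothetical name for illustration:
+#
+#   env:
+#     - name: TOKEN
+#       valueFrom:
+#         secretKeyRef: {name: some-secret, key: token}
+#   args: ["run", "--token", "$(TOKEN)"]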
enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - args: + - tunnel + - --protocol + - http2 + - --no-autoupdate + - run + - --token + - $(CF_MANAGED_TUNNEL_TOKEN) + env: + - name: CF_MANAGED_TUNNEL_TOKEN + valueFrom: + secretKeyRef: + key: cf-tunnel-token + name: outline-cloudflared-secret + image: cloudflare/cloudflared:2025.11.1 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi +--- +# Source: outline/charts/outline/templates/common.yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: outline + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: outline + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: outline + helm.sh/chart: outline-4.4.0 + namespace: outline +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: outline + app.kubernetes.io/instance: outline + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: outline + app.kubernetes.io/name: outline + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: NODE_ENV + value: production + - name: URL + value: https://wiki.alexlebens.dev + - name: PORT + value: "3000" + - name: SECRET_KEY + valueFrom: + secretKeyRef: + key: secret-key + name: outline-key-secret + - name: UTILS_SECRET + valueFrom: + secretKeyRef: + key: utils-key + name: outline-key-secret + - name: POSTGRES_USERNAME + valueFrom: + secretKeyRef: + key: username + name: outline-postgresql-17-cluster-app + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + key: password + name: outline-postgresql-17-cluster-app + - name: POSTGRES_DATABASE_NAME + valueFrom: + secretKeyRef: + key: dbname + name: outline-postgresql-17-cluster-app + - name: POSTGRES_DATABASE_HOST + valueFrom: + secretKeyRef: + key: host + name: outline-postgresql-17-cluster-app + - name: POSTGRES_DATABASE_PORT + valueFrom: + secretKeyRef: + key: port + name: outline-postgresql-17-cluster-app + - name: DATABASE_URL + value: postgres://$(POSTGRES_USERNAME):$(POSTGRES_PASSWORD)@$(POSTGRES_DATABASE_HOST):$(POSTGRES_DATABASE_PORT)/$(POSTGRES_DATABASE_NAME) + - name: DATABASE_URL_TEST + value: postgres://$(POSTGRES_USERNAME):$(POSTGRES_PASSWORD)@$(POSTGRES_DATABASE_HOST):$(POSTGRES_DATABASE_PORT)/$(POSTGRES_DATABASE_NAME)-test + - name: DATABASE_CONNECTION_POOL_MIN + value: "2" + - name: DATABASE_CONNECTION_POOL_MAX + value: "20" + - name: PGSSLMODE + value: disable + - name: REDIS_URL + value: redis://redis-replication-outline-master.outline:6379 + - name: FILE_STORAGE + value: s3 + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + key: AWS_ACCESS_KEY_ID + name: ceph-bucket-outline + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: AWS_SECRET_ACCESS_KEY + name: ceph-bucket-outline + - name: AWS_REGION + value: us-east-1 + - name: AWS_S3_UPLOAD_BUCKET_NAME + valueFrom: + configMapKeyRef: + key: BUCKET_NAME + name: ceph-bucket-outline + - name: AWS_S3_UPLOAD_BUCKET_URL + value: https://objects.alexlebens.dev + - name: AWS_S3_FORCE_PATH_STYLE + value: "true" + - name: AWS_S3_ACL + value: private + - name: FILE_STORAGE_UPLOAD_MAX_SIZE + 
value: "26214400" + - name: FORCE_HTTPS + value: "false" + - name: ENABLE_UPDATES + value: "false" + - name: WEB_CONCURRENCY + value: "1" + - name: FILE_STORAGE_IMPORT_MAX_SIZE + value: "5.12e+06" + - name: LOG_LEVEL + value: info + - name: DEFAULT_LANGUAGE + value: en_US + - name: RATE_LIMITER_ENABLED + value: "false" + - name: DEVELOPMENT_UNSAFE_INLINE_CSP + value: "false" + - name: OIDC_CLIENT_ID + valueFrom: + secretKeyRef: + key: client + name: outline-oidc-secret + - name: OIDC_CLIENT_SECRET + valueFrom: + secretKeyRef: + key: secret + name: outline-oidc-secret + - name: OIDC_AUTH_URI + value: https://auth.alexlebens.dev/application/o/authorize/ + - name: OIDC_TOKEN_URI + value: https://auth.alexlebens.dev/application/o/token/ + - name: OIDC_USERINFO_URI + value: https://auth.alexlebens.dev/application/o/userinfo/ + - name: OIDC_USERNAME_CLAIM + value: email + - name: OIDC_DISPLAY_NAME + value: Authentik + - name: OIDC_SCOPES + value: openid profile email + image: outlinewiki/outline:1.1.0 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 512Mi +--- +# Source: outline/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: outline-postgresql-17-cluster + namespace: outline + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: outline-postgresql-17 + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "outline-postgresql-17-external-backup" + serverName: "outline-postgresql-17-backup-1" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "outline-postgresql-17-garage-local-backup" + serverName: "outline-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "outline-postgresql-17-recovery" + serverName: outline-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 100m + memory: 256Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: outline-postgresql-17-backup-1 + + externalClusters: + - name: outline-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "outline-postgresql-17-recovery" + serverName: outline-postgresql-17-backup-1 +--- +# Source: outline/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: outline-key-secret + namespace: outline + labels: + app.kubernetes.io/name: outline-key-secret + app.kubernetes.io/instance: outline + 
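+# Note: the Cluster defined above bootstraps with "recovery", replaying the
+# server named outline-postgresql-17-backup-1 from the
+# outline-postgresql-17-recovery object store (the local Garage copy). Both
+# backup plugins pin the same serverName, so in each store the WALs and
+# base backups land under that one server prefix. A common CNPG convention
+# (an assumption here, not shown in this render) is to bump the serverName
+# on each re-initialisation so new backups do not overwrite the generation
+# being restored from, e.g.:
+#
+#   plugins:
+#     - name: barman-cloud.cloudnative-pg.io
+#       parameters:
+#         serverName: "outline-postgresql-17-backup-2"  # hypothetical next generation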
app.kubernetes.io/part-of: outline +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: secret-key + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/outline/key + metadataPolicy: None + property: secret-key + - secretKey: utils-key + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/outline/key + metadataPolicy: None + property: utils-key +--- +# Source: outline/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: outline-oidc-secret + namespace: outline + labels: + app.kubernetes.io/name: outline-oidc-secret + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: client + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/outline + metadataPolicy: None + property: client + - secretKey: secret + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/outline + metadataPolicy: None + property: secret +--- +# Source: outline/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: outline-cloudflared-secret + namespace: outline + labels: + app.kubernetes.io/name: outline-cloudflared-secret + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: cf-tunnel-token + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cloudflare/tunnels/outline + metadataPolicy: None + property: token +--- +# Source: outline/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: outline-postgresql-17-cluster-backup-secret + namespace: outline + labels: + app.kubernetes.io/name: outline-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: outline/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: outline-postgresql-17-cluster-backup-secret-garage + namespace: outline + labels: + app.kubernetes.io/name: outline-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: 
outline/templates/object-bucket-claim.yaml +apiVersion: objectbucket.io/v1alpha1 +kind: ObjectBucketClaim +metadata: + name: ceph-bucket-outline + labels: + app.kubernetes.io/name: ceph-bucket-outline + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline +spec: + generateBucketName: bucket-outline + storageClassName: ceph-bucket + additionalConfig: + bucketPolicy: | + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor", + "Effect": "Allow", + "Action": [ + "s3:GetObjectAcl", + "s3:DeleteObject", + "s3:PutObject", + "s3:GetObject", + "s3:PutObjectAcl" + ], + "Resource": "arn:aws:s3:::bucket-outline-630c57e0-d475-4d78-926c-c1c082291d73/*" + } + ] + } +--- +# Source: outline/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "outline-postgresql-17-external-backup" + namespace: outline + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: outline-postgresql-17 + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/outline/outline-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: outline-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: outline-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: outline/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "outline-postgresql-17-garage-local-backup" + namespace: outline + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: outline-postgresql-17 + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/outline/outline-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: outline-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: outline-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: outline-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: outline/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "outline-postgresql-17-recovery" + namespace: outline + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: outline-postgresql-17 + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + configuration: + destinationPath: s3://postgres-backups/cl01tl/outline/outline-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + wal: + compression: snappy + maxParallel: 1 + data: + compression: snappy + jobs: 1 + s3Credentials: + accessKeyId: + name: outline-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: outline-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY +--- +# Source: outline/charts/postgres-17-cluster/templates/prometheus-rule.yaml +apiVersion: 
monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+ name: outline-postgresql-17-alert-rules
+ namespace: outline
+ labels:
+ helm.sh/chart: postgres-17-cluster-6.16.0
+ app.kubernetes.io/name: outline-postgresql-17
+ app.kubernetes.io/instance: outline
+ app.kubernetes.io/part-of: outline
+ app.kubernetes.io/version: "6.16.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ groups:
+ - name: cloudnative-pg/outline-postgresql-17
+ rules:
+ - alert: CNPGClusterBackendsWaitingWarning
+ annotations:
+ summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
+ description: |-
+ Pod {{ $labels.pod }}
+ has been waiting for longer than 5 minutes
+ expr: |
+ cnpg_backends_waiting_total > 300
+ for: 1m
+ labels:
+ severity: warning
+ namespace: outline
+ cnpg_cluster: outline-postgresql-17-cluster
+ - alert: CNPGClusterDatabaseDeadlockConflictsWarning
+ annotations:
+ summary: CNPG Cluster has over 10 deadlock conflicts.
+ description: |-
+ There are over 10 deadlock conflicts in
+ {{ $labels.pod }}
+ expr: |
+ cnpg_pg_stat_database_deadlocks > 10
+ for: 1m
+ labels:
+ severity: warning
+ namespace: outline
+ cnpg_cluster: outline-postgresql-17-cluster
+ - alert: CNPGClusterHACritical
+ annotations:
+ summary: CNPG Cluster has no standby replicas!
+ description: |-
+ CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
+ risk of data loss and downtime if the primary instance fails.
+
+ The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
+ will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
+
+ This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
+ instances. The replaced instance may need some time to catch up with the cluster primary instance.
+
+ This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
+ case you may want to silence it.
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
+ expr: |
+ max by (job) (cnpg_pg_replication_streaming_replicas{namespace="outline"} - cnpg_pg_replication_is_wal_receiver_up{namespace="outline"}) < 1
+ for: 5m
+ labels:
+ severity: critical
+ namespace: outline
+ cnpg_cluster: outline-postgresql-17-cluster
+ - alert: CNPGClusterHAWarning
+ annotations:
+ summary: CNPG Cluster has fewer than 2 standby replicas.
+ description: |-
+ CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
+ your cluster at risk if another instance fails. The cluster is still able to operate normally, although
+ the `-ro` and `-r` endpoints operate at reduced capacity.
+
+ This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
+ need some time to catch up with the cluster primary instance.
+
+ This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
+ In this case you may want to silence it.
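+# Note: the {{`{{`}} ... {{`}}`}} sequences in these descriptions appear to
+# be Helm escapes: {{` ... `}} emits its content literally, so the chart
+# outputs "{{ $labels.job }}" for Prometheus's own alert templating instead
+# of letting Helm try to expand it. Once alert templating runs, a line like
+# the one above would read, for example:
+#
+#   CloudNativePG Cluster "outline-postgresql-17-cluster" has only 1 standby replicas, ...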
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="outline"} - cnpg_pg_replication_is_wal_receiver_up{namespace="outline"}) < 2 + for: 5m + labels: + severity: warning + namespace: outline + cnpg_cluster: outline-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsCritical + annotations: + summary: CNPG Instance maximum number of connections critical! + description: |- + CloudNativePG Cluster "outline/outline-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95 + for: 5m + labels: + severity: critical + namespace: outline + cnpg_cluster: outline-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsWarning + annotations: + summary: CNPG Instance is approaching the maximum number of connections. + description: |- + CloudNativePG Cluster "outline/outline-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: outline + cnpg_cluster: outline-postgresql-17-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "outline/outline-postgresql-17-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. + + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="outline",pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: outline + cnpg_cluster: outline-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "outline/outline-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. 
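+# Note: this alert is the monitoring counterpart of the scheduling hint on
+# the Cluster above (affinity.enablePodAntiAffinity with topologyKey
+# kubernetes.io/hostname): the operator asks the scheduler to spread the 3
+# instances across nodes, and the rule fires if more than one pod still
+# lands on a single node. Roughly the shape the operator generates (a
+# sketch, labelSelector elided):
+#
+#   podAntiAffinity:
+#     preferredDuringSchedulingIgnoredDuringExecution:
+#       - weight: 100
+#         podAffinityTerm:
+#           topologyKey: kubernetes.io/hostname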
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: outline + cnpg_cluster: outline-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: outline + cnpg_cluster: outline-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! + description: |- + CloudNativePG Cluster "outline/outline-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: outline + cnpg_cluster: outline-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "outline/outline-postgresql-17-cluster" is running low on disk space. Check attached PVCs. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
+ expr: |
+ max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
+ max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
+ max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
+ /
+ sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
+ *
+ on(namespace, persistentvolumeclaim) group_left(volume)
+ kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}
+ ) > 0.7
+ for: 5m
+ labels:
+ severity: warning
+ namespace: outline
+ cnpg_cluster: outline-postgresql-17-cluster
+ - alert: CNPGClusterOffline
+ annotations:
+ summary: CNPG Cluster has no running instances!
+ description: |-
+ CloudNativePG Cluster "outline/outline-postgresql-17-cluster" has no ready instances.
+
+ Having an offline cluster means your applications will not be able to access the database, leading to
+ potential service disruption and/or data loss.
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
+ expr: |
+ (count(cnpg_collector_up{namespace="outline",pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
+ for: 5m
+ labels:
+ severity: critical
+ namespace: outline
+ cnpg_cluster: outline-postgresql-17-cluster
+ - alert: CNPGClusterPGDatabaseXidAgeWarning
+ annotations:
+ summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
+ description: |-
+ Over 300,000,000 transactions from frozen XID
+ on pod {{ $labels.pod }}
+ expr: |
+ cnpg_pg_database_xid_age > 300000000
+ for: 1m
+ labels:
+ severity: warning
+ namespace: outline
+ cnpg_cluster: outline-postgresql-17-cluster
+ - alert: CNPGClusterPGReplicationWarning
+ annotations:
+ summary: CNPG Cluster standby is lagging behind the primary.
+ description: |-
+ Standby is lagging behind by over 300 seconds (5 minutes)
+ expr: |
+ cnpg_pg_replication_lag > 300
+ for: 1m
+ labels:
+ severity: warning
+ namespace: outline
+ cnpg_cluster: outline-postgresql-17-cluster
+ - alert: CNPGClusterReplicaFailingReplicationWarning
+ annotations:
+ summary: CNPG Cluster has a replica that is failing to replicate.
+ description: |-
+ Replica {{ $labels.pod }}
+ is failing to replicate
+ expr: |
+ cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
+ for: 1m
+ labels:
+ severity: warning
+ namespace: outline
+ cnpg_cluster: outline-postgresql-17-cluster
+ - alert: CNPGClusterZoneSpreadWarning
+ annotations:
+ summary: CNPG Cluster has instances in the same zone.
+ description: |-
+ CloudNativePG Cluster "outline/outline-postgresql-17-cluster" has instances in the same availability zone.
+ + A disaster in one availability zone will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: outline + cnpg_cluster: outline-postgresql-17-cluster +--- +# Source: outline/templates/redis-replication.yaml +apiVersion: redis.redis.opstreelabs.in/v1beta2 +kind: RedisReplication +metadata: + name: redis-replication-outline + namespace: outline + labels: + app.kubernetes.io/name: redis-replication-outline + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline +spec: + clusterSize: 3 + podSecurityContext: + runAsUser: 1000 + fsGroup: 1000 + kubernetesConfig: + image: quay.io/opstree/redis:v8.0.3 + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 50m + memory: 128Mi + storage: + volumeClaimTemplate: + spec: + storageClassName: ceph-block + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 1Gi + redisExporter: + enabled: true + image: quay.io/opstree/redis-exporter:v1.48.0 +--- +# Source: outline/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "outline-postgresql-17-daily-backup-scheduled-backup" + namespace: outline + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: outline-postgresql-17 + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: outline-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "outline-postgresql-17-external-backup" +--- +# Source: outline/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "outline-postgresql-17-live-backup-scheduled-backup" + namespace: outline + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: outline-postgresql-17 + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: outline-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "outline-postgresql-17-garage-local-backup" +--- +# Source: outline/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: redis-replication-outline + namespace: outline + labels: + app.kubernetes.io/name: redis-replication-outline + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline + redis-operator: "true" + env: production +spec: + selector: + matchLabels: + redis_setup_type: replication + endpoints: + - port: redis-exporter + interval: 30s + scrapeTimeout: 10s diff --git a/clusters/cl01tl/manifests/overseerr/overseerr.yaml 
b/clusters/cl01tl/manifests/overseerr/overseerr.yaml new file mode 100644 index 000000000..3e45d4702 --- /dev/null +++ b/clusters/cl01tl/manifests/overseerr/overseerr.yaml @@ -0,0 +1,215 @@ +--- +# Source: overseerr/charts/app-template/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: overseerr-main + labels: + app.kubernetes.io/instance: overseerr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: overseerr + helm.sh/chart: app-template-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: overseerr +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "10Gi" + storageClassName: "ceph-block" +--- +# Source: overseerr/charts/app-template/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: overseerr + labels: + app.kubernetes.io/instance: overseerr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: overseerr + app.kubernetes.io/service: overseerr + helm.sh/chart: app-template-4.4.0 + namespace: overseerr +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 5055 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: overseerr + app.kubernetes.io/name: overseerr +--- +# Source: overseerr/charts/app-template/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: overseerr + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: overseerr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: overseerr + helm.sh/chart: app-template-4.4.0 + namespace: overseerr +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: overseerr + app.kubernetes.io/instance: overseerr + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: overseerr + app.kubernetes.io/name: overseerr + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + image: ghcr.io/sct/overseerr:1.34.0 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 512Mi + volumeMounts: + - mountPath: /app/config + name: main + volumes: + - name: main + persistentVolumeClaim: + claimName: overseerr-main +--- +# Source: overseerr/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: overseerr-main-backup-secret + namespace: overseerr + labels: + app.kubernetes.io/name: overseerr-main-backup-secret + app.kubernetes.io/instance: overseerr + app.kubernetes.io/part-of: overseerr +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/overseerr/overseerr-main" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + 
metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: overseerr/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-overseerr + namespace: overseerr + labels: + app.kubernetes.io/name: http-route-overseerr + app.kubernetes.io/instance: overseerr + app.kubernetes.io/part-of: overseerr +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - overseerr.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: overseerr + port: 80 + weight: 100 +--- +# Source: overseerr/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: overseerr-main-backup-source + namespace: overseerr + labels: + app.kubernetes.io/name: overseerr-main-backup-source + app.kubernetes.io/instance: overseerr + app.kubernetes.io/part-of: overseerr +spec: + sourcePVC: overseerr-main + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: overseerr-main-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot diff --git a/clusters/cl01tl/manifests/photoview/photoview.yaml b/clusters/cl01tl/manifests/photoview/photoview.yaml new file mode 100644 index 000000000..6ae0d2dc8 --- /dev/null +++ b/clusters/cl01tl/manifests/photoview/photoview.yaml @@ -0,0 +1,773 @@ +--- +# Source: photoview/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: photoview-nfs-storage + namespace: photoview + labels: + app.kubernetes.io/name: photoview-nfs-storage + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage/Pictures + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: photoview/charts/photoview/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: photoview-cache + labels: + app.kubernetes.io/instance: photoview + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: photoview + helm.sh/chart: photoview-4.4.0 + namespace: photoview +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "10Gi" + storageClassName: "ceph-block" +--- +# Source: photoview/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: photoview-nfs-storage + namespace: photoview + labels: + app.kubernetes.io/name: photoview-nfs-storage + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview +spec: + volumeName: photoview-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: photoview/charts/photoview/templates/common.yaml +apiVersion: v1 +kind: Service 
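+# Note: unlike the apps above that remap Service port 80 to an app-specific
+# container port (5006 for actual, 5055 for overseerr), photoview serves
+# HTTP on container port 80 itself, so port and targetPort match here.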
+metadata: + name: photoview + labels: + app.kubernetes.io/instance: photoview + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: photoview + app.kubernetes.io/service: photoview + helm.sh/chart: photoview-4.4.0 + namespace: photoview +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: photoview + app.kubernetes.io/name: photoview +--- +# Source: photoview/charts/photoview/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: photoview + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: photoview + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: photoview + helm.sh/chart: photoview-4.4.0 + namespace: photoview +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: photoview + app.kubernetes.io/instance: photoview + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: photoview + app.kubernetes.io/name: photoview + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + initContainers: + - command: + - /bin/sh + - -ec + - | + /bin/chown -R 999:999 /app/cache + image: busybox:1.37.0 + imagePullPolicy: IfNotPresent + name: init-chmod-data + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + runAsUser: 0 + volumeMounts: + - mountPath: /app/cache + name: cache + containers: + - env: + - name: PHOTOVIEW_DATABASE_DRIVER + value: postgres + - name: PHOTOVIEW_POSTGRES_URL + valueFrom: + secretKeyRef: + key: uri + name: photoview-postgresql-17-cluster-app + - name: PHOTOVIEW_MEDIA_CACHE + value: /app/cache + image: photoview/photoview:2.4.0 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 512Mi + volumeMounts: + - mountPath: /app/cache + name: cache + - mountPath: /photos + name: media + readOnly: true + volumes: + - name: cache + persistentVolumeClaim: + claimName: photoview-cache + - name: media + persistentVolumeClaim: + claimName: photoview-nfs-storage +--- +# Source: photoview/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: photoview-postgresql-17-cluster + namespace: photoview + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: photoview-postgresql-17 + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "photoview-postgresql-17-external-backup" + serverName: "photoview-postgresql-17-backup-1" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "photoview-postgresql-17-garage-local-backup" + serverName: "photoview-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "photoview-postgresql-17-recovery" + serverName: 
photoview-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 100m + memory: 256Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: photoview-postgresql-17-backup-1 + + externalClusters: + - name: photoview-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "photoview-postgresql-17-recovery" + serverName: photoview-postgresql-17-backup-1 +--- +# Source: photoview/templates/external-secrets.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: photoview-postgresql-17-cluster-backup-secret + namespace: photoview + labels: + app.kubernetes.io/name: photoview-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: photoview/templates/external-secrets.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: photoview-postgresql-17-cluster-backup-secret-garage + namespace: photoview + labels: + app.kubernetes.io/name: photoview-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: photoview/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-photoview + namespace: photoview + labels: + app.kubernetes.io/name: http-route-photoview + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - photoview.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: photoview + port: 80 + weight: 100 +--- +# Source: photoview/charts/postgres-17-cluster/templates/object-store.yaml 
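+# The three ObjectStores that follow are the targets the Cluster spec above
+# refers to by barmanObjectName: its plugin entries point WAL archiving and
+# backups at these stores, roughly like:
+#
+#   plugins:
+#     - name: barman-cloud.cloudnative-pg.io
+#       isWALArchiver: true
+#       parameters:
+#         barmanObjectName: photoview-postgresql-17-garage-local-backup
+#
+# "external-backup" targets DigitalOcean Spaces (30d retention), while
+# "garage-local-backup" (3d retention) and "recovery" target the in-cluster
+# Garage endpoint.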
+apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "photoview-postgresql-17-external-backup" + namespace: photoview + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: photoview-postgresql-17 + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/photoview/photoview-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: photoview-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: photoview-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: photoview/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "photoview-postgresql-17-garage-local-backup" + namespace: photoview + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: photoview-postgresql-17 + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/photoview/photoview-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: photoview-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: photoview-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: photoview-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: photoview/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "photoview-postgresql-17-recovery" + namespace: photoview + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: photoview-postgresql-17 + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + configuration: + destinationPath: s3://postgres-backups/cl01tl/photoview/photoview-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + wal: + compression: snappy + maxParallel: 1 + data: + compression: snappy + jobs: 1 + s3Credentials: + accessKeyId: + name: photoview-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: photoview-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY +--- +# Source: photoview/charts/postgres-17-cluster/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: photoview-postgresql-17-alert-rules + namespace: photoview + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: photoview-postgresql-17 + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + groups: + - name: cloudnative-pg/photoview-postgresql-17 + rules: + - alert: CNPGClusterBackendsWaitingWarning + annotations: + summary: CNPG Cluster has a backend waiting for longer than 5 minutes. + description: |- + Pod {{ $labels.pod }} + has been waiting for longer than 5 minutes + expr: | + cnpg_backends_waiting_total > 300 + for: 1m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterDatabaseDeadlockConflictsWarning + annotations: + summary: CNPG Cluster has over 10 deadlock conflicts. + description: |- + There are over 10 deadlock conflicts in + {{ $labels.pod }} + expr: | + cnpg_pg_stat_database_deadlocks > 10 + for: 1m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterHACritical + annotations: + summary: CNPG Cluster has no standby replicas! + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe + risk of data loss and downtime if the primary instance fails. + + The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint + will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary. + + This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer + instances. The replaced instance may need some time to catch up with the cluster primary instance. + + This alarm will always trigger if your cluster is configured to run with only 1 instance. In this + case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="photoview"} - cnpg_pg_replication_is_wal_receiver_up{namespace="photoview"}) < 1 + for: 5m + labels: + severity: critical + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterHAWarning + annotations: + summary: CNPG Cluster has fewer than 2 standby replicas. + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting + your cluster at risk if another instance fails. The cluster is still able to operate normally, although + the `-ro` and `-r` endpoints operate at reduced capacity. + + This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may + need some time to catch up with the cluster primary instance. + + This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances. + In this case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="photoview"} - cnpg_pg_replication_is_wal_receiver_up{namespace="photoview"}) < 2 + for: 5m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsCritical + annotations: + summary: CNPG Instance is critically close to the maximum number of connections! + description: |- + CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95 + for: 5m + labels: + severity: critical + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsWarning + annotations: + summary: CNPG Instance is approaching the maximum number of connections. + description: |- + CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. + + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="photoview",pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster has a query running for longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space!
+ description: |- + CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" is running low on disk space. Check attached PVCs. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.7 + for: 5m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterOffline + annotations: + summary: CNPG Cluster has no running instances! + description: |- + CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" has no ready instances. 
+ + Having an offline cluster means your applications will not be able to access the database, leading to + potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md + expr: | + (count(cnpg_collector_up{namespace="photoview",pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0 + for: 5m + labels: + severity: critical + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterPGDatabaseXidAgeWarning + annotations: + summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one. + description: |- + Over 300,000,000 transactions from frozen xid + on pod {{ $labels.pod }} + expr: | + cnpg_pg_database_xid_age > 300000000 + for: 1m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterPGReplicationWarning + annotations: + summary: CNPG Cluster standby is lagging behind the primary. + description: |- + Standby is lagging behind by over 300 seconds (5 minutes) + expr: | + cnpg_pg_replication_lag > 300 + for: 1m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterReplicaFailingReplicationWarning + annotations: + summary: CNPG Cluster has a replica that is failing to replicate. + description: |- + Replica {{ $labels.pod }} + is failing to replicate + expr: | + cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up + for: 1m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterZoneSpreadWarning + annotations: + summary: CNPG Cluster has instances in the same zone. + description: |- + CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" has instances in the same availability zone. + + A disaster in one availability zone will lead to a potential service disruption and/or data loss.
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster +--- +# Source: photoview/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "photoview-postgresql-17-daily-backup-scheduled-backup" + namespace: photoview + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: photoview-postgresql-17 + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: photoview-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "photoview-postgresql-17-external-backup" +--- +# Source: photoview/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "photoview-postgresql-17-live-backup-scheduled-backup" + namespace: photoview + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: photoview-postgresql-17 + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: photoview-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "photoview-postgresql-17-garage-local-backup" diff --git a/clusters/cl01tl/manifests/plex/plex.yaml b/clusters/cl01tl/manifests/plex/plex.yaml new file mode 100644 index 000000000..a05f2bcc7 --- /dev/null +++ b/clusters/cl01tl/manifests/plex/plex.yaml @@ -0,0 +1,190 @@ +--- +# Source: plex/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: plex-nfs-storage + namespace: plex + labels: + app.kubernetes.io/name: plex-nfs-storage + app.kubernetes.io/instance: plex + app.kubernetes.io/part-of: plex +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: plex/charts/plex/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: plex-config + labels: + app.kubernetes.io/instance: plex + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: plex + helm.sh/chart: plex-4.4.0 + namespace: plex +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "100Gi" + storageClassName: "ceph-block" +--- +# Source: plex/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: plex-nfs-storage + namespace: plex + labels: + app.kubernetes.io/name: plex-nfs-storage + app.kubernetes.io/instance: plex + app.kubernetes.io/part-of: plex +spec: 
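+  # volumeName statically binds this claim to the pre-provisioned
+  # plex-nfs-storage PersistentVolume above, rather than letting the
+  # nfs-client storage class dynamically provision a new volume.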
+ volumeName: plex-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: plex/charts/plex/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: plex + labels: + app.kubernetes.io/instance: plex + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: plex + app.kubernetes.io/service: plex + helm.sh/chart: plex-4.4.0 + namespace: plex +spec: + type: LoadBalancer + ports: + - port: 32400 + targetPort: 32400 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: plex + app.kubernetes.io/name: plex +--- +# Source: plex/charts/plex/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: plex + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: plex + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: plex + helm.sh/chart: plex-4.4.0 + namespace: plex +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: plex + app.kubernetes.io/instance: plex + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: plex + app.kubernetes.io/name: plex + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: VERSION + value: docker + - name: PLEX_CLAIM + value: claim-XmGK2o9x54PbCzQaqj-J + image: ghcr.io/linuxserver/plex:1.42.2@sha256:ab81c7313fb5dc4d1f9562e5bbd5e5877a8a3c5ca6b9f9fff3437b5096a2b123 + imagePullPolicy: IfNotPresent + name: main + resources: + limits: + gpu.intel.com/i915: 1 + requests: + cpu: 10m + gpu.intel.com/i915: 1 + memory: 512Mi + volumeMounts: + - mountPath: /config + name: config + - mountPath: /mnt/store + name: media + readOnly: true + - mountPath: /transcode + name: transcode + volumes: + - name: config + persistentVolumeClaim: + claimName: plex-config + - name: media + persistentVolumeClaim: + claimName: plex-nfs-storage + - emptyDir: {} + name: transcode +--- +# Source: plex/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-plex + namespace: plex + labels: + app.kubernetes.io/name: http-route-plex + app.kubernetes.io/instance: plex + app.kubernetes.io/part-of: plex +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - plex.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: plex + port: 32400 + weight: 100 diff --git a/clusters/cl01tl/manifests/postiz/postiz.yaml b/clusters/cl01tl/manifests/postiz/postiz.yaml new file mode 100644 index 000000000..767db4ca6 --- /dev/null +++ b/clusters/cl01tl/manifests/postiz/postiz.yaml @@ -0,0 +1,1180 @@ +--- +# Source: postiz/charts/postiz/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: postiz-config + labels: + app.kubernetes.io/instance: postiz + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: postiz + helm.sh/chart: postiz-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: postiz +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "2Gi" + storageClassName: "ceph-block" +--- +# 
Source: postiz/charts/postiz/templates/common.yaml +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: postiz-uploads + labels: + app.kubernetes.io/instance: postiz + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: postiz + helm.sh/chart: postiz-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: postiz +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "10Gi" + storageClassName: "ceph-block" +--- +# Source: postiz/charts/postiz/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: postiz + labels: + app.kubernetes.io/instance: postiz + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: postiz + app.kubernetes.io/service: postiz + helm.sh/chart: postiz-4.4.0 + namespace: postiz +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 5000 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: postiz + app.kubernetes.io/name: postiz +--- +# Source: postiz/charts/cloudflared/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: postiz-cloudflared-postiz + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: postiz + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cloudflared-postiz + app.kubernetes.io/version: 2025.10.0 + helm.sh/chart: cloudflared-1.23.0 + namespace: postiz +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: cloudflared-postiz + app.kubernetes.io/instance: postiz + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: postiz + app.kubernetes.io/name: cloudflared-postiz + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - args: + - tunnel + - --protocol + - http2 + - --no-autoupdate + - run + - --token + - $(CF_MANAGED_TUNNEL_TOKEN) + env: + - name: CF_MANAGED_TUNNEL_TOKEN + valueFrom: + secretKeyRef: + key: cf-tunnel-token + name: postiz-cloudflared-secret + image: cloudflare/cloudflared:2025.11.1 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi +--- +# Source: postiz/charts/postiz/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: postiz + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: postiz + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: postiz + helm.sh/chart: postiz-4.4.0 + namespace: postiz +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: postiz + app.kubernetes.io/instance: postiz + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: postiz + app.kubernetes.io/name: postiz + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: MAIN_URL + value: https://postiz.alexlebens.dev + - name: FRONTEND_URL + value: https://postiz.alexlebens.dev + - name: NEXT_PUBLIC_BACKEND_URL + value: https://postiz.alexlebens.dev/api + - name: JWT_SECRET + valueFrom: + secretKeyRef: + key: JWT_SECRET + name: postiz-config-secret + - name: 
DATABASE_URL + valueFrom: + secretKeyRef: + key: uri + name: postiz-postgresql-17-cluster-app + - name: REDIS_URL + valueFrom: + secretKeyRef: + key: REDIS_URL + name: postiz-redis-config + - name: BACKEND_INTERNAL_URL + value: http://localhost:3000 + - name: IS_GENERAL + value: "true" + - name: STORAGE_PROVIDER + value: local + - name: UPLOAD_DIRECTORY + value: /uploads + - name: NEXT_PUBLIC_UPLOAD_DIRECTORY + value: /uploads + - name: NEXT_PUBLIC_POSTIZ_OAUTH_DISPLAY_NAME + value: Authentik + - name: NEXT_PUBLIC_POSTIZ_OAUTH_LOGO_URL + value: https://cdn.jsdelivr.net/gh/selfhst/icons/png/authentik.png + - name: POSTIZ_GENERIC_OAUTH + value: "true" + - name: POSTIZ_OAUTH_URL + value: https://auth.alexlebens.dev + - name: POSTIZ_OAUTH_AUTH_URL + value: https://auth.alexlebens.dev/application/o/authorize/ + - name: POSTIZ_OAUTH_TOKEN_URL + value: https://auth.alexlebens.dev/application/o/token/ + - name: POSTIZ_OAUTH_USERINFO_URL + value: https://auth.alexlebens.dev/application/o/userinfo/ + - name: POSTIZ_OAUTH_CLIENT_ID + valueFrom: + secretKeyRef: + key: client + name: postiz-oidc-secret + - name: POSTIZ_OAUTH_CLIENT_SECRET + valueFrom: + secretKeyRef: + key: secret + name: postiz-oidc-secret + - name: POSTIZ_OAUTH_SCOPE + value: openid profile email + image: ghcr.io/gitroomhq/postiz-app:v2.8.3 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - mountPath: /config + name: config + - mountPath: /uploads + name: uploads + volumes: + - name: config + persistentVolumeClaim: + claimName: postiz-config + - name: uploads + persistentVolumeClaim: + claimName: postiz-uploads +--- +# Source: postiz/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: postiz-postgresql-17-cluster + namespace: postiz + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: postiz-postgresql-17 + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "postiz-postgresql-17-external-backup" + serverName: "postiz-postgresql-17-backup-2" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "postiz-postgresql-17-garage-local-backup" + serverName: "postiz-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "postiz-postgresql-17-recovery" + serverName: postiz-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 100m + memory: 256Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: 
postiz-postgresql-17-backup-1 + + externalClusters: + - name: postiz-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "postiz-postgresql-17-recovery" + serverName: postiz-postgresql-17-backup-1 +--- +# Source: postiz/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: postiz-config-secret + namespace: postiz + labels: + app.kubernetes.io/name: postiz-config-secret + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: JWT_SECRET + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/postiz/config + metadataPolicy: None + property: JWT_SECRET +--- +# Source: postiz/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: postiz-redis-config + namespace: postiz + labels: + app.kubernetes.io/name: postiz-redis-config + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: REDIS_URL + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/postiz/redis + metadataPolicy: None + property: REDIS_URL + - secretKey: user + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/postiz/redis + metadataPolicy: None + property: user + - secretKey: password + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/postiz/redis + metadataPolicy: None + property: password +--- +# Source: postiz/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: postiz-oidc-secret + namespace: postiz + labels: + app.kubernetes.io/name: postiz-oidc-secret + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: client + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/postiz + metadataPolicy: None + property: client + - secretKey: secret + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/postiz + metadataPolicy: None + property: secret +--- +# Source: postiz/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: postiz-config-backup-secret + namespace: postiz + labels: + app.kubernetes.io/name: postiz-config-backup-secret + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/postiz/postiz-config" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + 
conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: postiz/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: postiz-uploads-backup-secret + namespace: postiz + labels: + app.kubernetes.io/name: postiz-uploads-backup-secret + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/postiz/postiz-uploads" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: postiz/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: postiz-cloudflared-secret + namespace: postiz + labels: + app.kubernetes.io/name: postiz-cloudflared-secret + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: cf-tunnel-token + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cloudflare/tunnels/postiz + metadataPolicy: None + property: token +--- +# Source: postiz/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: postiz-postgresql-17-cluster-backup-secret + namespace: postiz + labels: + app.kubernetes.io/name: postiz-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: postiz/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: postiz-postgresql-17-cluster-backup-secret-garage + namespace: postiz + labels: + app.kubernetes.io/name: postiz-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: postiz + 
app.kubernetes.io/part-of: postiz +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: postiz/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-postiz + namespace: postiz + labels: + app.kubernetes.io/name: http-route-postiz + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - postiz.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: postiz + port: 80 + weight: 100 +--- +# Source: postiz/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "postiz-postgresql-17-external-backup" + namespace: postiz + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: postiz-postgresql-17 + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/postiz/postiz-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: postiz-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: postiz-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: postiz/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "postiz-postgresql-17-garage-local-backup" + namespace: postiz + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: postiz-postgresql-17 + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/postiz/postiz-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: postiz-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: postiz-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: postiz-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: postiz/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "postiz-postgresql-17-recovery" + namespace: postiz + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: postiz-postgresql-17 + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + 
configuration: + destinationPath: s3://postgres-backups/cl01tl/postiz/postiz-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + wal: + compression: snappy + maxParallel: 1 + data: + compression: snappy + jobs: 1 + s3Credentials: + accessKeyId: + name: postiz-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: postiz-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY +--- +# Source: postiz/charts/postgres-17-cluster/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: postiz-postgresql-17-alert-rules + namespace: postiz + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: postiz-postgresql-17 + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + groups: + - name: cloudnative-pg/postiz-postgresql-17 + rules: + - alert: CNPGClusterBackendsWaitingWarning + annotations: + summary: CNPG Cluster a backend is waiting for longer than 5 minutes. + description: |- + Pod {{ $labels.pod }} + has been waiting for longer than 5 minutes + expr: | + cnpg_backends_waiting_total > 300 + for: 1m + labels: + severity: warning + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterDatabaseDeadlockConflictsWarning + annotations: + summary: CNPG Cluster has over 10 deadlock conflicts. + description: |- + There are over 10 deadlock conflicts in + {{ $labels.pod }} + expr: | + cnpg_pg_stat_database_deadlocks > 10 + for: 1m + labels: + severity: warning + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterHACritical + annotations: + summary: CNPG Cluster has no standby replicas! + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe + risk of data loss and downtime if the primary instance fails. + + The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint + will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main. + + This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less + instances. The replaced instance may need some time to catch-up with the cluster primary instance. + + This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this + case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="postiz"} - cnpg_pg_replication_is_wal_receiver_up{namespace="postiz"}) < 1 + for: 5m + labels: + severity: critical + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterHAWarning + annotations: + summary: CNPG Cluster less than 2 standby replicas. + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting + your cluster at risk if another instance fails. The cluster is still able to operate normally, although + the `-ro` and `-r` endpoints operate at reduced capacity. + + This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may + need some time to catch-up with the cluster primary instance. 
+ + This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances. + In this case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="postiz"} - cnpg_pg_replication_is_wal_receiver_up{namespace="postiz"}) < 2 + for: 5m + labels: + severity: warning + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsCritical + annotations: + summary: CNPG Instance maximum number of connections critical! + description: |- + CloudNativePG Cluster "postiz/postiz-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="postiz", pod=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="postiz", pod=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95 + for: 5m + labels: + severity: critical + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsWarning + annotations: + summary: CNPG Instance is approaching the maximum number of connections. + description: |- + CloudNativePG Cluster "postiz/postiz-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="postiz", pod=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="postiz", pod=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "postiz/postiz-postgresql-17-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. + + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="postiz",pod=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "postiz/postiz-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="postiz", pod=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! + description: |- + CloudNativePG Cluster "postiz/postiz-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "postiz/postiz-postgresql-17-cluster" is running low on disk space. Check attached PVCs. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.7 + for: 5m + labels: + severity: warning + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterOffline + annotations: + summary: CNPG Cluster has no running instances! + description: |- + CloudNativePG Cluster "postiz/postiz-postgresql-17-cluster" has no ready instances. + + Having an offline cluster means your applications will not be able to access the database, leading to + potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md + expr: | + (count(cnpg_collector_up{namespace="postiz",pod=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0 + for: 5m + labels: + severity: critical + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterPGDatabaseXidAgeWarning + annotations: + summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one. + description: |- + Over 300,000,000 transactions from frozen xid + on pod {{ $labels.pod }} + expr: | + cnpg_pg_database_xid_age > 300000000 + for: 1m + labels: + severity: warning + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterPGReplicationWarning + annotations: + summary: CNPG Cluster standby is lagging behind the primary. + description: |- + Standby is lagging behind by over 300 seconds (5 minutes) + expr: | + cnpg_pg_replication_lag > 300 + for: 1m + labels: + severity: warning + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterReplicaFailingReplicationWarning + annotations: + summary: CNPG Cluster has a replica that is failing to replicate. + description: |- + Replica {{ $labels.pod }} + is failing to replicate + expr: | + cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up + for: 1m + labels: + severity: warning + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterZoneSpreadWarning + annotations: + summary: CNPG Cluster has instances in the same zone. + description: |- + CloudNativePG Cluster "postiz/postiz-postgresql-17-cluster" has instances in the same availability zone. 
+ + A disaster in one availability zone will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="postiz", pod=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster +--- +# Source: postiz/templates/redis-replication.yaml +apiVersion: redis.redis.opstreelabs.in/v1beta2 +kind: RedisReplication +metadata: + name: redis-replication-postiz + namespace: postiz + labels: + app.kubernetes.io/name: redis-replication-postiz + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz +spec: + clusterSize: 3 + podSecurityContext: + runAsUser: 1000 + fsGroup: 1000 + kubernetesConfig: + image: quay.io/opstree/redis:v8.0.3 + imagePullPolicy: IfNotPresent + redisSecret: + name: postiz-redis-config + key: password + resources: + requests: + cpu: 50m + memory: 128Mi + storage: + volumeClaimTemplate: + spec: + storageClassName: ceph-block + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 1Gi + redisExporter: + enabled: true + image: quay.io/opstree/redis-exporter:v1.48.0 +--- +# Source: postiz/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: postiz-config-backup-source + namespace: postiz + labels: + app.kubernetes.io/name: postiz-config-backup-source + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz +spec: + sourcePVC: postiz-config + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: postiz-config-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: postiz/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: postiz-uploads-backup-source + namespace: postiz + labels: + app.kubernetes.io/name: postiz-uploads-backup-source + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz +spec: + sourcePVC: postiz-uploads + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: postiz-uploads-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: postiz/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "postiz-postgresql-17-daily-backup-scheduled-backup" + namespace: postiz + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: postiz-postgresql-17 + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: postiz-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "postiz-postgresql-17-external-backup" +--- +# Source: 
postiz/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "postiz-postgresql-17-live-backup-scheduled-backup" + namespace: postiz + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: postiz-postgresql-17 + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: postiz-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "postiz-postgresql-17-garage-local-backup" +--- +# Source: postiz/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: redis-replication-postiz + namespace: postiz + labels: + app.kubernetes.io/name: redis-replication-postiz + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz + redis-operator: "true" + env: production +spec: + selector: + matchLabels: + redis_setup_type: replication + endpoints: + - port: redis-exporter + interval: 30s + scrapeTimeout: 10s diff --git a/clusters/cl01tl/manifests/prowlarr/prowlarr.yaml b/clusters/cl01tl/manifests/prowlarr/prowlarr.yaml new file mode 100644 index 000000000..a080129ea --- /dev/null +++ b/clusters/cl01tl/manifests/prowlarr/prowlarr.yaml @@ -0,0 +1,235 @@ +--- +# Source: prowlarr/charts/prowlarr/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: prowlarr-config + labels: + app.kubernetes.io/instance: prowlarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: prowlarr + helm.sh/chart: prowlarr-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: prowlarr +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" + storageClassName: "ceph-block" +--- +# Source: prowlarr/charts/prowlarr/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: prowlarr + labels: + app.kubernetes.io/instance: prowlarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: prowlarr + app.kubernetes.io/service: prowlarr + helm.sh/chart: prowlarr-4.4.0 + namespace: prowlarr +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 9696 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: prowlarr + app.kubernetes.io/name: prowlarr +--- +# Source: prowlarr/charts/prowlarr/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: prowlarr + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: prowlarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: prowlarr + helm.sh/chart: prowlarr-4.4.0 + namespace: prowlarr +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: prowlarr + app.kubernetes.io/instance: prowlarr + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: prowlarr + app.kubernetes.io/name: prowlarr + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + securityContext: + fsGroup: 568 + fsGroupChangePolicy: OnRootMismatch + runAsGroup: 568 + runAsUser: 568 + supplementalGroups: + - 44 + - 100 + - 109 + - 65539 + hostIPC: false + hostNetwork: false + 
hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + image: ghcr.io/linuxserver/prowlarr:2.3.0@sha256:475853535de3de8441b87c1457c30f2e695f4831228b12b6b7274e9da409d874 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 256Mi + volumeMounts: + - mountPath: /config + name: config + volumes: + - name: config + persistentVolumeClaim: + claimName: prowlarr-config +--- +# Source: prowlarr/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: prowlarr-config-backup-secret + namespace: prowlarr + labels: + app.kubernetes.io/name: prowlarr-config-backup-secret + app.kubernetes.io/instance: prowlarr + app.kubernetes.io/part-of: prowlarr +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/prowlarr/prowlarr-config" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: prowlarr/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-prowlarr + namespace: prowlarr + labels: + app.kubernetes.io/name: http-route-prowlarr + app.kubernetes.io/instance: prowlarr + app.kubernetes.io/part-of: prowlarr +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - prowlarr.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: prowlarr + port: 80 + weight: 100 +--- +# Source: prowlarr/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: prowlarr-config-backup-source + namespace: prowlarr + labels: + app.kubernetes.io/name: prowlarr-config-backup-source + app.kubernetes.io/instance: prowlarr + app.kubernetes.io/part-of: prowlarr +spec: + sourcePVC: prowlarr-config + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: prowlarr-config-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + moverSecurityContext: + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + fsGroupChangePolicy: OnRootMismatch + supplementalGroups: + - 44 + - 100 + - 109 + - 65539 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot diff --git a/clusters/cl01tl/manifests/radarr-4k/radarr-4k.yaml b/clusters/cl01tl/manifests/radarr-4k/radarr-4k.yaml new file mode 100644 index 
000000000..b7a0d3b0c --- /dev/null +++ b/clusters/cl01tl/manifests/radarr-4k/radarr-4k.yaml @@ -0,0 +1,930 @@ +--- +# Source: radarr-4k/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: radarr-4k-nfs-storage + namespace: radarr-4k + labels: + app.kubernetes.io/name: radarr-4k-nfs-storage + app.kubernetes.io/instance: radarr-4k + app.kubernetes.io/part-of: radarr-4k +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: radarr-4k/charts/radarr-4k/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: radarr-4k-config + labels: + app.kubernetes.io/instance: radarr-4k + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: radarr-4k + helm.sh/chart: radarr-4k-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: radarr-4k +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "20Gi" + storageClassName: "ceph-block" +--- +# Source: radarr-4k/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: radarr-4k-nfs-storage + namespace: radarr-4k + labels: + app.kubernetes.io/name: radarr-4k-nfs-storage + app.kubernetes.io/instance: radarr-4k + app.kubernetes.io/part-of: radarr-4k +spec: + volumeName: radarr-4k-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: radarr-4k/charts/radarr-4k/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: radarr-4k + labels: + app.kubernetes.io/instance: radarr-4k + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: radarr-4k + app.kubernetes.io/service: radarr-4k + helm.sh/chart: radarr-4k-4.4.0 + namespace: radarr-4k +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 7878 + protocol: TCP + name: http + - port: 9793 + targetPort: 9793 + protocol: TCP + name: metrics + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: radarr-4k + app.kubernetes.io/name: radarr-4k +--- +# Source: radarr-4k/charts/radarr-4k/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: radarr-4k + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: radarr-4k + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: radarr-4k + helm.sh/chart: radarr-4k-4.4.0 + namespace: radarr-4k +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: radarr-4k + app.kubernetes.io/instance: radarr-4k + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: radarr-4k + app.kubernetes.io/name: radarr-4k + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + securityContext: + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + runAsGroup: 1000 + runAsUser: 1000 + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: PUID + value: "1000" + - name: PGID + value: "1000" + image: ghcr.io/linuxserver/radarr:6.0.4@sha256:06ac318ecb95a34c7b229568dcb4271f02cb5007bb189a0dd67a2032864187ca + imagePullPolicy: IfNotPresent + name: main + resources: + 
requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /config + name: config + - mountPath: /mnt/store + name: media + - args: + - radarr + env: + - name: URL + value: http://localhost + - name: CONFIG + value: /config/config.xml + - name: PORT + value: "9793" + - name: ENABLE_ADDITIONAL_METRICS + value: "false" + - name: ENABLE_UNKNOWN_QUEUE_ITEMS + value: "false" + image: ghcr.io/onedr0p/exportarr:v2.3.0 + imagePullPolicy: IfNotPresent + name: metrics + resources: + requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - mountPath: /config + name: config + readOnly: true + volumes: + - name: config + persistentVolumeClaim: + claimName: radarr-4k-config + - name: media + persistentVolumeClaim: + claimName: radarr-4k-nfs-storage +--- +# Source: radarr-4k/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: radarr5-4k-postgresql-17-cluster + namespace: radarr-4k + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-4k-postgresql-17 + app.kubernetes.io/instance: radarr-4k + app.kubernetes.io/part-of: radarr-4k + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "radarr5-4k-postgresql-17-external-backup" + serverName: "radarr5-4k-postgresql-17-backup-1" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "radarr5-4k-postgresql-17-garage-local-backup" + serverName: "radarr5-4k-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "radarr5-4k-postgresql-17-recovery" + serverName: radarr5-4k-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 200m + memory: 1Gi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: radarr5-4k-postgresql-17-backup-1 + + externalClusters: + - name: radarr5-4k-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "radarr5-4k-postgresql-17-recovery" + serverName: radarr5-4k-postgresql-17-backup-1 +--- +# Source: radarr-4k/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: radarr-4k-config-backup-secret + namespace: radarr-4k + labels: + app.kubernetes.io/name: radarr-4k-config-backup-secret + app.kubernetes.io/instance: radarr-4k + app.kubernetes.io/part-of: radarr-4k +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/radarr5-4k/radarr5-4k-config" + data: + - secretKey: 
BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: radarr-4k/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: radarr-4k-postgresql-17-cluster-backup-secret + namespace: radarr-4k + labels: + app.kubernetes.io/name: radarr-4k-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: radarr-4k + app.kubernetes.io/part-of: radarr-4k +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: radarr-4k/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: radarr-4k-postgresql-17-cluster-backup-secret-garage + namespace: radarr-4k + labels: + app.kubernetes.io/name: radarr-4k-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: radarr-4k + app.kubernetes.io/part-of: radarr-4k +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: radarr-4k/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-radarr-4k + namespace: radarr-4k + labels: + app.kubernetes.io/name: http-route-radarr-4k + app.kubernetes.io/instance: radarr-4k + app.kubernetes.io/part-of: radarr-4k +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - radarr-4k.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: radarr-4k + port: 80 + weight: 100 +--- +# Source: radarr-4k/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore 
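+# The chart renders three object stores for each cluster: an external store on DigitalOcean Spaces with 30d retention, a local Garage store with 3d retention that also receives the WAL archive, and a recovery store used only as a restore source; the credentials they reference come from the ExternalSecret resources above. 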
+metadata: + name: "radarr5-4k-postgresql-17-external-backup" + namespace: radarr-4k + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-4k-postgresql-17 + app.kubernetes.io/instance: radarr-4k + app.kubernetes.io/part-of: radarr-4k + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/radarr5-4k/radarr5-4k-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: radarr-4k-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: radarr-4k-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: radarr-4k/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "radarr5-4k-postgresql-17-garage-local-backup" + namespace: radarr-4k + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-4k-postgresql-17 + app.kubernetes.io/instance: radarr-4k + app.kubernetes.io/part-of: radarr-4k + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/radarr-4k/radarr5-4k-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: radarr-4k-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: radarr-4k-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: radarr-4k-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: radarr-4k/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "radarr5-4k-postgresql-17-recovery" + namespace: radarr-4k + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-4k-postgresql-17 + app.kubernetes.io/instance: radarr-4k + app.kubernetes.io/part-of: radarr-4k + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + configuration: + destinationPath: s3://postgres-backups/cl01tl/radarr5-4k/radarr5-4k-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + wal: + compression: snappy + maxParallel: 1 + data: + compression: snappy + jobs: 1 + s3Credentials: + accessKeyId: + name: radarr-4k-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: radarr-4k-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY +--- +# Source: radarr-4k/charts/postgres-17-cluster/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: radarr5-4k-postgresql-17-alert-rules + namespace: radarr-4k + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-4k-postgresql-17 + app.kubernetes.io/instance: radarr-4k + app.kubernetes.io/part-of: radarr-4k + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + groups: + - name: cloudnative-pg/radarr5-4k-postgresql-17 + rules: + - alert: CNPGClusterBackendsWaitingWarning + annotations: + summary: CNPG Cluster has a backend waiting for longer than 5 minutes. 
+ description: |- + Pod {{ $labels.pod }} + has been waiting for longer than 5 minutes + expr: | + cnpg_backends_waiting_total > 300 + for: 1m + labels: + severity: warning + namespace: radarr-4k + cnpg_cluster: radarr5-4k-postgresql-17-cluster + - alert: CNPGClusterDatabaseDeadlockConflictsWarning + annotations: + summary: CNPG Cluster has over 10 deadlock conflicts. + description: |- + There are over 10 deadlock conflicts in + {{ $labels.pod }} + expr: | + cnpg_pg_stat_database_deadlocks > 10 + for: 1m + labels: + severity: warning + namespace: radarr-4k + cnpg_cluster: radarr5-4k-postgresql-17-cluster + - alert: CNPGClusterHACritical + annotations: + summary: CNPG Cluster has no standby replicas! + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe + risk of data loss and downtime if the primary instance fails. + + The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint + will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary. + + This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer + instances. The replaced instance may need some time to catch up with the cluster primary instance. + + This alarm will always trigger if your cluster is configured to run with only 1 instance. In this + case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-4k"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-4k"}) < 1 + for: 5m + labels: + severity: critical + namespace: radarr-4k + cnpg_cluster: radarr5-4k-postgresql-17-cluster + - alert: CNPGClusterHAWarning + annotations: + summary: CNPG Cluster has fewer than 2 standby replicas. + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting + your cluster at risk if another instance fails. The cluster is still able to operate normally, although + the `-ro` and `-r` endpoints operate at reduced capacity. + + This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may + need some time to catch up with the cluster primary instance. + + This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances. + In this case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-4k"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-4k"}) < 2 + for: 5m + labels: + severity: warning + namespace: radarr-4k + cnpg_cluster: radarr5-4k-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsCritical + annotations: + summary: CNPG Instance maximum number of connections critical! + description: |- + CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95 + for: 5m + labels: + severity: critical + namespace: radarr-4k + cnpg_cluster: radarr5-4k-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsWarning + annotations: + summary: CNPG Instance is approaching the maximum number of connections. + description: |- + CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: radarr-4k + cnpg_cluster: radarr5-4k-postgresql-17-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. + + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="radarr-4k",pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: radarr-4k + cnpg_cluster: radarr5-4k-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: radarr-4k + cnpg_cluster: radarr5-4k-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: radarr-4k + cnpg_cluster: radarr5-4k-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! 
+ description: |- + CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: radarr-4k + cnpg_cluster: radarr5-4k-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" is running low on disk space. Check attached PVCs. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.7 + for: 5m + labels: + severity: warning + namespace: radarr-4k + cnpg_cluster: radarr5-4k-postgresql-17-cluster + - alert: CNPGClusterOffline + annotations: + summary: CNPG Cluster has no running instances! + description: |- + CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" has no ready instances. 
+ + Having an offline cluster means your applications will not be able to access the database, leading to + potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md + expr: | + (count(cnpg_collector_up{namespace="radarr-4k",pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0 + for: 5m + labels: + severity: critical + namespace: radarr-4k + cnpg_cluster: radarr5-4k-postgresql-17-cluster + - alert: CNPGClusterPGDatabaseXidAgeWarning + annotations: + summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one. + description: |- + Over 300,000,000 transactions from frozen xid + on pod {{ $labels.pod }} + expr: | + cnpg_pg_database_xid_age > 300000000 + for: 1m + labels: + severity: warning + namespace: radarr-4k + cnpg_cluster: radarr5-4k-postgresql-17-cluster + - alert: CNPGClusterPGReplicationWarning + annotations: + summary: CNPG Cluster standby is lagging behind the primary. + description: |- + Standby is lagging behind by over 300 seconds (5 minutes) + expr: | + cnpg_pg_replication_lag > 300 + for: 1m + labels: + severity: warning + namespace: radarr-4k + cnpg_cluster: radarr5-4k-postgresql-17-cluster + - alert: CNPGClusterReplicaFailingReplicationWarning + annotations: + summary: CNPG Cluster has a replica that is failing to replicate. + description: |- + Replica {{ $labels.pod }} + is failing to replicate + expr: | + cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up + for: 1m + labels: + severity: warning + namespace: radarr-4k + cnpg_cluster: radarr5-4k-postgresql-17-cluster + - alert: CNPGClusterZoneSpreadWarning + annotations: + summary: CNPG Cluster has instances in the same zone. + description: |- + CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" has instances in the same availability zone. + + A disaster in one availability zone will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: radarr-4k + cnpg_cluster: radarr5-4k-postgresql-17-cluster +--- +# Source: radarr-4k/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: radarr-4k + namespace: radarr-4k + labels: + app.kubernetes.io/name: radarr-4k + app.kubernetes.io/instance: radarr-4k + app.kubernetes.io/part-of: radarr-4k +spec: + groups: + - name: radarr-4k + rules: + - alert: ExportarrAbsent + annotations: + description: Radarr 4K Exportarr has disappeared from Prometheus + service discovery. + summary: Exportarr is down. + expr: | + absent(up{job=~".*radarr-4k.*"} == 1) + for: 5m + labels: + severity: critical + - alert: Radarr4kDown + annotations: + description: Radarr 4K service is down. + summary: Radarr 4K is down. 
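+ # Fires when the exportarr-scraped system status metric stays at 0 for 5 minutes; the ExportarrAbsent rule above covers the scrape target disappearing entirely. 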
+ expr: | + radarr_4k_system_status{job=~".*radarr-4k.*"} == 0 + for: 5m + labels: + severity: critical +--- +# Source: radarr-4k/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: radarr-4k-config-backup-source + namespace: radarr-4k + labels: + app.kubernetes.io/name: radarr-4k-config-backup-source + app.kubernetes.io/instance: radarr-4k + app.kubernetes.io/part-of: radarr-4k +spec: + sourcePVC: radarr-4k-config + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: radarr-4k-config-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + moverSecurityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: radarr-4k/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "radarr5-4k-postgresql-17-daily-backup-scheduled-backup" + namespace: radarr-4k + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-4k-postgresql-17 + app.kubernetes.io/instance: radarr-4k + app.kubernetes.io/part-of: radarr-4k + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: radarr5-4k-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "radarr5-4k-postgresql-17-external-backup" +--- +# Source: radarr-4k/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "radarr5-4k-postgresql-17-live-backup-scheduled-backup" + namespace: radarr-4k + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-4k-postgresql-17 + app.kubernetes.io/instance: radarr-4k + app.kubernetes.io/part-of: radarr-4k + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: radarr5-4k-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "radarr5-4k-postgresql-17-garage-local-backup" +--- +# Source: radarr-4k/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: radarr-4k + namespace: radarr-4k + labels: + app.kubernetes.io/name: radarr-4k + app.kubernetes.io/instance: radarr-4k + app.kubernetes.io/part-of: radarr-4k +spec: + selector: + matchLabels: + app.kubernetes.io/name: radarr-4k + app.kubernetes.io/instance: radarr-4k + endpoints: + - port: metrics + interval: 3m + scrapeTimeout: 1m + path: /metrics diff --git a/clusters/cl01tl/manifests/radarr-anime/radarr-anime.yaml b/clusters/cl01tl/manifests/radarr-anime/radarr-anime.yaml new file mode 100644 index 000000000..41a4ba5e0 --- /dev/null +++ b/clusters/cl01tl/manifests/radarr-anime/radarr-anime.yaml @@ -0,0 +1,928 @@ +--- +# Source: radarr-anime/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: radarr-anime-nfs-storage + namespace: radarr-anime + labels: + app.kubernetes.io/name: radarr-anime-nfs-storage + app.kubernetes.io/instance: radarr-anime + 
app.kubernetes.io/part-of: radarr-anime +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: radarr-anime/charts/radarr-anime/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: radarr-anime-config + labels: + app.kubernetes.io/instance: radarr-anime + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: radarr-anime + helm.sh/chart: radarr-anime-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: radarr-anime +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "20Gi" + storageClassName: "ceph-block" +--- +# Source: radarr-anime/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: radarr-anime-nfs-storage + namespace: radarr-anime + labels: + app.kubernetes.io/name: radarr-anime-nfs-storage + app.kubernetes.io/instance: radarr-anime + app.kubernetes.io/part-of: radarr-anime +spec: + volumeName: radarr-anime-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: radarr-anime/charts/radarr-anime/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: radarr-anime + labels: + app.kubernetes.io/instance: radarr-anime + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: radarr-anime + app.kubernetes.io/service: radarr-anime + helm.sh/chart: radarr-anime-4.4.0 + namespace: radarr-anime +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 7878 + protocol: TCP + name: http + - port: 9793 + targetPort: 9793 + protocol: TCP + name: metrics + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: radarr-anime + app.kubernetes.io/name: radarr-anime +--- +# Source: radarr-anime/charts/radarr-anime/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: radarr-anime + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: radarr-anime + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: radarr-anime + helm.sh/chart: radarr-anime-4.4.0 + namespace: radarr-anime +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: radarr-anime + app.kubernetes.io/instance: radarr-anime + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: radarr-anime + app.kubernetes.io/name: radarr-anime + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + securityContext: + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: PUID + value: "1000" + - name: PGID + value: "1000" + image: ghcr.io/linuxserver/radarr:6.0.4@sha256:06ac318ecb95a34c7b229568dcb4271f02cb5007bb189a0dd67a2032864187ca + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 256Mi + volumeMounts: + - mountPath: /config + name: config + - mountPath: /mnt/store + name: media + - args: + - radarr + env: + - name: URL + value: http://localhost + - name: CONFIG + value: /config/config.xml + - name: PORT + value: "9793" + - name: 
ENABLE_ADDITIONAL_METRICS + value: "false" + - name: ENABLE_UNKNOWN_QUEUE_ITEMS + value: "false" + image: ghcr.io/onedr0p/exportarr:v2.3.0 + imagePullPolicy: IfNotPresent + name: metrics + resources: + requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - mountPath: /config + name: config + readOnly: true + volumes: + - name: config + persistentVolumeClaim: + claimName: radarr-anime-config + - name: media + persistentVolumeClaim: + claimName: radarr-anime-nfs-storage +--- +# Source: radarr-anime/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: radarr5-anime-postgresql-17-cluster + namespace: radarr-anime + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-anime-postgresql-17 + app.kubernetes.io/instance: radarr-anime + app.kubernetes.io/part-of: radarr-anime + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "radarr5-anime-postgresql-17-external-backup" + serverName: "radarr5-anime-postgresql-17-backup-1" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "radarr5-anime-postgresql-17-garage-local-backup" + serverName: "radarr5-anime-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "radarr5-anime-postgresql-17-recovery" + serverName: radarr5-anime-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 100m + memory: 256Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: radarr5-anime-postgresql-17-backup-1 + + externalClusters: + - name: radarr5-anime-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "radarr5-anime-postgresql-17-recovery" + serverName: radarr5-anime-postgresql-17-backup-1 +--- +# Source: radarr-anime/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: radarr-anime-config-backup-secret + namespace: radarr-anime + labels: + app.kubernetes.io/name: radarr-anime-config-backup-secret + app.kubernetes.io/instance: radarr-anime + app.kubernetes.io/part-of: radarr-anime +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/radarr5-anime/radarr5-anime-config" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + 
remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: radarr-anime/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: radarr-anime-postgresql-17-cluster-backup-secret + namespace: radarr-anime + labels: + app.kubernetes.io/name: radarr-anime-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: radarr-anime + app.kubernetes.io/part-of: radarr-anime +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: radarr-anime/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: radarr-anime-postgresql-17-cluster-backup-secret-garage + namespace: radarr-anime + labels: + app.kubernetes.io/name: radarr-anime-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: radarr-anime + app.kubernetes.io/part-of: radarr-anime +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: radarr-anime/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-radarr-anime + namespace: radarr-anime + labels: + app.kubernetes.io/name: http-route-radarr-anime + app.kubernetes.io/instance: radarr-anime + app.kubernetes.io/part-of: radarr-anime +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - radarr-anime.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: radarr-anime + port: 80 + weight: 100 +--- +# Source: radarr-anime/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "radarr5-anime-postgresql-17-external-backup" + namespace: radarr-anime + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 
+ app.kubernetes.io/name: radarr5-anime-postgresql-17 + app.kubernetes.io/instance: radarr-anime + app.kubernetes.io/part-of: radarr-anime + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/radarr5-anime/radarr5-anime-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: radarr-anime-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: radarr-anime-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: radarr-anime/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "radarr5-anime-postgresql-17-garage-local-backup" + namespace: radarr-anime + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-anime-postgresql-17 + app.kubernetes.io/instance: radarr-anime + app.kubernetes.io/part-of: radarr-anime + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/radarr-anime/radarr5-anime-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: radarr-anime-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: radarr-anime-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: radarr-anime-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: radarr-anime/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "radarr5-anime-postgresql-17-recovery" + namespace: radarr-anime + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-anime-postgresql-17 + app.kubernetes.io/instance: radarr-anime + app.kubernetes.io/part-of: radarr-anime + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + configuration: + destinationPath: s3://postgres-backups/cl01tl/radarr5-anime/radarr5-anime-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + wal: + compression: snappy + maxParallel: 1 + data: + compression: snappy + jobs: 1 + s3Credentials: + accessKeyId: + name: radarr-anime-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: radarr-anime-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY +--- +# Source: radarr-anime/charts/postgres-17-cluster/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: radarr5-anime-postgresql-17-alert-rules + namespace: radarr-anime + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-anime-postgresql-17 + app.kubernetes.io/instance: radarr-anime + app.kubernetes.io/part-of: radarr-anime + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + groups: + - name: cloudnative-pg/radarr5-anime-postgresql-17 + rules: + - alert: CNPGClusterBackendsWaitingWarning + annotations: + summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
+ description: |- + Pod {{ $labels.pod }} + has been waiting for longer than 5 minutes + expr: | + cnpg_backends_waiting_total > 300 + for: 1m + labels: + severity: warning + namespace: radarr-anime + cnpg_cluster: radarr5-anime-postgresql-17-cluster + - alert: CNPGClusterDatabaseDeadlockConflictsWarning + annotations: + summary: CNPG Cluster has over 10 deadlock conflicts. + description: |- + There are over 10 deadlock conflicts in + {{ $labels.pod }} + expr: | + cnpg_pg_stat_database_deadlocks > 10 + for: 1m + labels: + severity: warning + namespace: radarr-anime + cnpg_cluster: radarr5-anime-postgresql-17-cluster + - alert: CNPGClusterHACritical + annotations: + summary: CNPG Cluster has no standby replicas! + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe + risk of data loss and downtime if the primary instance fails. + + The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint + will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary. + + This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer + instances. The replaced instance may need some time to catch up with the cluster primary instance. + + This alarm will always trigger if your cluster is configured to run with only 1 instance. In this + case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-anime"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-anime"}) < 1 + for: 5m + labels: + severity: critical + namespace: radarr-anime + cnpg_cluster: radarr5-anime-postgresql-17-cluster + - alert: CNPGClusterHAWarning + annotations: + summary: CNPG Cluster has fewer than 2 standby replicas. + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting + your cluster at risk if another instance fails. The cluster is still able to operate normally, although + the `-ro` and `-r` endpoints operate at reduced capacity. + + This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may + need some time to catch up with the cluster primary instance. + + This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances. + In this case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-anime"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-anime"}) < 2 + for: 5m + labels: + severity: warning + namespace: radarr-anime + cnpg_cluster: radarr5-anime-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsCritical + annotations: + summary: CNPG Instance is critically close to the maximum number of connections! + description: |- + CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections.
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95 + for: 5m + labels: + severity: critical + namespace: radarr-anime + cnpg_cluster: radarr5-anime-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsWarning + annotations: + summary: CNPG Instance is approaching the maximum number of connections. + description: |- + CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: radarr-anime + cnpg_cluster: radarr5-anime-postgresql-17-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. + + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="radarr-anime",pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: radarr-anime + cnpg_cluster: radarr5-anime-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: radarr-anime + cnpg_cluster: radarr5-anime-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: radarr-anime + cnpg_cluster: radarr5-anime-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! 
+ description: |- + CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: radarr-anime + cnpg_cluster: radarr5-anime-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" is running low on disk space. Check attached PVCs. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.7 + for: 5m + labels: + severity: warning + namespace: radarr-anime + cnpg_cluster: radarr5-anime-postgresql-17-cluster + - alert: CNPGClusterOffline + annotations: + summary: CNPG Cluster has no running instances! 
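+ # The expression below counts the cluster's cnpg_collector_up series; the "OR on() vector(0)" guard turns an absent metric into zero, so the alert also fires when no instance is reporting at all.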
+ description: |- + CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" has no ready instances. + + Having an offline cluster means your applications will not be able to access the database, leading to + potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md + expr: | + (count(cnpg_collector_up{namespace="radarr-anime",pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0 + for: 5m + labels: + severity: critical + namespace: radarr-anime + cnpg_cluster: radarr5-anime-postgresql-17-cluster + - alert: CNPGClusterPGDatabaseXidAgeWarning + annotations: + summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one. + description: |- + Over 300,000,000 transactions from frozen XID + on pod {{ $labels.pod }} + expr: | + cnpg_pg_database_xid_age > 300000000 + for: 1m + labels: + severity: warning + namespace: radarr-anime + cnpg_cluster: radarr5-anime-postgresql-17-cluster + - alert: CNPGClusterPGReplicationWarning + annotations: + summary: CNPG Cluster standby is lagging behind the primary. + description: |- + Standby is lagging behind by over 300 seconds (5 minutes) + expr: | + cnpg_pg_replication_lag > 300 + for: 1m + labels: + severity: warning + namespace: radarr-anime + cnpg_cluster: radarr5-anime-postgresql-17-cluster + - alert: CNPGClusterReplicaFailingReplicationWarning + annotations: + summary: CNPG Cluster has a replica that is failing to replicate. + description: |- + Replica {{ $labels.pod }} + is failing to replicate + expr: | + cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up + for: 1m + labels: + severity: warning + namespace: radarr-anime + cnpg_cluster: radarr5-anime-postgresql-17-cluster + - alert: CNPGClusterZoneSpreadWarning + annotations: + summary: CNPG Cluster has instances in the same availability zone. + description: |- + CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" has instances in the same availability zone. + + A disaster in one availability zone will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: radarr-anime + cnpg_cluster: radarr5-anime-postgresql-17-cluster +--- +# Source: radarr-anime/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: radarr-anime + namespace: radarr-anime + labels: + app.kubernetes.io/name: radarr-anime + app.kubernetes.io/instance: radarr-anime + app.kubernetes.io/part-of: radarr-anime +spec: + groups: + - name: radarr-anime + rules: + - alert: ExportarrAbsent + annotations: + description: Radarr Anime Exportarr has disappeared from Prometheus + service discovery. + summary: Exportarr is down. + expr: | + absent(up{job=~".*radarr-anime.*"} == 1) + for: 5m + labels: + severity: critical + - alert: RadarrAnimeDown + annotations: + description: Radarr Anime service is down. + summary: Radarr Anime is down.
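+ # The expression below watches the system status metric scraped from the Exportarr sidecar; a value of 0 sustained for 5 minutes marks the service as down.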
+ expr: | + radarr_anime_system_status{job=~".*radarr-anime.*"} == 0 + for: 5m + labels: + severity: critical +--- +# Source: radarr-anime/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: radarr-anime-config-backup-source + namespace: radarr-anime + labels: + app.kubernetes.io/name: radarr-anime-config-backup-source + app.kubernetes.io/instance: radarr-anime + app.kubernetes.io/part-of: radarr-anime +spec: + sourcePVC: radarr-anime-config + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: radarr-anime-config-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + moverSecurityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: radarr-anime/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "radarr5-anime-postgresql-17-daily-backup-scheduled-backup" + namespace: radarr-anime + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-anime-postgresql-17 + app.kubernetes.io/instance: radarr-anime + app.kubernetes.io/part-of: radarr-anime + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: radarr5-anime-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "radarr5-anime-postgresql-17-external-backup" +--- +# Source: radarr-anime/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "radarr5-anime-postgresql-17-live-backup-scheduled-backup" + namespace: radarr-anime + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-anime-postgresql-17 + app.kubernetes.io/instance: radarr-anime + app.kubernetes.io/part-of: radarr-anime + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: radarr5-anime-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "radarr5-anime-postgresql-17-garage-local-backup" +--- +# Source: radarr-anime/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: radarr-anime + namespace: radarr-anime + labels: + app.kubernetes.io/name: radarr-anime + app.kubernetes.io/instance: radarr-anime + app.kubernetes.io/part-of: radarr-anime +spec: + selector: + matchLabels: + app.kubernetes.io/name: radarr-anime + app.kubernetes.io/instance: radarr-anime + endpoints: + - port: metrics + interval: 3m + scrapeTimeout: 1m + path: /metrics diff --git a/clusters/cl01tl/manifests/radarr-standup/radarr-standup.yaml b/clusters/cl01tl/manifests/radarr-standup/radarr-standup.yaml new file mode 100644 index 000000000..787daa829 --- /dev/null +++ b/clusters/cl01tl/manifests/radarr-standup/radarr-standup.yaml @@ -0,0 +1,928 @@ +--- +# Source: radarr-standup/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: radarr-standup-nfs-storage + namespace: radarr-standup + 
labels: + app.kubernetes.io/name: radarr-standup-nfs-storage + app.kubernetes.io/instance: radarr-standup + app.kubernetes.io/part-of: radarr-standup +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: radarr-standup/charts/radarr-standup/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: radarr-standup-config + labels: + app.kubernetes.io/instance: radarr-standup + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: radarr-standup + helm.sh/chart: radarr-standup-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: radarr-standup +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "20Gi" + storageClassName: "ceph-block" +--- +# Source: radarr-standup/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: radarr-standup-nfs-storage + namespace: radarr-standup + labels: + app.kubernetes.io/name: radarr-standup-nfs-storage + app.kubernetes.io/instance: radarr-standup + app.kubernetes.io/part-of: radarr-standup +spec: + volumeName: radarr-standup-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: radarr-standup/charts/radarr-standup/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: radarr-standup + labels: + app.kubernetes.io/instance: radarr-standup + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: radarr-standup + app.kubernetes.io/service: radarr-standup + helm.sh/chart: radarr-standup-4.4.0 + namespace: radarr-standup +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 7878 + protocol: TCP + name: http + - port: 9793 + targetPort: 9793 + protocol: TCP + name: metrics + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: radarr-standup + app.kubernetes.io/name: radarr-standup +--- +# Source: radarr-standup/charts/radarr-standup/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: radarr-standup + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: radarr-standup + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: radarr-standup + helm.sh/chart: radarr-standup-4.4.0 + namespace: radarr-standup +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: radarr-standup + app.kubernetes.io/instance: radarr-standup + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: radarr-standup + app.kubernetes.io/name: radarr-standup + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + securityContext: + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: PUID + value: "1000" + - name: PGID + value: "1000" + image: ghcr.io/linuxserver/radarr:6.0.4@sha256:06ac318ecb95a34c7b229568dcb4271f02cb5007bb189a0dd67a2032864187ca + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 256Mi + volumeMounts: + - mountPath: /config + name: config + - mountPath: /mnt/store + 
name: media + - args: + - radarr + env: + - name: URL + value: http://localhost + - name: CONFIG + value: /config/config.xml + - name: PORT + value: "9793" + - name: ENABLE_ADDITIONAL_METRICS + value: "false" + - name: ENABLE_UNKNOWN_QUEUE_ITEMS + value: "false" + image: ghcr.io/onedr0p/exportarr:v2.3.0 + imagePullPolicy: IfNotPresent + name: metrics + resources: + requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - mountPath: /config + name: config + readOnly: true + volumes: + - name: config + persistentVolumeClaim: + claimName: radarr-standup-config + - name: media + persistentVolumeClaim: + claimName: radarr-standup-nfs-storage +--- +# Source: radarr-standup/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: radarr5-standup-postgresql-17-cluster + namespace: radarr-standup + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-standup-postgresql-17 + app.kubernetes.io/instance: radarr-standup + app.kubernetes.io/part-of: radarr-standup + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "radarr5-standup-postgresql-17-external-backup" + serverName: "radarr5-standup-postgresql-17-backup-1" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "radarr5-standup-postgresql-17-garage-local-backup" + serverName: "radarr5-standup-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "radarr5-standup-postgresql-17-recovery" + serverName: radarr5-standup-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 100m + memory: 256Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: radarr5-standup-postgresql-17-backup-1 + + externalClusters: + - name: radarr5-standup-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "radarr5-standup-postgresql-17-recovery" + serverName: radarr5-standup-postgresql-17-backup-1 +--- +# Source: radarr-standup/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: radarr-standup-config-backup-secret + namespace: radarr-standup + labels: + app.kubernetes.io/name: radarr-standup-config-backup-secret + app.kubernetes.io/instance: radarr-standup + app.kubernetes.io/part-of: radarr-standup +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/radarr5-standup/radarr5-standup-config" + data: + - 
secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: radarr-standup/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: radarr-standup-postgresql-17-cluster-backup-secret + namespace: radarr-standup + labels: + app.kubernetes.io/name: radarr-standup-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: radarr-standup + app.kubernetes.io/part-of: radarr-standup +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: radarr-standup/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: radarr-standup-postgresql-17-cluster-backup-secret-garage + namespace: radarr-standup + labels: + app.kubernetes.io/name: radarr-standup-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: radarr-standup + app.kubernetes.io/part-of: radarr-standup +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: radarr-standup/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-radarr-standup + namespace: radarr-standup + labels: + app.kubernetes.io/name: http-route-radarr-standup + app.kubernetes.io/instance: radarr-standup + app.kubernetes.io/part-of: radarr-standup +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - radarr-standup.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: radarr-standup + port: 80 + weight: 100 +--- +# Source: 
radarr-standup/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "radarr5-standup-postgresql-17-external-backup" + namespace: radarr-standup + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-standup-postgresql-17 + app.kubernetes.io/instance: radarr-standup + app.kubernetes.io/part-of: radarr-standup + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/radarr5-standup/radarr5-standup-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: radarr-standup-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: radarr-standup-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: radarr-standup/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "radarr5-standup-postgresql-17-garage-local-backup" + namespace: radarr-standup + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-standup-postgresql-17 + app.kubernetes.io/instance: radarr-standup + app.kubernetes.io/part-of: radarr-standup + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/radarr-standup/radarr5-standup-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: radarr-standup-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: radarr-standup-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: radarr-standup-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: radarr-standup/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "radarr5-standup-postgresql-17-recovery" + namespace: radarr-standup + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-standup-postgresql-17 + app.kubernetes.io/instance: radarr-standup + app.kubernetes.io/part-of: radarr-standup + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + configuration: + destinationPath: s3://postgres-backups/cl01tl/radarr5-standup/radarr5-standup-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + wal: + compression: snappy + maxParallel: 1 + data: + compression: snappy + jobs: 1 + s3Credentials: + accessKeyId: + name: radarr-standup-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: radarr-standup-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY +--- +# Source: radarr-standup/charts/postgres-17-cluster/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: radarr5-standup-postgresql-17-alert-rules + namespace: radarr-standup + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-standup-postgresql-17 + app.kubernetes.io/instance: radarr-standup + app.kubernetes.io/part-of: radarr-standup + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + groups: + - name: cloudnative-pg/radarr5-standup-postgresql-17 + rules: + - 
alert: CNPGClusterBackendsWaitingWarning + annotations: + summary: CNPG Cluster has a backend waiting for longer than 5 minutes. + description: |- + Pod {{ $labels.pod }} + has been waiting for longer than 5 minutes + expr: | + cnpg_backends_waiting_total > 300 + for: 1m + labels: + severity: warning + namespace: radarr-standup + cnpg_cluster: radarr5-standup-postgresql-17-cluster + - alert: CNPGClusterDatabaseDeadlockConflictsWarning + annotations: + summary: CNPG Cluster has over 10 deadlock conflicts. + description: |- + There are over 10 deadlock conflicts in + {{ $labels.pod }} + expr: | + cnpg_pg_stat_database_deadlocks > 10 + for: 1m + labels: + severity: warning + namespace: radarr-standup + cnpg_cluster: radarr5-standup-postgresql-17-cluster + - alert: CNPGClusterHACritical + annotations: + summary: CNPG Cluster has no standby replicas! + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe + risk of data loss and downtime if the primary instance fails. + + The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint + will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary. + + This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer + instances. The replaced instance may need some time to catch up with the cluster primary instance. + + This alarm will always trigger if your cluster is configured to run with only 1 instance. In this + case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-standup"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-standup"}) < 1 + for: 5m + labels: + severity: critical + namespace: radarr-standup + cnpg_cluster: radarr5-standup-postgresql-17-cluster + - alert: CNPGClusterHAWarning + annotations: + summary: CNPG Cluster has fewer than 2 standby replicas. + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting + your cluster at risk if another instance fails. The cluster is still able to operate normally, although + the `-ro` and `-r` endpoints operate at reduced capacity. + + This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may + need some time to catch up with the cluster primary instance. + + This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances. + In this case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-standup"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-standup"}) < 2 + for: 5m + labels: + severity: warning + namespace: radarr-standup + cnpg_cluster: radarr5-standup-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsCritical + annotations: + summary: CNPG Instance is critically close to the maximum number of connections! + description: |- + CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections.
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95 + for: 5m + labels: + severity: critical + namespace: radarr-standup + cnpg_cluster: radarr5-standup-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsWarning + annotations: + summary: CNPG Instance is approaching the maximum number of connections. + description: |- + CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: radarr-standup + cnpg_cluster: radarr5-standup-postgresql-17-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. + + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="radarr-standup",pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: radarr-standup + cnpg_cluster: radarr5-standup-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: radarr-standup + cnpg_cluster: radarr5-standup-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. 
+ expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: radarr-standup + cnpg_cluster: radarr5-standup-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! + description: |- + CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: radarr-standup + cnpg_cluster: radarr5-standup-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" is running low on disk space. Check attached PVCs. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.7 + for: 5m + labels: + severity: warning + namespace: radarr-standup + cnpg_cluster: radarr5-standup-postgresql-17-cluster + - alert: CNPGClusterOffline + annotations: + summary: CNPG Cluster has no running instances! + description: |- + CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" has no ready instances. + + Having an offline cluster means your applications will not be able to access the database, leading to + potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md + expr: | + (count(cnpg_collector_up{namespace="radarr-standup",pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0 + for: 5m + labels: + severity: critical + namespace: radarr-standup + cnpg_cluster: radarr5-standup-postgresql-17-cluster + - alert: CNPGClusterPGDatabaseXidAgeWarning + annotations: + summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one. + description: |- + Over 300,000,000 transactions from frozen XID + on pod {{ $labels.pod }} + expr: | + cnpg_pg_database_xid_age > 300000000 + for: 1m + labels: + severity: warning + namespace: radarr-standup + cnpg_cluster: radarr5-standup-postgresql-17-cluster + - alert: CNPGClusterPGReplicationWarning + annotations: + summary: CNPG Cluster standby is lagging behind the primary. + description: |- + Standby is lagging behind by over 300 seconds (5 minutes) + expr: | + cnpg_pg_replication_lag > 300 + for: 1m + labels: + severity: warning + namespace: radarr-standup + cnpg_cluster: radarr5-standup-postgresql-17-cluster + - alert: CNPGClusterReplicaFailingReplicationWarning + annotations: + summary: CNPG Cluster has a replica that is failing to replicate.
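+ # The expression below flags a pod that is in recovery while its WAL receiver is down, i.e. a replica that has stopped streaming WAL from the primary.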
+ description: |- + Replica {{ $labels.pod }} + is failing to replicate + expr: | + cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up + for: 1m + labels: + severity: warning + namespace: radarr-standup + cnpg_cluster: radarr5-standup-postgresql-17-cluster + - alert: CNPGClusterZoneSpreadWarning + annotations: + summary: CNPG Cluster has instances in the same availability zone. + description: |- + CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" has instances in the same availability zone. + + A disaster in one availability zone will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: radarr-standup + cnpg_cluster: radarr5-standup-postgresql-17-cluster +--- +# Source: radarr-standup/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: radarr-standup + namespace: radarr-standup + labels: + app.kubernetes.io/name: radarr-standup + app.kubernetes.io/instance: radarr-standup + app.kubernetes.io/part-of: radarr-standup +spec: + groups: + - name: radarr-standup + rules: + - alert: ExportarrAbsent + annotations: + description: Radarr Stand Up Exportarr has disappeared from Prometheus + service discovery. + summary: Exportarr is down. + expr: | + absent(up{job=~".*radarr-standup.*"} == 1) + for: 5m + labels: + severity: critical + - alert: RadarrStandUpDown + annotations: + description: Radarr Stand Up service is down. + summary: Radarr Stand Up is down.
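+ # The expression below watches the system status metric scraped from the Exportarr sidecar; a value of 0 sustained for 5 minutes marks the service as down.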
+ expr: | + radarr_standup_system_status{job=~".*radarr-standup.*"} == 0 + for: 5m + labels: + severity: critical +--- +# Source: radarr-standup/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: radarr-standup-config-backup-source + namespace: radarr-standup + labels: + app.kubernetes.io/name: radarr-standup-config-backup-source + app.kubernetes.io/instance: radarr-standup + app.kubernetes.io/part-of: radarr-standup +spec: + sourcePVC: radarr-standup-config + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: radarr-standup-config-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + moverSecurityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: radarr-standup/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "radarr5-standup-postgresql-17-daily-backup-scheduled-backup" + namespace: radarr-standup + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-standup-postgresql-17 + app.kubernetes.io/instance: radarr-standup + app.kubernetes.io/part-of: radarr-standup + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: radarr5-standup-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "radarr5-standup-postgresql-17-external-backup" +--- +# Source: radarr-standup/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "radarr5-standup-postgresql-17-live-backup-scheduled-backup" + namespace: radarr-standup + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-standup-postgresql-17 + app.kubernetes.io/instance: radarr-standup + app.kubernetes.io/part-of: radarr-standup + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: radarr5-standup-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "radarr5-standup-postgresql-17-garage-local-backup" +--- +# Source: radarr-standup/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: radarr-standup + namespace: radarr-standup + labels: + app.kubernetes.io/name: radarr-standup + app.kubernetes.io/instance: radarr-standup + app.kubernetes.io/part-of: radarr-standup +spec: + selector: + matchLabels: + app.kubernetes.io/name: radarr-standup + app.kubernetes.io/instance: radarr-standup + endpoints: + - port: metrics + interval: 3m + scrapeTimeout: 1m + path: /metrics diff --git a/clusters/cl01tl/manifests/radarr/radarr.yaml b/clusters/cl01tl/manifests/radarr/radarr.yaml new file mode 100644 index 000000000..dbfbcfd97 --- /dev/null +++ b/clusters/cl01tl/manifests/radarr/radarr.yaml @@ -0,0 +1,930 @@ +--- +# Source: radarr/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: radarr-nfs-storage + namespace: radarr + 
labels: + app.kubernetes.io/name: radarr-nfs-storage + app.kubernetes.io/instance: radarr + app.kubernetes.io/part-of: radarr +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: radarr/charts/radarr/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: radarr-config + labels: + app.kubernetes.io/instance: radarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: radarr + helm.sh/chart: radarr-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: radarr +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "20Gi" + storageClassName: "ceph-block" +--- +# Source: radarr/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: radarr-nfs-storage + namespace: radarr + labels: + app.kubernetes.io/name: radarr-nfs-storage + app.kubernetes.io/instance: radarr + app.kubernetes.io/part-of: radarr +spec: + volumeName: radarr-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: radarr/charts/radarr/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: radarr + labels: + app.kubernetes.io/instance: radarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: radarr + app.kubernetes.io/service: radarr + helm.sh/chart: radarr-4.4.0 + namespace: radarr +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 7878 + protocol: TCP + name: http + - port: 9793 + targetPort: 9793 + protocol: TCP + name: metrics + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: radarr + app.kubernetes.io/name: radarr +--- +# Source: radarr/charts/radarr/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: radarr + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: radarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: radarr + helm.sh/chart: radarr-4.4.0 + namespace: radarr +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: radarr + app.kubernetes.io/instance: radarr + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: radarr + app.kubernetes.io/name: radarr + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + securityContext: + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + runAsGroup: 1000 + runAsUser: 1000 + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: PUID + value: "1000" + - name: PGID + value: "1000" + image: ghcr.io/linuxserver/radarr:6.0.4@sha256:06ac318ecb95a34c7b229568dcb4271f02cb5007bb189a0dd67a2032864187ca + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /config + name: config + - mountPath: /mnt/store + name: media + - args: + - radarr + env: + - name: URL + value: http://localhost + - name: CONFIG + value: /config/config.xml + - name: PORT + value: "9793" + - name: ENABLE_ADDITIONAL_METRICS + value: "false" + - name: ENABLE_UNKNOWN_QUEUE_ITEMS + value: "false" + 
image: ghcr.io/onedr0p/exportarr:v2.3.0 + imagePullPolicy: IfNotPresent + name: metrics + resources: + requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - mountPath: /config + name: config + readOnly: true + volumes: + - name: config + persistentVolumeClaim: + claimName: radarr-config + - name: media + persistentVolumeClaim: + claimName: radarr-nfs-storage +--- +# Source: radarr/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: radarr5-postgresql-17-cluster + namespace: radarr + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-postgresql-17 + app.kubernetes.io/instance: radarr + app.kubernetes.io/part-of: radarr + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "radarr5-postgresql-17-external-backup" + serverName: "radarr5-postgresql-17-backup-2" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "radarr5-postgresql-17-garage-local-backup" + serverName: "radarr5-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "radarr5-postgresql-17-recovery" + serverName: radarr5-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 200m + memory: 1Gi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: radarr5-postgresql-17-backup-1 + + externalClusters: + - name: radarr5-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "radarr5-postgresql-17-recovery" + serverName: radarr5-postgresql-17-backup-1 +--- +# Source: radarr/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: radarr-config-backup-secret + namespace: radarr + labels: + app.kubernetes.io/name: radarr-config-backup-secret + app.kubernetes.io/instance: radarr + app.kubernetes.io/part-of: radarr +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/radarr5/radarr5-config" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: 
/cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: radarr/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: radarr-postgresql-17-cluster-backup-secret + namespace: radarr + labels: + app.kubernetes.io/name: radarr-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: radarr + app.kubernetes.io/part-of: radarr +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: radarr/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: radarr-postgresql-17-cluster-backup-secret-garage + namespace: radarr + labels: + app.kubernetes.io/name: radarr-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: radarr + app.kubernetes.io/part-of: radarr +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: radarr/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-radarr + namespace: radarr + labels: + app.kubernetes.io/name: http-route-radarr + app.kubernetes.io/instance: radarr + app.kubernetes.io/part-of: radarr +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - radarr.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: radarr + port: 80 + weight: 100 +--- +# Source: radarr/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "radarr5-postgresql-17-external-backup" + namespace: radarr + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-postgresql-17 + app.kubernetes.io/instance: radarr + app.kubernetes.io/part-of: radarr + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/radarr5/radarr5-postgresql-17-cluster + endpointURL: 
https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: radarr-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: radarr-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: radarr/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "radarr5-postgresql-17-garage-local-backup" + namespace: radarr + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-postgresql-17 + app.kubernetes.io/instance: radarr + app.kubernetes.io/part-of: radarr + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/radarr/radarr5-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: radarr-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: radarr-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: radarr-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: radarr/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "radarr5-postgresql-17-recovery" + namespace: radarr + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-postgresql-17 + app.kubernetes.io/instance: radarr + app.kubernetes.io/part-of: radarr + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + configuration: + destinationPath: s3://postgres-backups/cl01tl/radarr5/radarr5-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + wal: + compression: snappy + maxParallel: 1 + data: + compression: snappy + jobs: 1 + s3Credentials: + accessKeyId: + name: radarr-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: radarr-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY +--- +# Source: radarr/charts/postgres-17-cluster/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: radarr5-postgresql-17-alert-rules + namespace: radarr + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-postgresql-17 + app.kubernetes.io/instance: radarr + app.kubernetes.io/part-of: radarr + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + groups: + - name: cloudnative-pg/radarr5-postgresql-17 + rules: + - alert: CNPGClusterBackendsWaitingWarning + annotations: + summary: CNPG Cluster has a backend waiting for longer than 5 minutes. + description: |- + Pod {{ $labels.pod }} + has been waiting for longer than 5 minutes + expr: | + cnpg_backends_waiting_total > 300 + for: 1m + labels: + severity: warning + namespace: radarr + cnpg_cluster: radarr5-postgresql-17-cluster + - alert: CNPGClusterDatabaseDeadlockConflictsWarning + annotations: + summary: CNPG Cluster has over 10 deadlock conflicts. + description: |- + There are over 10 deadlock conflicts in + {{ $labels.pod }} + expr: | + cnpg_pg_stat_database_deadlocks > 10 + for: 1m + labels: + severity: warning + namespace: radarr + cnpg_cluster: radarr5-postgresql-17-cluster + - alert: CNPGClusterHACritical + annotations: + summary: CNPG Cluster has no standby replicas! 
+ description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe + risk of data loss and downtime if the primary instance fails. + + The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint + will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary. + + This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer + instances. The replaced instance may need some time to catch up with the cluster primary instance. + + This alarm will always trigger if your cluster is configured to run with only 1 instance. In this + case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr"}) < 1 + for: 5m + labels: + severity: critical + namespace: radarr + cnpg_cluster: radarr5-postgresql-17-cluster + - alert: CNPGClusterHAWarning + annotations: + summary: CNPG Cluster has fewer than 2 standby replicas. + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting + your cluster at risk if another instance fails. The cluster is still able to operate normally, although + the `-ro` and `-r` endpoints operate at reduced capacity. + + This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may + need some time to catch up with the cluster primary instance. + + This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances. + In this case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr"}) < 2 + for: 5m + labels: + severity: warning + namespace: radarr + cnpg_cluster: radarr5-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsCritical + annotations: + summary: CNPG Instance maximum number of connections critical! + description: |- + CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95 + for: 5m + labels: + severity: critical + namespace: radarr + cnpg_cluster: radarr5-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsWarning + annotations: + summary: CNPG Instance is approaching the maximum number of connections. + description: |- + CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. 
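+ # Note on both HighConnections rules: the expressions divide each pod's cnpg_backends_total by its max_connections setting and scale to a percentage; the critical rule above fires past 95%, the warning rule below past 80%.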
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: radarr + cnpg_cluster: radarr5-postgresql-17-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. + + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="radarr",pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: radarr + cnpg_cluster: radarr5-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: radarr + cnpg_cluster: radarr5-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: radarr + cnpg_cluster: radarr5-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! + description: |- + CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! 
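+ # Note: the two LowDiskSpace rules below apply one threshold (90% critical, 70% warning) separately to three PVC groups: the instance data volumes, the matching "-wal" volumes, and any "-tbs" tablespace volumes joined to pods via kube_pod_spec_volumes_persistentvolumeclaims_info.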
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: radarr + cnpg_cluster: radarr5-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" is running low on disk space. Check attached PVCs. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.7 + for: 5m + labels: + severity: warning + namespace: radarr + cnpg_cluster: radarr5-postgresql-17-cluster + - alert: CNPGClusterOffline + annotations: + summary: CNPG Cluster has no running instances! + description: |- + CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" has no ready instances. + + Having an offline cluster means your applications will not be able to access the database, leading to + potential service disruption and/or data loss. 
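+ # Note: the expr below wraps count() in "OR on() vector(0)" because count() over zero matching series returns an empty result rather than 0; the fallback vector is what lets the "== 0" comparison fire when no instances report at all.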
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md + expr: | + (count(cnpg_collector_up{namespace="radarr",pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0 + for: 5m + labels: + severity: critical + namespace: radarr + cnpg_cluster: radarr5-postgresql-17-cluster + - alert: CNPGClusterPGDatabaseXidAgeWarning + annotations: + summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one. + description: |- + Over 300,000,000 transactions from frozen XID + on pod {{ $labels.pod }} + expr: | + cnpg_pg_database_xid_age > 300000000 + for: 1m + labels: + severity: warning + namespace: radarr + cnpg_cluster: radarr5-postgresql-17-cluster + - alert: CNPGClusterPGReplicationWarning + annotations: + summary: CNPG Cluster standby is lagging behind the primary. + description: |- + Standby is lagging behind by over 300 seconds (5 minutes) + expr: | + cnpg_pg_replication_lag > 300 + for: 1m + labels: + severity: warning + namespace: radarr + cnpg_cluster: radarr5-postgresql-17-cluster + - alert: CNPGClusterReplicaFailingReplicationWarning + annotations: + summary: CNPG Cluster has a replica that is failing to replicate. + description: |- + Replica {{ $labels.pod }} + is failing to replicate + expr: | + cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up + for: 1m + labels: + severity: warning + namespace: radarr + cnpg_cluster: radarr5-postgresql-17-cluster + - alert: CNPGClusterZoneSpreadWarning + annotations: + summary: CNPG Cluster has instances in the same availability zone. + description: |- + CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" has instances in the same availability zone. + + A disaster in one availability zone will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: radarr + cnpg_cluster: radarr5-postgresql-17-cluster +--- +# Source: radarr/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: radarr + namespace: radarr + labels: + app.kubernetes.io/name: radarr + app.kubernetes.io/instance: radarr + app.kubernetes.io/part-of: radarr +spec: + groups: + - name: radarr + rules: + - alert: ExportarrAbsent + annotations: + description: Radarr Exportarr has disappeared from Prometheus + service discovery. + summary: Exportarr is down. + expr: | + absent(up{job=~".*radarr.*"} == 1) + for: 5m + labels: + severity: critical + - alert: RadarrDown + annotations: + description: Radarr service is down. + summary: Radarr is down. 
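+ # radarr_system_status is a gauge published by the exportarr sidecar; it reports 0 when the Radarr API returns an unhealthy system status.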
+ expr: | + radarr_system_status{job=~".*radarr.*"} == 0 + for: 5m + labels: + severity: critical +--- +# Source: radarr/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: radarr-config-backup-source + namespace: radarr + labels: + app.kubernetes.io/name: radarr-config-backup-source + app.kubernetes.io/instance: radarr + app.kubernetes.io/part-of: radarr +spec: + sourcePVC: radarr-config + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: radarr-config-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + moverSecurityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: radarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "radarr5-postgresql-17-daily-backup-scheduled-backup" + namespace: radarr + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-postgresql-17 + app.kubernetes.io/instance: radarr + app.kubernetes.io/part-of: radarr + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: radarr5-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "radarr5-postgresql-17-external-backup" +--- +# Source: radarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "radarr5-postgresql-17-live-backup-scheduled-backup" + namespace: radarr + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: radarr5-postgresql-17 + app.kubernetes.io/instance: radarr + app.kubernetes.io/part-of: radarr + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: radarr5-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "radarr5-postgresql-17-garage-local-backup" +--- +# Source: radarr/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: radarr + namespace: radarr + labels: + app.kubernetes.io/name: radarr + app.kubernetes.io/instance: radarr + app.kubernetes.io/part-of: radarr +spec: + selector: + matchLabels: + app.kubernetes.io/name: radarr + app.kubernetes.io/instance: radarr + endpoints: + - port: metrics + interval: 3m + scrapeTimeout: 1m + path: /metrics diff --git a/clusters/cl01tl/manifests/roundcube/roundcube.yaml b/clusters/cl01tl/manifests/roundcube/roundcube.yaml new file mode 100644 index 000000000..ae0289b05 --- /dev/null +++ b/clusters/cl01tl/manifests/roundcube/roundcube.yaml @@ -0,0 +1,1005 @@ +--- +# Source: roundcube/charts/roundcube/templates/common.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: roundcube + labels: + app.kubernetes.io/instance: roundcube + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: roundcube + helm.sh/chart: roundcube-4.4.0 + namespace: roundcube +data: + default.conf: | + server { + listen 80 default_server; + server_name _; + root /var/www/html; + + 
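# Front controller: serve the request as a static file when one exists, otherwise hand it to index.php. +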
location / { + try_files $uri /index.php$is_args$args; + } + + location ~ \.php(/|$) { + try_files $uri =404; + fastcgi_pass roundcube:9000; + fastcgi_read_timeout 300; + proxy_read_timeout 300; + fastcgi_split_path_info ^(.+\.php)(/.*)$; + include fastcgi_params; + fastcgi_param SCRIPT_FILENAME $realpath_root$fastcgi_script_name; + fastcgi_param DOCUMENT_ROOT $realpath_root; + internal; + } + + client_max_body_size 6m; + + error_log /var/log/nginx/error.log; + access_log /var/log/nginx/access.log; + } +--- +# Source: roundcube/charts/roundcube/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: roundcube-data + labels: + app.kubernetes.io/instance: roundcube + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: roundcube + helm.sh/chart: roundcube-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: roundcube +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" + storageClassName: "ceph-block" +--- +# Source: roundcube/charts/roundcube/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: roundcube + labels: + app.kubernetes.io/instance: roundcube + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: roundcube + app.kubernetes.io/service: roundcube + helm.sh/chart: roundcube-4.4.0 + namespace: roundcube +spec: + type: ClusterIP + ports: + - port: 9000 + targetPort: 9000 + protocol: TCP + name: mail + - port: 80 + targetPort: 80 + protocol: TCP + name: web + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: roundcube + app.kubernetes.io/name: roundcube +--- +# Source: roundcube/charts/roundcube/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: roundcube-main + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: roundcube + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: roundcube + helm.sh/chart: roundcube-4.4.0 + namespace: roundcube +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: roundcube + app.kubernetes.io/instance: roundcube + template: + metadata: + annotations: + checksum/configMaps: fb5b79e14a16673def67423a38952ae1855171d07a8332d9e863febcd28fce92 + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: roundcube + app.kubernetes.io/name: roundcube + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: ROUNDCUBEMAIL_DB_TYPE + value: pgsql + - name: ROUNDCUBEMAIL_DB_HOST + valueFrom: + secretKeyRef: + key: host + name: roundcube-postgresql-17-cluster-app + - name: ROUNDCUBEMAIL_DB_NAME + valueFrom: + secretKeyRef: + key: dbname + name: roundcube-postgresql-17-cluster-app + - name: ROUNDCUBEMAIL_DB_USER + valueFrom: + secretKeyRef: + key: user + name: roundcube-postgresql-17-cluster-app + - name: ROUNDCUBEMAIL_DB_PASSWORD + valueFrom: + secretKeyRef: + key: password + name: roundcube-postgresql-17-cluster-app + - name: ROUNDCUBEMAIL_DES_KEY + valueFrom: + secretKeyRef: + key: DES_KEY + name: roundcube-key-secret + - name: ROUNDCUBEMAIL_DEFAULT_HOST + value: stalwart.stalwart + - name: ROUNDCUBEMAIL_DEFAULT_PORT + value: "143" + - name: ROUNDCUBEMAIL_SMTP_SERVER + value: stalwart.stalwart + - name: ROUNDCUBEMAIL_SMTP_PORT + value: "25" + - name: ROUNDCUBEMAIL_SKIN + value: elastic + 
- name: ROUNDCUBEMAIL_PLUGINS + value: archive,zipdownload,newmail_notifier + image: roundcube/roundcubemail:1.6.11-fpm-alpine + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 256Mi + volumeMounts: + - mountPath: /var/www/html + name: data + - mountPath: /tmp/roundcube-temp + name: temp + - env: + - name: NGINX_HOST + value: mail.alexlebens.net + - name: NGINX_PHP_CGI + value: roundcube.roundcube:9000 + image: nginx:1.29.3-alpine + imagePullPolicy: IfNotPresent + name: nginx + resources: + requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - mountPath: /etc/nginx/conf.d/default.conf + mountPropagation: None + name: config + readOnly: true + subPath: default.conf + - mountPath: /var/www/html + name: data + volumes: + - configMap: + name: roundcube + name: config + - name: data + persistentVolumeClaim: + claimName: roundcube-data + - emptyDir: {} + name: temp +--- +# Source: roundcube/charts/roundcube/templates/common.yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + name: roundcube-cleandb + labels: + app.kubernetes.io/controller: cleandb + app.kubernetes.io/instance: roundcube + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: roundcube + helm.sh/chart: roundcube-4.4.0 + namespace: roundcube +spec: + suspend: false + concurrencyPolicy: Forbid + startingDeadlineSeconds: 90 + timeZone: US/Central + schedule: "30 4 * * *" + successfulJobsHistoryLimit: 3 + failedJobsHistoryLimit: 3 + jobTemplate: + spec: + parallelism: 1 + backoffLimit: 3 + template: + metadata: + annotations: + checksum/configMaps: fb5b79e14a16673def67423a38952ae1855171d07a8332d9e863febcd28fce92 + labels: + app.kubernetes.io/controller: cleandb + app.kubernetes.io/instance: roundcube + app.kubernetes.io/name: roundcube + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + restartPolicy: Never + containers: + - args: + - bin/cleandb.sh + env: + - name: ROUNDCUBEMAIL_DB_TYPE + value: pgsql + - name: ROUNDCUBEMAIL_DB_HOST + valueFrom: + secretKeyRef: + key: host + name: roundcube-postgresql-17-cluster-app + - name: ROUNDCUBEMAIL_DB_NAME + valueFrom: + secretKeyRef: + key: dbname + name: roundcube-postgresql-17-cluster-app + - name: ROUNDCUBEMAIL_DB_USER + valueFrom: + secretKeyRef: + key: user + name: roundcube-postgresql-17-cluster-app + - name: ROUNDCUBEMAIL_DB_PASSWORD + valueFrom: + secretKeyRef: + key: password + name: roundcube-postgresql-17-cluster-app + - name: ROUNDCUBEMAIL_DES_KEY + valueFrom: + secretKeyRef: + key: DES_KEY + name: roundcube-key-secret + - name: ROUNDCUBEMAIL_DEFAULT_HOST + value: tls://stalwart.stalwart + - name: ROUNDCUBEMAIL_SMTP_SERVER + value: tls://stalwart.stalwart + - name: ROUNDCUBEMAIL_SKIN + value: elastic + - name: ROUNDCUBEMAIL_PLUGINS + value: archive,zipdownload,newmail_notifier + image: roundcube/roundcubemail:1.6.11-fpm-alpine + imagePullPolicy: IfNotPresent + name: backup + resources: + requests: + cpu: 100m + memory: 128Mi +--- +# Source: roundcube/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: roundcube-postgresql-17-cluster + namespace: roundcube + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: roundcube-postgresql-17 + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + 
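# Backup layout: of the two barman-cloud plugin entries below, the Garage object store is the WAL archiver (local, 3d retention) and the DigitalOcean Spaces store holds 30d of external base backups; the separate "recovery" object store is what the bootstrap section restores from. +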
instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "roundcube-postgresql-17-external-backup" + serverName: "roundcube-postgresql-17-backup-2" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "roundcube-postgresql-17-garage-local-backup" + serverName: "roundcube-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "roundcube-postgresql-17-recovery" + serverName: roundcube-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 100m + memory: 256Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: roundcube-postgresql-17-backup-1 + + externalClusters: + - name: roundcube-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "roundcube-postgresql-17-recovery" + serverName: roundcube-postgresql-17-backup-1 +--- +# Source: roundcube/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: roundcube-key-secret + namespace: roundcube + labels: + app.kubernetes.io/name: roundcube-key-secret + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: DES_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/roundcube/key + metadataPolicy: None + property: DES_KEY +--- +# Source: roundcube/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: roundcube-data-backup-secret + namespace: roundcube + labels: + app.kubernetes.io/name: roundcube-data-backup-secret + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/roundcube/roundcube-data" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: 
/digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: roundcube/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: roundcube-postgresql-17-cluster-backup-secret + namespace: roundcube + labels: + app.kubernetes.io/name: roundcube-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: roundcube/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: roundcube-postgresql-17-cluster-backup-secret-garage + namespace: roundcube + labels: + app.kubernetes.io/name: roundcube-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: roundcube/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-mail + namespace: roundcube + labels: + app.kubernetes.io/name: http-route-mail + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - mail.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: roundcube + port: 80 + weight: 100 +--- +# Source: roundcube/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "roundcube-postgresql-17-external-backup" + namespace: roundcube + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: roundcube-postgresql-17 + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/roundcube/roundcube-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: roundcube-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + 
secretAccessKey: + name: roundcube-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: roundcube/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "roundcube-postgresql-17-garage-local-backup" + namespace: roundcube + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: roundcube-postgresql-17 + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/roundcube/roundcube-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: roundcube-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: roundcube-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: roundcube-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: roundcube/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "roundcube-postgresql-17-recovery" + namespace: roundcube + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: roundcube-postgresql-17 + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + configuration: + destinationPath: s3://postgres-backups/cl01tl/roundcube/roundcube-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + wal: + compression: snappy + maxParallel: 1 + data: + compression: snappy + jobs: 1 + s3Credentials: + accessKeyId: + name: roundcube-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: roundcube-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY +--- +# Source: roundcube/charts/postgres-17-cluster/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: roundcube-postgresql-17-alert-rules + namespace: roundcube + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: roundcube-postgresql-17 + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + groups: + - name: cloudnative-pg/roundcube-postgresql-17 + rules: + - alert: CNPGClusterBackendsWaitingWarning + annotations: + summary: CNPG Cluster has a backend waiting for longer than 5 minutes. + description: |- + Pod {{ $labels.pod }} + has been waiting for longer than 5 minutes + expr: | + cnpg_backends_waiting_total > 300 + for: 1m + labels: + severity: warning + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster + - alert: CNPGClusterDatabaseDeadlockConflictsWarning + annotations: + summary: CNPG Cluster has over 10 deadlock conflicts. + description: |- + There are over 10 deadlock conflicts in + {{ $labels.pod }} + expr: | + cnpg_pg_stat_database_deadlocks > 10 + for: 1m + labels: + severity: warning + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster + - alert: CNPGClusterHACritical + annotations: + summary: CNPG Cluster has no standby replicas! + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. 
Your cluster is at severe + risk of data loss and downtime if the primary instance fails. + + The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint + will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary. + + This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer + instances. The replaced instance may need some time to catch up with the cluster primary instance. + + This alarm will always trigger if your cluster is configured to run with only 1 instance. In this + case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="roundcube"} - cnpg_pg_replication_is_wal_receiver_up{namespace="roundcube"}) < 1 + for: 5m + labels: + severity: critical + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster + - alert: CNPGClusterHAWarning + annotations: + summary: CNPG Cluster has fewer than 2 standby replicas. + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting + your cluster at risk if another instance fails. The cluster is still able to operate normally, although + the `-ro` and `-r` endpoints operate at reduced capacity. + + This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may + need some time to catch up with the cluster primary instance. + + This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances. + In this case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="roundcube"} - cnpg_pg_replication_is_wal_receiver_up{namespace="roundcube"}) < 2 + for: 5m + labels: + severity: warning + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsCritical + annotations: + summary: CNPG Instance maximum number of connections critical! + description: |- + CloudNativePG Cluster "roundcube/roundcube-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="roundcube", pod=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="roundcube", pod=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95 + for: 5m + labels: + severity: critical + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsWarning + annotations: + summary: CNPG Instance is approaching the maximum number of connections. + description: |- + CloudNativePG Cluster "roundcube/roundcube-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="roundcube", pod=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="roundcube", pod=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "roundcube/roundcube-postgresql-17-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. + + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="roundcube",pod=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "roundcube/roundcube-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="roundcube", pod=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! + description: |- + CloudNativePG Cluster "roundcube/roundcube-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "roundcube/roundcube-postgresql-17-cluster" is running low on disk space. Check attached PVCs. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.7 + for: 5m + labels: + severity: warning + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster + - alert: CNPGClusterOffline + annotations: + summary: CNPG Cluster has no running instances! + description: |- + CloudNativePG Cluster "roundcube/roundcube-postgresql-17-cluster" has no ready instances. + + Having an offline cluster means your applications will not be able to access the database, leading to + potential service disruption and/or data loss. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md + expr: | + (count(cnpg_collector_up{namespace="roundcube",pod=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0 + for: 5m + labels: + severity: critical + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster + - alert: CNPGClusterPGDatabaseXidAgeWarning + annotations: + summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one. + description: |- + Over 300,000,000 transactions from frozen XID + on pod {{ $labels.pod }} + expr: | + cnpg_pg_database_xid_age > 300000000 + for: 1m + labels: + severity: warning + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster + - alert: CNPGClusterPGReplicationWarning + annotations: + summary: CNPG Cluster standby is lagging behind the primary. + description: |- + Standby is lagging behind by over 300 seconds (5 minutes) + expr: | + cnpg_pg_replication_lag > 300 + for: 1m + labels: + severity: warning + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster + - alert: CNPGClusterReplicaFailingReplicationWarning + annotations: + summary: CNPG Cluster has a replica that is failing to replicate. + description: |- + Replica {{ $labels.pod }} + is failing to replicate + expr: | + cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up + for: 1m + labels: + severity: warning + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster + - alert: CNPGClusterZoneSpreadWarning + annotations: + summary: CNPG Cluster has instances in the same availability zone. + description: |- + CloudNativePG Cluster "roundcube/roundcube-postgresql-17-cluster" has instances in the same availability zone. + + A disaster in one availability zone will lead to a potential service disruption and/or data loss. 
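+ # Note: the expr below counts the distinct topology zones covered by the cluster's pods (via the kube_pod_info / kube_node_labels join) and fires while they span fewer than 3 availability zones.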
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="roundcube", pod=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster +--- +# Source: roundcube/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: roundcube-data-backup-source + namespace: roundcube + labels: + app.kubernetes.io/name: roundcube-data-backup-source + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube +spec: + sourcePVC: roundcube-data + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: roundcube-data-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: roundcube/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "roundcube-postgresql-17-daily-backup-scheduled-backup" + namespace: roundcube + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: roundcube-postgresql-17 + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: roundcube-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "roundcube-postgresql-17-external-backup" +--- +# Source: roundcube/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "roundcube-postgresql-17-live-backup-scheduled-backup" + namespace: roundcube + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: roundcube-postgresql-17 + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: roundcube-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "roundcube-postgresql-17-garage-local-backup" diff --git a/clusters/cl01tl/manifests/searxng/searxng.yaml b/clusters/cl01tl/manifests/searxng/searxng.yaml new file mode 100644 index 000000000..94b95ca8e --- /dev/null +++ b/clusters/cl01tl/manifests/searxng/searxng.yaml @@ -0,0 +1,435 @@ +--- +# Source: searxng/charts/searxng/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: searxng-api-data + labels: + app.kubernetes.io/instance: searxng + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: searxng + helm.sh/chart: searxng-4.4.0 + namespace: searxng +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" + storageClassName: "ceph-block" +--- +# Source: searxng/charts/searxng/templates/common.yaml +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: 
searxng-browser-data
+  labels:
+    app.kubernetes.io/instance: searxng
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: searxng
+    helm.sh/chart: searxng-4.4.0
+  namespace: searxng
+spec:
+  accessModes:
+    - "ReadWriteOnce"
+  resources:
+    requests:
+      storage: "5Gi"
+  storageClassName: "ceph-block"
+---
+# Source: searxng/charts/searxng/templates/common.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: searxng-api
+  labels:
+    app.kubernetes.io/instance: searxng
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: searxng
+    app.kubernetes.io/service: searxng-api
+    helm.sh/chart: searxng-4.4.0
+  namespace: searxng
+spec:
+  type: ClusterIP
+  ports:
+    - port: 8080
+      targetPort: 8080
+      protocol: TCP
+      name: http
+  selector:
+    app.kubernetes.io/controller: api
+    app.kubernetes.io/instance: searxng
+    app.kubernetes.io/name: searxng
+---
+# Source: searxng/charts/searxng/templates/common.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: searxng-browser
+  labels:
+    app.kubernetes.io/instance: searxng
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: searxng
+    app.kubernetes.io/service: searxng-browser
+    helm.sh/chart: searxng-4.4.0
+  namespace: searxng
+spec:
+  type: ClusterIP
+  ports:
+    - port: 80
+      targetPort: 8080
+      protocol: TCP
+      name: http
+  selector:
+    app.kubernetes.io/controller: browser
+    app.kubernetes.io/instance: searxng
+    app.kubernetes.io/name: searxng
+---
+# Source: searxng/charts/searxng/templates/common.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: searxng-api
+  labels:
+    app.kubernetes.io/controller: api
+    app.kubernetes.io/instance: searxng
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: searxng
+    helm.sh/chart: searxng-4.4.0
+  namespace: searxng
+spec:
+  revisionHistoryLimit: 3
+  replicas: 1
+  strategy:
+    type: Recreate
+  selector:
+    matchLabels:
+      app.kubernetes.io/controller: api
+      app.kubernetes.io/name: searxng
+      app.kubernetes.io/instance: searxng
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/controller: api
+        app.kubernetes.io/instance: searxng
+        app.kubernetes.io/name: searxng
+    spec:
+      enableServiceLinks: false
+      serviceAccountName: default
+      automountServiceAccountToken: true
+      hostIPC: false
+      hostNetwork: false
+      hostPID: false
+      dnsPolicy: ClusterFirst
+      containers:
+        - env:
+            - name: SEARXNG_BASE_URL
+              value: http://searxng-api.searxng:8080
+            - name: SEARXNG_QUERY_URL
+              value: http://searxng-api.searxng:8080/search?q=
+            - name: SEARXNG_HOSTNAME
+              value: searxng-api.searxng
+            - name: UWSGI_WORKERS
+              value: "4"
+            - name: UWSGI_THREADS
+              value: "4"
+            - name: ENABLE_RAG_WEB_SEARCH
+              value: "true"
+            - name: RAG_WEB_SEARCH_ENGINE
+              value: searxng
+            - name: RAG_WEB_SEARCH_RESULT_COUNT
+              value: "3"
+            - name: RAG_WEB_SEARCH_CONCURRENT_REQUESTS
+              value: "10"
+          image: searxng/searxng:latest@sha256:0124d32d77e0c7360d0b85f5d91882d1837e6ceb243c82e190f5d7e9f1401334
+          imagePullPolicy: IfNotPresent
+          name: main
+          resources:
+            requests:
+              cpu: 10m
+              memory: 256Mi
+          volumeMounts:
+            - mountPath: /etc/searxng
+              name: api-data
+            - mountPath: /etc/searxng/settings.yml
+              mountPropagation: None
+              name: config
+              readOnly: true
+              subPath: settings.yml
+            - mountPath: /etc/searxng/limiter.toml
+              mountPropagation: None
+              name: config
+              readOnly: true
+              subPath: limiter.toml
+      volumes:
+        - name: api-data
+          persistentVolumeClaim:
+            claimName: searxng-api-data
+        - name: config
+          secret:
+            secretName: searxng-api-config-secret
+---
+# Source: searxng/charts/searxng/templates/common.yaml
+apiVersion: apps/v1
+kind: 
Deployment +metadata: + name: searxng-browser + labels: + app.kubernetes.io/controller: browser + app.kubernetes.io/instance: searxng + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: searxng + helm.sh/chart: searxng-4.4.0 + namespace: searxng +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: browser + app.kubernetes.io/name: searxng + app.kubernetes.io/instance: searxng + template: + metadata: + labels: + app.kubernetes.io/controller: browser + app.kubernetes.io/instance: searxng + app.kubernetes.io/name: searxng + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: SEARXNG_BASE_URL + value: https://searxng.alexlebens.net/ + - name: SEARXNG_QUERY_URL + value: https://searxng.alexlebens.net/search?q= + - name: SEARXNG_HOSTNAME + value: searxng.alexlebens.net + - name: SEARXNG_REDIS_URL + value: redis://redis-replication-searxng-master.searxng:6379/0 + - name: UWSGI_WORKERS + value: "4" + - name: UWSGI_THREADS + value: "4" + image: searxng/searxng:latest@sha256:0124d32d77e0c7360d0b85f5d91882d1837e6ceb243c82e190f5d7e9f1401334 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 256Mi + volumeMounts: + - mountPath: /etc/searxng + name: browser-data + volumes: + - name: browser-data + persistentVolumeClaim: + claimName: searxng-browser-data +--- +# Source: searxng/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: searxng-api-config-secret + namespace: searxng + labels: + app.kubernetes.io/name: searxng-api-config-secret + app.kubernetes.io/instance: searxng + app.kubernetes.io/part-of: searxng +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: settings.yml + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/searxng/api/config + metadataPolicy: None + property: settings.yml + - secretKey: limiter.toml + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/searxng/api/config + metadataPolicy: None + property: limiter.toml +--- +# Source: searxng/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: searxng-browser-data-backup-secret + namespace: searxng + labels: + app.kubernetes.io/name: searxng-browser-data-backup-secret + app.kubernetes.io/instance: searxng + app.kubernetes.io/part-of: searxng +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/searxng/searxng-browser-data" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: 
/digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: searxng/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-searxng + namespace: searxng + labels: + app.kubernetes.io/name: http-route-searxng + app.kubernetes.io/instance: searxng + app.kubernetes.io/part-of: searxng +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - searxng.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: searxng-browser + port: 80 + weight: 100 +--- +# Source: searxng/templates/redis-replication.yaml +apiVersion: redis.redis.opstreelabs.in/v1beta2 +kind: RedisReplication +metadata: + name: redis-replication-searxng + namespace: searxng + labels: + app.kubernetes.io/name: redis-replication-searxng + app.kubernetes.io/instance: searxng + app.kubernetes.io/part-of: searxng +spec: + clusterSize: 3 + podSecurityContext: + runAsUser: 1000 + fsGroup: 1000 + kubernetesConfig: + image: quay.io/opstree/redis:v8.0.3 + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 50m + memory: 128Mi + storage: + volumeClaimTemplate: + spec: + storageClassName: ceph-block + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 1Gi + redisExporter: + enabled: true + image: quay.io/opstree/redis-exporter:v1.48.0 +--- +# Source: searxng/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: searxng-browser-data-backup-source + namespace: searxng + labels: + app.kubernetes.io/name: searxng-browser-data-backup-source + app.kubernetes.io/instance: searxng + app.kubernetes.io/part-of: searxng +spec: + sourcePVC: searxng-browser-data + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: searxng-browser-data-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: searxng/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: redis-replication-searxng + namespace: searxng + labels: + app.kubernetes.io/name: redis-replication-searxng + app.kubernetes.io/instance: searxng + app.kubernetes.io/part-of: searxng + redis-operator: "true" + env: production +spec: + selector: + matchLabels: + redis_setup_type: replication + endpoints: + - port: redis-exporter + interval: 30s + scrapeTimeout: 10s diff --git a/clusters/cl01tl/manifests/site-documentation/site-documentation.yaml b/clusters/cl01tl/manifests/site-documentation/site-documentation.yaml new file mode 100644 index 000000000..96df16ff6 --- /dev/null +++ b/clusters/cl01tl/manifests/site-documentation/site-documentation.yaml @@ -0,0 +1,153 @@ +--- +# Source: site-documentation/charts/site-documentation/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: site-documentation + labels: + app.kubernetes.io/instance: site-documentation + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: site-documentation + app.kubernetes.io/service: site-documentation + helm.sh/chart: site-documentation-4.4.0 + 
namespace: site-documentation +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 4321 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: site-documentation + app.kubernetes.io/name: site-documentation +--- +# Source: site-documentation/charts/cloudflared-site/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: site-documentation-cloudflared-site + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: site-documentation + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cloudflared-site + app.kubernetes.io/version: 2025.10.0 + helm.sh/chart: cloudflared-site-1.23.0 + namespace: site-documentation +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: cloudflared-site + app.kubernetes.io/instance: site-documentation + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: site-documentation + app.kubernetes.io/name: cloudflared-site + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - args: + - tunnel + - --protocol + - http2 + - --no-autoupdate + - run + - --token + - $(CF_MANAGED_TUNNEL_TOKEN) + env: + - name: CF_MANAGED_TUNNEL_TOKEN + valueFrom: + secretKeyRef: + key: cf-tunnel-token + name: site-documentation-cloudflared-api-secret + image: cloudflare/cloudflared:2025.11.1 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi +--- +# Source: site-documentation/charts/site-documentation/templates/common.yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: site-documentation + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: site-documentation + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: site-documentation + helm.sh/chart: site-documentation-4.4.0 + namespace: site-documentation +spec: + revisionHistoryLimit: 3 + replicas: 3 + strategy: + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: site-documentation + app.kubernetes.io/instance: site-documentation + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: site-documentation + app.kubernetes.io/name: site-documentation + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - image: harbor.alexlebens.net/images/site-documentation:0.0.3 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi +--- +# Source: site-documentation/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: site-documentation-cloudflared-api-secret + namespace: site-documentation + labels: + app.kubernetes.io/name: site-documentation-cloudflared-api-secret + app.kubernetes.io/instance: site-documentation + app.kubernetes.io/part-of: site-documentation +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: cf-tunnel-token + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cloudflare/tunnels/site-documentation + metadataPolicy: None + property: 
token diff --git a/clusters/cl01tl/manifests/site-profile/site-profile.yaml b/clusters/cl01tl/manifests/site-profile/site-profile.yaml new file mode 100644 index 000000000..f16ff4c21 --- /dev/null +++ b/clusters/cl01tl/manifests/site-profile/site-profile.yaml @@ -0,0 +1,153 @@ +--- +# Source: site-profile/charts/site-profile/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: site-profile + labels: + app.kubernetes.io/instance: site-profile + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: site-profile + app.kubernetes.io/service: site-profile + helm.sh/chart: site-profile-4.4.0 + namespace: site-profile +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 4321 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: site-profile + app.kubernetes.io/name: site-profile +--- +# Source: site-profile/charts/cloudflared-site/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: site-profile-cloudflared-site + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: site-profile + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cloudflared-site + app.kubernetes.io/version: 2025.10.0 + helm.sh/chart: cloudflared-site-1.23.0 + namespace: site-profile +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: cloudflared-site + app.kubernetes.io/instance: site-profile + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: site-profile + app.kubernetes.io/name: cloudflared-site + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - args: + - tunnel + - --protocol + - http2 + - --no-autoupdate + - run + - --token + - $(CF_MANAGED_TUNNEL_TOKEN) + env: + - name: CF_MANAGED_TUNNEL_TOKEN + valueFrom: + secretKeyRef: + key: cf-tunnel-token + name: site-profile-cloudflared-api-secret + image: cloudflare/cloudflared:2025.11.1 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi +--- +# Source: site-profile/charts/site-profile/templates/common.yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: site-profile + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: site-profile + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: site-profile + helm.sh/chart: site-profile-4.4.0 + namespace: site-profile +spec: + revisionHistoryLimit: 3 + replicas: 3 + strategy: + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: site-profile + app.kubernetes.io/instance: site-profile + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: site-profile + app.kubernetes.io/name: site-profile + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - image: harbor.alexlebens.net/images/site-profile:2.1.0 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi +--- +# Source: site-profile/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: 
site-profile-cloudflared-api-secret + namespace: site-profile + labels: + app.kubernetes.io/name: site-profile-cloudflared-api-secret + app.kubernetes.io/instance: site-profile + app.kubernetes.io/part-of: site-profile +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: cf-tunnel-token + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cloudflare/tunnels/site-profile + metadataPolicy: None + property: token diff --git a/clusters/cl01tl/manifests/slskd/slskd.yaml b/clusters/cl01tl/manifests/slskd/slskd.yaml new file mode 100644 index 000000000..dc896bd7d --- /dev/null +++ b/clusters/cl01tl/manifests/slskd/slskd.yaml @@ -0,0 +1,396 @@ +--- +# Source: slskd/templates/namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: slskd + labels: + app.kubernetes.io/name: slskd + app.kubernetes.io/instance: slskd + app.kubernetes.io/part-of: slskd + pod-security.kubernetes.io/audit: privileged + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/warn: privileged +--- +# Source: slskd/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: slskd-nfs-storage + namespace: slskd + labels: + app.kubernetes.io/name: slskd-nfs-storage + app.kubernetes.io/instance: slskd + app.kubernetes.io/part-of: slskd +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: slskd/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: slskd-nfs-storage + namespace: slskd + labels: + app.kubernetes.io/name: slskd-nfs-storage + app.kubernetes.io/instance: slskd + app.kubernetes.io/part-of: slskd +spec: + volumeName: slskd-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: slskd/charts/slskd/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: slskd + labels: + app.kubernetes.io/instance: slskd + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: slskd + app.kubernetes.io/service: slskd + helm.sh/chart: slskd-4.4.0 + namespace: slskd +spec: + type: ClusterIP + ports: + - port: 5030 + targetPort: 5030 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: slskd + app.kubernetes.io/name: slskd +--- +# Source: slskd/charts/slskd/templates/common.yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: slskd-main + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: slskd + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: slskd + helm.sh/chart: slskd-4.4.0 + namespace: slskd +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: slskd + app.kubernetes.io/instance: slskd + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: slskd + app.kubernetes.io/name: slskd + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + initContainers: + - args: + - -ec + - | + sysctl -w net.ipv4.ip_forward=1; + sysctl -w net.ipv6.conf.all.disable_ipv6=1 + command: + 
- /bin/sh + image: busybox:1.37.0 + imagePullPolicy: IfNotPresent + name: init-sysctl + resources: + requests: + cpu: 10m + memory: 128Mi + securityContext: + privileged: true + containers: + - env: + - name: VPN_SERVICE_PROVIDER + value: protonvpn + - name: VPN_TYPE + value: wireguard + - name: WIREGUARD_PRIVATE_KEY + valueFrom: + secretKeyRef: + key: private-key + name: slskd-wireguard-conf + - name: VPN_PORT_FORWARDING + value: "on" + - name: PORT_FORWARD_ONLY + value: "on" + - name: FIREWALL_OUTBOUND_SUBNETS + value: 192.168.1.0/24,10.244.0.0/16 + - name: FIREWALL_INPUT_PORTS + value: 5030,50300 + - name: DOT + value: "off" + image: ghcr.io/qdm12/gluetun:v3.40.3@sha256:ef4a44819a60469682c7b5e69183e6401171891feaa60186652d292c59e41b30 + imagePullPolicy: IfNotPresent + name: gluetun + resources: + limits: + devic.es/tun: "1" + requests: + cpu: 10m + devic.es/tun: "1" + memory: 128Mi + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + privileged: true + - env: + - name: TZ + value: US/Central + - name: PUID + value: "1000" + - name: PGID + value: "1000" + - name: SLSKD_UMASK + value: "0" + image: slskd/slskd:0.24.0 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 100m + memory: 512Mi + volumeMounts: + - mountPath: /mnt/store + name: data + - mountPath: /app/slskd.yml + mountPropagation: None + name: slskd-config + readOnly: true + subPath: slskd.yml + volumes: + - name: data + persistentVolumeClaim: + claimName: slskd-nfs-storage + - name: slskd-config + secret: + secretName: slskd-config-secret +--- +# Source: slskd/charts/slskd/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: slskd-soularr + labels: + app.kubernetes.io/controller: soularr + app.kubernetes.io/instance: slskd + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: slskd + helm.sh/chart: slskd-4.4.0 + namespace: slskd +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: soularr + app.kubernetes.io/name: slskd + app.kubernetes.io/instance: slskd + template: + metadata: + labels: + app.kubernetes.io/controller: soularr + app.kubernetes.io/instance: slskd + app.kubernetes.io/name: slskd + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + securityContext: + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: PUID + value: "1000" + - name: PGID + value: "1000" + - name: SCRIPT_INTERVAL + value: "300" + image: mrusse08/soularr:latest@sha256:71a0b9e5a522d76bb0ffdb6d720d681fde22417b3a5acc9ecae61c89d05d8afc + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 256Mi + volumeMounts: + - mountPath: /mnt/store + name: data + - mountPath: /data/config.ini + mountPropagation: None + name: soularr-config + readOnly: true + subPath: config.ini + volumes: + - name: data + persistentVolumeClaim: + claimName: slskd-nfs-storage + - name: soularr-config + secret: + secretName: soularr-config-secret +--- +# Source: slskd/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: slskd-config-secret + namespace: slskd + labels: + app.kubernetes.io/name: slskd-config-secret + app.kubernetes.io/instance: slskd + app.kubernetes.io/part-of: slskd +spec: + secretStoreRef: + kind: ClusterSecretStore + 
name: vault + data: + - secretKey: slskd.yml + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/slskd/config + metadataPolicy: None + property: slskd.yml +--- +# Source: slskd/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: soularr-config-secret + namespace: slskd + labels: + app.kubernetes.io/name: soularr-config-secret + app.kubernetes.io/instance: slskd + app.kubernetes.io/part-of: slskd +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: config.ini + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/slskd/soularr + metadataPolicy: None + property: config.ini +--- +# Source: slskd/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: slskd-wireguard-conf + namespace: slskd + labels: + app.kubernetes.io/name: slskd-wireguard-conf + app.kubernetes.io/instance: slskd + app.kubernetes.io/part-of: slskd +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: private-key + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /protonvpn/conf/cl01tl + metadataPolicy: None + property: private-key +--- +# Source: slskd/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-slskd + namespace: slskd + labels: + app.kubernetes.io/name: http-route-slskd + app.kubernetes.io/instance: slskd + app.kubernetes.io/part-of: slskd +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - slskd.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: slskd + port: 5030 + weight: 100 +--- +# Source: slskd/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: slskd + namespace: slskd + labels: + app.kubernetes.io/name: slskd + app.kubernetes.io/instance: slskd + app.kubernetes.io/part-of: slskd +spec: + selector: + matchLabels: + app.kubernetes.io/name: slskd + app.kubernetes.io/instance: slskd + endpoints: + - port: http + interval: 3m + scrapeTimeout: 1m + path: /metrics diff --git a/clusters/cl01tl/manifests/sonarr-4k/sonarr-4k.yaml b/clusters/cl01tl/manifests/sonarr-4k/sonarr-4k.yaml new file mode 100644 index 000000000..686d78957 --- /dev/null +++ b/clusters/cl01tl/manifests/sonarr-4k/sonarr-4k.yaml @@ -0,0 +1,928 @@ +--- +# Source: sonarr-4k/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: sonarr-4k-nfs-storage + namespace: sonarr-4k + labels: + app.kubernetes.io/name: sonarr-4k-nfs-storage + app.kubernetes.io/instance: sonarr-4k + app.kubernetes.io/part-of: sonarr-4k +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: sonarr-4k/charts/sonarr-4k/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: sonarr-4k-config + labels: + app.kubernetes.io/instance: sonarr-4k + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: sonarr-4k + helm.sh/chart: sonarr-4k-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: sonarr-4k +spec: + accessModes: + - "ReadWriteOnce" + 
resources: + requests: + storage: "20Gi" + storageClassName: "ceph-block" +--- +# Source: sonarr-4k/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: sonarr-4k-nfs-storage + namespace: sonarr-4k + labels: + app.kubernetes.io/name: sonarr-4k-nfs-storage + app.kubernetes.io/instance: sonarr-4k + app.kubernetes.io/part-of: sonarr-4k +spec: + volumeName: sonarr-4k-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: sonarr-4k/charts/sonarr-4k/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: sonarr-4k + labels: + app.kubernetes.io/instance: sonarr-4k + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: sonarr-4k + app.kubernetes.io/service: sonarr-4k + helm.sh/chart: sonarr-4k-4.4.0 + namespace: sonarr-4k +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 8989 + protocol: TCP + name: http + - port: 9794 + targetPort: 9794 + protocol: TCP + name: metrics + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: sonarr-4k + app.kubernetes.io/name: sonarr-4k +--- +# Source: sonarr-4k/charts/sonarr-4k/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: sonarr-4k + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: sonarr-4k + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: sonarr-4k + helm.sh/chart: sonarr-4k-4.4.0 + namespace: sonarr-4k +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: sonarr-4k + app.kubernetes.io/instance: sonarr-4k + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: sonarr-4k + app.kubernetes.io/name: sonarr-4k + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + securityContext: + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: PUID + value: "1000" + - name: PGID + value: "1000" + image: ghcr.io/linuxserver/sonarr:4.0.16@sha256:60e5edcac39172294ad22d55d1b08c2c0a9fe658cad2f2c4d742ae017d7874de + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 256Mi + volumeMounts: + - mountPath: /config + name: config + - mountPath: /mnt/store + name: media + - args: + - sonarr + env: + - name: URL + value: http://localhost + - name: CONFIG + value: /config/config.xml + - name: PORT + value: "9794" + - name: ENABLE_ADDITIONAL_METRICS + value: "false" + - name: ENABLE_UNKNOWN_QUEUE_ITEMS + value: "false" + image: ghcr.io/onedr0p/exportarr:v2.3.0 + imagePullPolicy: IfNotPresent + name: metrics + resources: + requests: + cpu: 10m + memory: 256Mi + volumeMounts: + - mountPath: /config + name: config + readOnly: true + volumes: + - name: config + persistentVolumeClaim: + claimName: sonarr-4k-config + - name: media + persistentVolumeClaim: + claimName: sonarr-4k-nfs-storage +--- +# Source: sonarr-4k/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: sonarr4-4k-postgresql-17-cluster + namespace: sonarr-4k + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: sonarr4-4k-postgresql-17 + app.kubernetes.io/instance: sonarr-4k + app.kubernetes.io/part-of: 
sonarr-4k + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "sonarr4-4k-postgresql-17-external-backup" + serverName: "sonarr4-4k-postgresql-17-backup-1" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "sonarr4-4k-postgresql-17-garage-local-backup" + serverName: "sonarr4-4k-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "sonarr4-4k-postgresql-17-recovery" + serverName: sonarr4-4k-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 100m + memory: 512Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: sonarr4-4k-postgresql-17-backup-1 + + externalClusters: + - name: sonarr4-4k-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "sonarr4-4k-postgresql-17-recovery" + serverName: sonarr4-4k-postgresql-17-backup-1 +--- +# Source: sonarr-4k/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: sonarr-4k-config-backup-secret + namespace: sonarr-4k + labels: + app.kubernetes.io/name: sonarr-4k-config-backup-secret + app.kubernetes.io/instance: sonarr-4k + app.kubernetes.io/part-of: sonarr-4k +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/sonarr4-4k/sonarr4-4k-config" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: sonarr-4k/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: sonarr-4k-postgresql-17-cluster-backup-secret + 
namespace: sonarr-4k + labels: + app.kubernetes.io/name: sonarr-4k-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: sonarr-4k + app.kubernetes.io/part-of: sonarr-4k +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: sonarr-4k/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: sonarr-4k-postgresql-17-cluster-backup-secret-garage + namespace: sonarr-4k + labels: + app.kubernetes.io/name: sonarr-4k-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: sonarr-4k + app.kubernetes.io/part-of: sonarr-4k +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: sonarr-4k/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-sonarr-4k + namespace: sonarr-4k + labels: + app.kubernetes.io/name: http-route-sonarr-4k + app.kubernetes.io/instance: sonarr-4k + app.kubernetes.io/part-of: sonarr-4k +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - sonarr-4k.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: sonarr-4k + port: 80 + weight: 100 +--- +# Source: sonarr-4k/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "sonarr4-4k-postgresql-17-external-backup" + namespace: sonarr-4k + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: sonarr4-4k-postgresql-17 + app.kubernetes.io/instance: sonarr-4k + app.kubernetes.io/part-of: sonarr-4k + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/sonarr4-4k/sonarr4-4k-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: sonarr-4k-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: sonarr-4k-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: sonarr-4k/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "sonarr4-4k-postgresql-17-garage-local-backup" + namespace: sonarr-4k + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: sonarr4-4k-postgresql-17 + app.kubernetes.io/instance: 
sonarr-4k
+    app.kubernetes.io/part-of: sonarr-4k
+    app.kubernetes.io/version: "6.16.0"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  retentionPolicy: 3d
+  configuration:
+    destinationPath: s3://postgres-backups/cl01tl/sonarr-4k/sonarr4-4k-postgresql-17-cluster
+    endpointURL: http://garage-main.garage:3900
+    s3Credentials:
+      accessKeyId:
+        name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
+        key: ACCESS_KEY_ID
+      secretAccessKey:
+        name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
+        key: ACCESS_SECRET_KEY
+      region:
+        name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
+        key: ACCESS_REGION
+---
+# Source: sonarr-4k/charts/postgres-17-cluster/templates/object-store.yaml
+apiVersion: barmancloud.cnpg.io/v1
+kind: ObjectStore
+metadata:
+  name: "sonarr4-4k-postgresql-17-recovery"
+  namespace: sonarr-4k
+  labels:
+    helm.sh/chart: postgres-17-cluster-6.16.0
+    app.kubernetes.io/name: sonarr4-4k-postgresql-17
+    app.kubernetes.io/instance: sonarr-4k
+    app.kubernetes.io/part-of: sonarr-4k
+    app.kubernetes.io/version: "6.16.0"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  configuration:
+    destinationPath: s3://postgres-backups/cl01tl/sonarr-4k/sonarr4-4k-postgresql-17-cluster
+    endpointURL: http://garage-main.garage:3900
+    wal:
+      compression: snappy
+      maxParallel: 1
+    data:
+      compression: snappy
+      jobs: 1
+    s3Credentials:
+      accessKeyId:
+        name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
+        key: ACCESS_KEY_ID
+      secretAccessKey:
+        name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
+        key: ACCESS_SECRET_KEY
+---
+# Source: sonarr-4k/charts/postgres-17-cluster/templates/prometheus-rule.yaml
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: sonarr4-4k-postgresql-17-alert-rules
+  namespace: sonarr-4k
+  labels:
+    helm.sh/chart: postgres-17-cluster-6.16.0
+    app.kubernetes.io/name: sonarr4-4k-postgresql-17
+    app.kubernetes.io/instance: sonarr-4k
+    app.kubernetes.io/part-of: sonarr-4k
+    app.kubernetes.io/version: "6.16.0"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  groups:
+    - name: cloudnative-pg/sonarr4-4k-postgresql-17
+      rules:
+        - alert: CNPGClusterBackendsWaitingWarning
+          annotations:
+            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
+            description: |-
+              Pod {{ $labels.pod }}
+              has been waiting for longer than 5 minutes
+          expr: |
+            cnpg_backends_waiting_total > 300
+          for: 1m
+          labels:
+            severity: warning
+            namespace: sonarr-4k
+            cnpg_cluster: sonarr4-4k-postgresql-17-cluster
+        - alert: CNPGClusterDatabaseDeadlockConflictsWarning
+          annotations:
+            summary: CNPG Cluster has over 10 deadlock conflicts.
+            description: |-
+              There are over 10 deadlock conflicts in
+              {{ $labels.pod }}
+          expr: |
+            cnpg_pg_stat_database_deadlocks > 10
+          for: 1m
+          labels:
+            severity: warning
+            namespace: sonarr-4k
+            cnpg_cluster: sonarr4-4k-postgresql-17-cluster
+        - alert: CNPGClusterHACritical
+          annotations:
+            summary: CNPG Cluster has no standby replicas!
+            description: |-
+              CloudNativePG Cluster "{{ $labels.job }}" has no ready standby replicas. Your cluster is at severe
+              risk of data loss and downtime if the primary instance fails.
+
+              The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
+              will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
+
+              This can happen during a normal failover or automated minor version upgrades in a cluster with 2 or fewer
+              instances. The replaced instance may need some time to catch up with the cluster primary instance.
+
+              This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
+              case you may want to silence it.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
+          expr: |
+            max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr-4k"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr-4k"}) < 1
+          for: 5m
+          labels:
+            severity: critical
+            namespace: sonarr-4k
+            cnpg_cluster: sonarr4-4k-postgresql-17-cluster
+        - alert: CNPGClusterHAWarning
+          annotations:
+            summary: CNPG Cluster has fewer than 2 standby replicas.
+            description: |-
+              CloudNativePG Cluster "{{ $labels.job }}" has only {{ $value }} standby replicas, putting
+              your cluster at risk if another instance fails. The cluster is still able to operate normally, although
+              the `-ro` and `-r` endpoints operate at reduced capacity.
+
+              This can happen during a normal failover or automated minor version upgrades. The replaced instance may
+              need some time to catch up with the cluster primary instance.
+
+              This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
+              In this case you may want to silence it.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
+          expr: |
+            max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr-4k"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr-4k"}) < 2
+          for: 5m
+          labels:
+            severity: warning
+            namespace: sonarr-4k
+            cnpg_cluster: sonarr4-4k-postgresql-17-cluster
+        - alert: CNPGClusterHighConnectionsCritical
+          annotations:
+            summary: CNPG Instance is critically close to the maximum number of connections!
+            description: |-
+              CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" instance {{ $labels.pod }} is using {{ $value }}% of
+              the maximum number of connections.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
+          expr: |
+            sum by (pod) (cnpg_backends_total{namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
+          for: 5m
+          labels:
+            severity: critical
+            namespace: sonarr-4k
+            cnpg_cluster: sonarr4-4k-postgresql-17-cluster
+        - alert: CNPGClusterHighConnectionsWarning
+          annotations:
+            summary: CNPG Instance is approaching the maximum number of connections.
+            description: |-
+              CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" instance {{ $labels.pod }} is using {{ $value }}% of
+              the maximum number of connections.
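+            # The expression below computes, per pod, the number of active backends
+            # divided by the configured max_connections, scaled to a percentage;
+            # this warning fires above 80%, the critical rule above fires at 95%.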
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
+          expr: |
+            sum by (pod) (cnpg_backends_total{namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
+          for: 5m
+          labels:
+            severity: warning
+            namespace: sonarr-4k
+            cnpg_cluster: sonarr4-4k-postgresql-17-cluster
+        - alert: CNPGClusterHighReplicationLag
+          annotations:
+            summary: CNPG Cluster has high replication lag.
+            description: |-
+              CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" is experiencing a high replication lag of
+              {{ $value }}ms.
+
+              High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
+          expr: |
+            max(cnpg_pg_replication_lag{namespace="sonarr-4k",pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
+          for: 5m
+          labels:
+            severity: warning
+            namespace: sonarr-4k
+            cnpg_cluster: sonarr4-4k-postgresql-17-cluster
+        - alert: CNPGClusterInstancesOnSameNode
+          annotations:
+            summary: CNPG Cluster instances are located on the same node.
+            description: |-
+              CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" has {{ $value }}
+              instances on the same node {{ $labels.node }}.
+
+              A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
+          expr: |
+            count by (node) (kube_pod_info{namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
+          for: 5m
+          labels:
+            severity: warning
+            namespace: sonarr-4k
+            cnpg_cluster: sonarr4-4k-postgresql-17-cluster
+        - alert: CNPGClusterLongRunningTransactionWarning
+          annotations:
+            summary: CNPG Cluster has a query taking longer than 5 minutes.
+            description: |-
+              CloudNativePG Cluster Pod {{ $labels.pod }}
+              is taking more than 5 minutes (300 seconds) for a query.
+          expr: |-
+            cnpg_backends_max_tx_duration_seconds > 300
+          for: 1m
+          labels:
+            severity: warning
+            namespace: sonarr-4k
+            cnpg_cluster: sonarr4-4k-postgresql-17-cluster
+        - alert: CNPGClusterLowDiskSpaceCritical
+          annotations:
+            summary: CNPG Instance is running out of disk space!
+            description: |-
+              CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
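+            # The expression below evaluates three PVC families separately: the data
+            # volume, the matching "-wal" volume, and any "-tbs" tablespace volumes,
+            # and fires once any of them passes 90% of capacity (the warning variant
+            # of this rule uses a 70% threshold).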
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: sonarr-4k + cnpg_cluster: sonarr4-4k-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" is running low on disk space. Check attached PVCs. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.7 + for: 5m + labels: + severity: warning + namespace: sonarr-4k + cnpg_cluster: sonarr4-4k-postgresql-17-cluster + - alert: CNPGClusterOffline + annotations: + summary: CNPG Cluster has no running instances! + description: |- + CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" has no ready instances. + + Having an offline cluster means your applications will not be able to access the database, leading to + potential service disruption and/or data loss. 
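+            # The "OR on() vector(0)" fallback in the expression below substitutes a
+            # zero-valued sample when no cnpg_collector_up series exist at all, so the
+            # comparison still evaluates and the alert also fires when every instance
+            # has disappeared from scrape.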
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
+          expr: |
+            (count(cnpg_collector_up{namespace="sonarr-4k",pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
+          for: 5m
+          labels:
+            severity: critical
+            namespace: sonarr-4k
+            cnpg_cluster: sonarr4-4k-postgresql-17-cluster
+        - alert: CNPGClusterPGDatabaseXidAgeWarning
+          annotations:
+            summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
+            description: |-
+              Over 300,000,000 transactions from frozen xid
+              on pod {{ $labels.pod }}
+          expr: |
+            cnpg_pg_database_xid_age > 300000000
+          for: 1m
+          labels:
+            severity: warning
+            namespace: sonarr-4k
+            cnpg_cluster: sonarr4-4k-postgresql-17-cluster
+        - alert: CNPGClusterPGReplicationWarning
+          annotations:
+            summary: CNPG Cluster standby is lagging behind the primary.
+            description: |-
+              Standby is lagging behind by over 300 seconds (5 minutes)
+          expr: |
+            cnpg_pg_replication_lag > 300
+          for: 1m
+          labels:
+            severity: warning
+            namespace: sonarr-4k
+            cnpg_cluster: sonarr4-4k-postgresql-17-cluster
+        - alert: CNPGClusterReplicaFailingReplicationWarning
+          annotations:
+            summary: CNPG Cluster has a replica that is failing to replicate.
+            description: |-
+              Replica {{ $labels.pod }}
+              is failing to replicate
+          expr: |
+            cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
+          for: 1m
+          labels:
+            severity: warning
+            namespace: sonarr-4k
+            cnpg_cluster: sonarr4-4k-postgresql-17-cluster
+        - alert: CNPGClusterZoneSpreadWarning
+          annotations:
+            summary: CNPG Cluster has instances in the same availability zone.
+            description: |-
+              CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" has instances in the same availability zone.
+
+              A disaster in one availability zone will lead to a potential service disruption and/or data loss.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
+          expr: |
+            3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
+          for: 5m
+          labels:
+            severity: warning
+            namespace: sonarr-4k
+            cnpg_cluster: sonarr4-4k-postgresql-17-cluster
+---
+# Source: sonarr-4k/templates/prometheus-rule.yaml
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: sonarr-4k
+  namespace: sonarr-4k
+  labels:
+    app.kubernetes.io/name: sonarr-4k
+    app.kubernetes.io/instance: sonarr-4k
+    app.kubernetes.io/part-of: sonarr-4k
+spec:
+  groups:
+    - name: sonarr-4k
+      rules:
+        - alert: ExportarrAbsent
+          annotations:
+            description: Sonarr 4K Exportarr has disappeared from Prometheus
+              service discovery.
+            summary: Exportarr is down.
+          expr: |
+            absent(up{job=~".*sonarr-4k.*"} == 1)
+          for: 5m
+          labels:
+            severity: critical
+        - alert: Sonarr4KDown
+          annotations:
+            description: Sonarr 4K service is down.
+            summary: Sonarr 4K is down.
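+            # The status gauge in the expression below is expected from the exportarr
+            # "metrics" sidecar defined in the Deployment above; a sustained value of
+            # 0 means Sonarr reports an unhealthy system status.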
+ expr: | + sonarr_4k_system_status{job=~".*sonarr-4k.*"} == 0 + for: 5m + labels: + severity: critical +--- +# Source: sonarr-4k/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: sonarr-4k-config-backup-source + namespace: sonarr-4k + labels: + app.kubernetes.io/name: sonarr-4k-config-backup-source + app.kubernetes.io/instance: sonarr-4k + app.kubernetes.io/part-of: sonarr-4k +spec: + sourcePVC: sonarr-4k-config + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: sonarr-4k-config-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + moverSecurityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: sonarr-4k/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "sonarr4-4k-postgresql-17-daily-backup-scheduled-backup" + namespace: sonarr-4k + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: sonarr4-4k-postgresql-17 + app.kubernetes.io/instance: sonarr-4k + app.kubernetes.io/part-of: sonarr-4k + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: sonarr4-4k-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "sonarr4-4k-postgresql-17-external-backup" +--- +# Source: sonarr-4k/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "sonarr4-4k-postgresql-17-live-backup-scheduled-backup" + namespace: sonarr-4k + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: sonarr4-4k-postgresql-17 + app.kubernetes.io/instance: sonarr-4k + app.kubernetes.io/part-of: sonarr-4k + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: sonarr4-4k-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "sonarr4-4k-postgresql-17-garage-local-backup" +--- +# Source: sonarr-4k/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: sonarr-4k + namespace: sonarr-4k + labels: + app.kubernetes.io/name: sonarr-4k + app.kubernetes.io/instance: sonarr-4k + app.kubernetes.io/part-of: sonarr-4k +spec: + selector: + matchLabels: + app.kubernetes.io/name: sonarr-4k + app.kubernetes.io/instance: sonarr-4k + endpoints: + - port: metrics + interval: 3m + scrapeTimeout: 1m + path: /metrics diff --git a/clusters/cl01tl/manifests/sonarr-anime/sonarr-anime.yaml b/clusters/cl01tl/manifests/sonarr-anime/sonarr-anime.yaml new file mode 100644 index 000000000..5c12ef68c --- /dev/null +++ b/clusters/cl01tl/manifests/sonarr-anime/sonarr-anime.yaml @@ -0,0 +1,928 @@ +--- +# Source: sonarr-anime/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: sonarr-anime-nfs-storage + namespace: sonarr-anime + labels: + app.kubernetes.io/name: sonarr-anime-nfs-storage + app.kubernetes.io/instance: sonarr-anime + 
app.kubernetes.io/part-of: sonarr-anime +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: sonarr-anime/charts/sonarr-anime/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: sonarr-anime-config + labels: + app.kubernetes.io/instance: sonarr-anime + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: sonarr-anime + helm.sh/chart: sonarr-anime-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: sonarr-anime +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "20Gi" + storageClassName: "ceph-block" +--- +# Source: sonarr-anime/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: sonarr-anime-nfs-storage + namespace: sonarr-anime + labels: + app.kubernetes.io/name: sonarr-anime-nfs-storage + app.kubernetes.io/instance: sonarr-anime + app.kubernetes.io/part-of: sonarr-anime +spec: + volumeName: sonarr-anime-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: sonarr-anime/charts/sonarr-anime/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: sonarr-anime + labels: + app.kubernetes.io/instance: sonarr-anime + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: sonarr-anime + app.kubernetes.io/service: sonarr-anime + helm.sh/chart: sonarr-anime-4.4.0 + namespace: sonarr-anime +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 8989 + protocol: TCP + name: http + - port: 9794 + targetPort: 9794 + protocol: TCP + name: metrics + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: sonarr-anime + app.kubernetes.io/name: sonarr-anime +--- +# Source: sonarr-anime/charts/sonarr-anime/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: sonarr-anime + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: sonarr-anime + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: sonarr-anime + helm.sh/chart: sonarr-anime-4.4.0 + namespace: sonarr-anime +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: sonarr-anime + app.kubernetes.io/instance: sonarr-anime + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: sonarr-anime + app.kubernetes.io/name: sonarr-anime + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + securityContext: + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: PUID + value: "1000" + - name: PGID + value: "1000" + image: ghcr.io/linuxserver/sonarr:4.0.16@sha256:60e5edcac39172294ad22d55d1b08c2c0a9fe658cad2f2c4d742ae017d7874de + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 256Mi + volumeMounts: + - mountPath: /config + name: config + - mountPath: /mnt/store + name: media + - args: + - sonarr + env: + - name: URL + value: http://localhost + - name: CONFIG + value: /config/config.xml + - name: PORT + value: "9794" + - name: 
ENABLE_ADDITIONAL_METRICS + value: "false" + - name: ENABLE_UNKNOWN_QUEUE_ITEMS + value: "false" + image: ghcr.io/onedr0p/exportarr:v2.3.0 + imagePullPolicy: IfNotPresent + name: metrics + resources: + requests: + cpu: 10m + memory: 256Mi + volumeMounts: + - mountPath: /config + name: config + readOnly: true + volumes: + - name: config + persistentVolumeClaim: + claimName: sonarr-anime-config + - name: media + persistentVolumeClaim: + claimName: sonarr-anime-nfs-storage +--- +# Source: sonarr-anime/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: sonarr4-anime-postgresql-17-cluster + namespace: sonarr-anime + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: sonarr4-anime-postgresql-17 + app.kubernetes.io/instance: sonarr-anime + app.kubernetes.io/part-of: sonarr-anime + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "sonarr4-anime-postgresql-17-external-backup" + serverName: "sonarr4-anime-postgresql-17-backup-1" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "sonarr4-anime-postgresql-17-garage-local-backup" + serverName: "sonarr4-anime-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "sonarr4-anime-postgresql-17-recovery" + serverName: sonarr4-anime-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 100m + memory: 512Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: sonarr4-anime-postgresql-17-backup-1 + + externalClusters: + - name: sonarr4-anime-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "sonarr4-anime-postgresql-17-recovery" + serverName: sonarr4-anime-postgresql-17-backup-1 +--- +# Source: sonarr-anime/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: sonarr-anime-config-backup-secret + namespace: sonarr-anime + labels: + app.kubernetes.io/name: sonarr-anime-config-backup-secret + app.kubernetes.io/instance: sonarr-anime + app.kubernetes.io/part-of: sonarr-anime +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/sonarr4-anime/sonarr4-anime-config" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + 
remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: sonarr-anime/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: sonarr-anime-postgresql-17-cluster-backup-secret + namespace: sonarr-anime + labels: + app.kubernetes.io/name: sonarr-anime-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: sonarr-anime + app.kubernetes.io/part-of: sonarr-anime +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: sonarr-anime/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: sonarr-anime-postgresql-17-cluster-backup-secret-garage + namespace: sonarr-anime + labels: + app.kubernetes.io/name: sonarr-anime-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: sonarr-anime + app.kubernetes.io/part-of: sonarr-anime +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: sonarr-anime/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-sonarr-anime + namespace: sonarr-anime + labels: + app.kubernetes.io/name: http-route-sonarr-anime + app.kubernetes.io/instance: sonarr-anime + app.kubernetes.io/part-of: sonarr-anime +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - sonarr-anime.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: sonarr-anime + port: 80 + weight: 100 +--- +# Source: sonarr-anime/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "sonarr4-anime-postgresql-17-external-backup" + namespace: sonarr-anime + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 
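+    # Together with the Garage ObjectStore defined next, the spec that follows gives this
+    # cluster a two-tier backup layout: 30 days of retention offsite in DigitalOcean Spaces,
+    # and 3 days in the in-cluster Garage endpoint for faster local recovery.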
+    app.kubernetes.io/name: sonarr4-anime-postgresql-17
+    app.kubernetes.io/instance: sonarr-anime
+    app.kubernetes.io/part-of: sonarr-anime
+    app.kubernetes.io/version: "6.16.0"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  retentionPolicy: 30d
+  configuration:
+    destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/sonarr4-anime/sonarr4-anime-postgresql-17-cluster
+    endpointURL: https://nyc3.digitaloceanspaces.com
+    s3Credentials:
+      accessKeyId:
+        name: sonarr-anime-postgresql-17-cluster-backup-secret
+        key: ACCESS_KEY_ID
+      secretAccessKey:
+        name: sonarr-anime-postgresql-17-cluster-backup-secret
+        key: ACCESS_SECRET_KEY
+---
+# Source: sonarr-anime/charts/postgres-17-cluster/templates/object-store.yaml
+apiVersion: barmancloud.cnpg.io/v1
+kind: ObjectStore
+metadata:
+  name: "sonarr4-anime-postgresql-17-garage-local-backup"
+  namespace: sonarr-anime
+  labels:
+    helm.sh/chart: postgres-17-cluster-6.16.0
+    app.kubernetes.io/name: sonarr4-anime-postgresql-17
+    app.kubernetes.io/instance: sonarr-anime
+    app.kubernetes.io/part-of: sonarr-anime
+    app.kubernetes.io/version: "6.16.0"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  retentionPolicy: 3d
+  configuration:
+    destinationPath: s3://postgres-backups/cl01tl/sonarr-anime/sonarr4-anime-postgresql-17-cluster
+    endpointURL: http://garage-main.garage:3900
+    s3Credentials:
+      accessKeyId:
+        name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
+        key: ACCESS_KEY_ID
+      secretAccessKey:
+        name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
+        key: ACCESS_SECRET_KEY
+      region:
+        name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
+        key: ACCESS_REGION
+---
+# Source: sonarr-anime/charts/postgres-17-cluster/templates/object-store.yaml
+apiVersion: barmancloud.cnpg.io/v1
+kind: ObjectStore
+metadata:
+  name: "sonarr4-anime-postgresql-17-recovery"
+  namespace: sonarr-anime
+  labels:
+    helm.sh/chart: postgres-17-cluster-6.16.0
+    app.kubernetes.io/name: sonarr4-anime-postgresql-17
+    app.kubernetes.io/instance: sonarr-anime
+    app.kubernetes.io/part-of: sonarr-anime
+    app.kubernetes.io/version: "6.16.0"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  configuration:
+    destinationPath: s3://postgres-backups/cl01tl/sonarr-anime/sonarr4-anime-postgresql-17-cluster
+    endpointURL: http://garage-main.garage:3900
+    wal:
+      compression: snappy
+      maxParallel: 1
+    data:
+      compression: snappy
+      jobs: 1
+    s3Credentials:
+      accessKeyId:
+        name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
+        key: ACCESS_KEY_ID
+      secretAccessKey:
+        name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
+        key: ACCESS_SECRET_KEY
+---
+# Source: sonarr-anime/charts/postgres-17-cluster/templates/prometheus-rule.yaml
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: sonarr4-anime-postgresql-17-alert-rules
+  namespace: sonarr-anime
+  labels:
+    helm.sh/chart: postgres-17-cluster-6.16.0
+    app.kubernetes.io/name: sonarr4-anime-postgresql-17
+    app.kubernetes.io/instance: sonarr-anime
+    app.kubernetes.io/part-of: sonarr-anime
+    app.kubernetes.io/version: "6.16.0"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  groups:
+    - name: cloudnative-pg/sonarr4-anime-postgresql-17
+      rules:
+        - alert: CNPGClusterBackendsWaitingWarning
+          annotations:
+            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
+            description: |-
+              Pod {{ $labels.pod }}
+              has been waiting for longer than 5 minutes
+          expr: |
+            cnpg_backends_waiting_total > 300
+          for: 1m
+          labels:
+            severity: warning
+            namespace: sonarr-anime
+            cnpg_cluster: sonarr4-anime-postgresql-17-cluster
+        - alert: CNPGClusterDatabaseDeadlockConflictsWarning
+          annotations:
+            summary: CNPG Cluster has over 10 deadlock conflicts.
+            description: |-
+              There are over 10 deadlock conflicts in
+              {{ $labels.pod }}
+          expr: |
+            cnpg_pg_stat_database_deadlocks > 10
+          for: 1m
+          labels:
+            severity: warning
+            namespace: sonarr-anime
+            cnpg_cluster: sonarr4-anime-postgresql-17-cluster
+        - alert: CNPGClusterHACritical
+          annotations:
+            summary: CNPG Cluster has no standby replicas!
+            description: |-
+              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
+              risk of data loss and downtime if the primary instance fails.
+
+              The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
+              will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
+
+              This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
+              instances. The replaced instance may need some time to catch up with the cluster primary instance.
+
+              This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
+              case you may want to silence it.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
+          expr: |
+            max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr-anime"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr-anime"}) < 1
+          for: 5m
+          labels:
+            severity: critical
+            namespace: sonarr-anime
+            cnpg_cluster: sonarr4-anime-postgresql-17-cluster
+        - alert: CNPGClusterHAWarning
+          annotations:
+            summary: CNPG Cluster has fewer than 2 standby replicas.
+            description: |-
+              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
+              your cluster at risk if another instance fails. The cluster is still able to operate normally, although
+              the `-ro` and `-r` endpoints operate at reduced capacity.
+
+              This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
+              need some time to catch up with the cluster primary instance.
+
+              This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
+              In this case you may want to silence it.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
+          expr: |
+            max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr-anime"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr-anime"}) < 2
+          for: 5m
+          labels:
+            severity: warning
+            namespace: sonarr-anime
+            cnpg_cluster: sonarr4-anime-postgresql-17-cluster
+        - alert: CNPGClusterHighConnectionsCritical
+          annotations:
+            summary: CNPG Instance is critically close to the maximum number of connections!
+            description: |-
+              CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
+              the maximum number of connections.
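+            # The expression below computes, per pod, the ratio of active backends to the
+            # configured max_connections, as a percentage:
+            #   sum by (pod) (cnpg_backends_total) / max by (pod) (cnpg_pg_settings_setting{name="max_connections"}) * 100
+            # and fires critical above 95%; the Warning rule that follows applies the same
+            # ratio with an 80% threshold.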
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95 + for: 5m + labels: + severity: critical + namespace: sonarr-anime + cnpg_cluster: sonarr4-anime-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsWarning + annotations: + summary: CNPG Instance is approaching the maximum number of connections. + description: |- + CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: sonarr-anime + cnpg_cluster: sonarr4-anime-postgresql-17-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. + + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="sonarr-anime",pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: sonarr-anime + cnpg_cluster: sonarr4-anime-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: sonarr-anime + cnpg_cluster: sonarr4-anime-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: sonarr-anime + cnpg_cluster: sonarr4-anime-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! 
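+            # The expression below checks three PVC families against 90% usage, matching the
+            # volume names CNPG provisions for this cluster: the data volumes ("<cluster>-N"),
+            # the WAL volumes ("<cluster>-N-wal"), and any tablespace volumes
+            # ("<cluster>-N-tbs*"); the Warning variant further down repeats the same checks
+            # at a 70% threshold.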
+ description: |- + CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: sonarr-anime + cnpg_cluster: sonarr4-anime-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" is running low on disk space. Check attached PVCs. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.7 + for: 5m + labels: + severity: warning + namespace: sonarr-anime + cnpg_cluster: sonarr4-anime-postgresql-17-cluster + - alert: CNPGClusterOffline + annotations: + summary: CNPG Cluster has no running instances! 
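+            # The expression below wraps the instance count in `OR on() vector(0)` so that it
+            # still evaluates (to 0, firing the alert) when cnpg_collector_up is absent
+            # entirely, not just when the count of ready instances drops to zero.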
+            description: |-
+              CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" has no ready instances.
+
+              Having an offline cluster means your applications will not be able to access the database, leading to
+              potential service disruption and/or data loss.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
+          expr: |
+            (count(cnpg_collector_up{namespace="sonarr-anime",pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
+          for: 5m
+          labels:
+            severity: critical
+            namespace: sonarr-anime
+            cnpg_cluster: sonarr4-anime-postgresql-17-cluster
+        - alert: CNPGClusterPGDatabaseXidAgeWarning
+          annotations:
+            summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
+            description: |-
+              Over 300,000,000 transactions from frozen XID
+              on pod {{ $labels.pod }}
+          expr: |
+            cnpg_pg_database_xid_age > 300000000
+          for: 1m
+          labels:
+            severity: warning
+            namespace: sonarr-anime
+            cnpg_cluster: sonarr4-anime-postgresql-17-cluster
+        - alert: CNPGClusterPGReplicationWarning
+          annotations:
+            summary: CNPG Cluster standby is lagging behind the primary.
+            description: |-
+              Standby is lagging behind by over 300 seconds (5 minutes)
+          expr: |
+            cnpg_pg_replication_lag > 300
+          for: 1m
+          labels:
+            severity: warning
+            namespace: sonarr-anime
+            cnpg_cluster: sonarr4-anime-postgresql-17-cluster
+        - alert: CNPGClusterReplicaFailingReplicationWarning
+          annotations:
+            summary: CNPG Cluster has a replica that is failing to replicate.
+            description: |-
+              Replica {{ $labels.pod }}
+              is failing to replicate
+          expr: |
+            cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
+          for: 1m
+          labels:
+            severity: warning
+            namespace: sonarr-anime
+            cnpg_cluster: sonarr4-anime-postgresql-17-cluster
+        - alert: CNPGClusterZoneSpreadWarning
+          annotations:
+            summary: CNPG Cluster has instances in the same availability zone.
+            description: |-
+              CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" has instances in the same availability zone.
+
+              A disaster in one availability zone will lead to a potential service disruption and/or data loss.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
+          expr: |
+            3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
+          for: 5m
+          labels:
+            severity: warning
+            namespace: sonarr-anime
+            cnpg_cluster: sonarr4-anime-postgresql-17-cluster
+---
+# Source: sonarr-anime/templates/prometheus-rule.yaml
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: sonarr-anime
+  namespace: sonarr-anime
+  labels:
+    app.kubernetes.io/name: sonarr-anime
+    app.kubernetes.io/instance: sonarr-anime
+    app.kubernetes.io/part-of: sonarr-anime
+spec:
+  groups:
+    - name: sonarr-anime
+      rules:
+        - alert: ExportarrAbsent
+          annotations:
+            description: Sonarr Anime Exportarr has disappeared from Prometheus
+              service discovery.
+            summary: Exportarr is down.
+          expr: |
+            absent(up{job=~".*sonarr-anime.*"} == 1)
+          for: 5m
+          labels:
+            severity: critical
+        - alert: SonarrAnimeDown
+          annotations:
+            description: Sonarr Anime service is down.
+            summary: Sonarr Anime is down.
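+            # Both rules in this group depend on the ServiceMonitor at the end of this
+            # manifest scraping the exportarr sidecar on the `metrics` port (9794) declared
+            # in the sonarr-anime Service; without that scrape neither the `up` series nor
+            # the status gauge below ever exists.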
+ expr: | + sonarr_anime_system_status{job=~".*sonarr-anime.*"} == 0 + for: 5m + labels: + severity: critical +--- +# Source: sonarr-anime/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: sonarr-anime-config-backup-source + namespace: sonarr-anime + labels: + app.kubernetes.io/name: sonarr-anime-config-backup-source + app.kubernetes.io/instance: sonarr-anime + app.kubernetes.io/part-of: sonarr-anime +spec: + sourcePVC: sonarr-anime-config + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: sonarr-anime-config-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + moverSecurityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: sonarr-anime/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "sonarr4-anime-postgresql-17-daily-backup-scheduled-backup" + namespace: sonarr-anime + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: sonarr4-anime-postgresql-17 + app.kubernetes.io/instance: sonarr-anime + app.kubernetes.io/part-of: sonarr-anime + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: sonarr4-anime-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "sonarr4-anime-postgresql-17-external-backup" +--- +# Source: sonarr-anime/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "sonarr4-anime-postgresql-17-live-backup-scheduled-backup" + namespace: sonarr-anime + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: sonarr4-anime-postgresql-17 + app.kubernetes.io/instance: sonarr-anime + app.kubernetes.io/part-of: sonarr-anime + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: sonarr4-anime-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "sonarr4-anime-postgresql-17-garage-local-backup" +--- +# Source: sonarr-anime/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: sonarr-anime + namespace: sonarr-anime + labels: + app.kubernetes.io/name: sonarr-anime + app.kubernetes.io/instance: sonarr-anime + app.kubernetes.io/part-of: sonarr-anime +spec: + selector: + matchLabels: + app.kubernetes.io/name: sonarr-anime + app.kubernetes.io/instance: sonarr-anime + endpoints: + - port: metrics + interval: 3m + scrapeTimeout: 1m + path: /metrics diff --git a/clusters/cl01tl/manifests/sonarr/sonarr.yaml b/clusters/cl01tl/manifests/sonarr/sonarr.yaml new file mode 100644 index 000000000..f6bb8a1f9 --- /dev/null +++ b/clusters/cl01tl/manifests/sonarr/sonarr.yaml @@ -0,0 +1,928 @@ +--- +# Source: sonarr/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: sonarr-nfs-storage + namespace: sonarr + labels: + app.kubernetes.io/name: sonarr-nfs-storage + 
app.kubernetes.io/instance: sonarr + app.kubernetes.io/part-of: sonarr +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: sonarr/charts/sonarr/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: sonarr-config + labels: + app.kubernetes.io/instance: sonarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: sonarr + helm.sh/chart: sonarr-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: sonarr +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "20Gi" + storageClassName: "ceph-block" +--- +# Source: sonarr/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: sonarr-nfs-storage + namespace: sonarr + labels: + app.kubernetes.io/name: sonarr-nfs-storage + app.kubernetes.io/instance: sonarr + app.kubernetes.io/part-of: sonarr +spec: + volumeName: sonarr-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: sonarr/charts/sonarr/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: sonarr + labels: + app.kubernetes.io/instance: sonarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: sonarr + app.kubernetes.io/service: sonarr + helm.sh/chart: sonarr-4.4.0 + namespace: sonarr +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 8989 + protocol: TCP + name: http + - port: 9794 + targetPort: 9794 + protocol: TCP + name: metrics + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: sonarr + app.kubernetes.io/name: sonarr +--- +# Source: sonarr/charts/sonarr/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: sonarr + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: sonarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: sonarr + helm.sh/chart: sonarr-4.4.0 + namespace: sonarr +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: sonarr + app.kubernetes.io/instance: sonarr + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: sonarr + app.kubernetes.io/name: sonarr + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + securityContext: + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: PUID + value: "1000" + - name: PGID + value: "1000" + image: ghcr.io/linuxserver/sonarr:4.0.16@sha256:60e5edcac39172294ad22d55d1b08c2c0a9fe658cad2f2c4d742ae017d7874de + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /config + name: config + - mountPath: /mnt/store + name: media + - args: + - sonarr + env: + - name: URL + value: http://localhost + - name: CONFIG + value: /config/config.xml + - name: PORT + value: "9794" + - name: ENABLE_ADDITIONAL_METRICS + value: "false" + - name: ENABLE_UNKNOWN_QUEUE_ITEMS + value: "false" + image: ghcr.io/onedr0p/exportarr:v2.3.0 + imagePullPolicy: IfNotPresent + name: metrics + 
resources: + requests: + cpu: 10m + memory: 256Mi + volumeMounts: + - mountPath: /config + name: config + readOnly: true + volumes: + - name: config + persistentVolumeClaim: + claimName: sonarr-config + - name: media + persistentVolumeClaim: + claimName: sonarr-nfs-storage +--- +# Source: sonarr/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: sonarr4-postgresql-17-cluster + namespace: sonarr + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: sonarr4-postgresql-17 + app.kubernetes.io/instance: sonarr + app.kubernetes.io/part-of: sonarr + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "sonarr4-postgresql-17-external-backup" + serverName: "sonarr4-postgresql-17-backup-1" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "sonarr4-postgresql-17-garage-local-backup" + serverName: "sonarr4-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "sonarr4-postgresql-17-recovery" + serverName: sonarr4-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 200m + memory: 1Gi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: sonarr4-postgresql-17-backup-1 + + externalClusters: + - name: sonarr4-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "sonarr4-postgresql-17-recovery" + serverName: sonarr4-postgresql-17-backup-1 +--- +# Source: sonarr/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: sonarr-config-backup-secret + namespace: sonarr + labels: + app.kubernetes.io/name: sonarr-config-backup-secret + app.kubernetes.io/instance: sonarr + app.kubernetes.io/part-of: sonarr +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/sonarr4/sonarr4-config" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - 
secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: sonarr/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: sonarr-postgresql-17-cluster-backup-secret + namespace: sonarr + labels: + app.kubernetes.io/name: sonarr-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: sonarr + app.kubernetes.io/part-of: sonarr +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: sonarr/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: sonarr-postgresql-17-cluster-backup-secret-garage + namespace: sonarr + labels: + app.kubernetes.io/name: sonarr-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: sonarr + app.kubernetes.io/part-of: sonarr +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: sonarr/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-sonarr + namespace: sonarr + labels: + app.kubernetes.io/name: http-route-sonarr + app.kubernetes.io/instance: sonarr + app.kubernetes.io/part-of: sonarr +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - sonarr.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: sonarr + port: 80 + weight: 100 +--- +# Source: sonarr/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "sonarr4-postgresql-17-external-backup" + namespace: sonarr + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: sonarr4-postgresql-17 + app.kubernetes.io/instance: sonarr + app.kubernetes.io/part-of: sonarr + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/sonarr4/sonarr4-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: sonarr-postgresql-17-cluster-backup-secret 
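+        # These two keys are materialized by the sonarr-postgresql-17-cluster-backup-secret
+        # ExternalSecret defined earlier, which pulls them from the Vault path
+        # /digital-ocean/home-infra/postgres-backups.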
+        key: ACCESS_KEY_ID
+      secretAccessKey:
+        name: sonarr-postgresql-17-cluster-backup-secret
+        key: ACCESS_SECRET_KEY
+---
+# Source: sonarr/charts/postgres-17-cluster/templates/object-store.yaml
+apiVersion: barmancloud.cnpg.io/v1
+kind: ObjectStore
+metadata:
+  name: "sonarr4-postgresql-17-garage-local-backup"
+  namespace: sonarr
+  labels:
+    helm.sh/chart: postgres-17-cluster-6.16.0
+    app.kubernetes.io/name: sonarr4-postgresql-17
+    app.kubernetes.io/instance: sonarr
+    app.kubernetes.io/part-of: sonarr
+    app.kubernetes.io/version: "6.16.0"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  retentionPolicy: 3d
+  configuration:
+    destinationPath: s3://postgres-backups/cl01tl/sonarr/sonarr4-postgresql-17-cluster
+    endpointURL: http://garage-main.garage:3900
+    s3Credentials:
+      accessKeyId:
+        name: sonarr-postgresql-17-cluster-backup-secret-garage
+        key: ACCESS_KEY_ID
+      secretAccessKey:
+        name: sonarr-postgresql-17-cluster-backup-secret-garage
+        key: ACCESS_SECRET_KEY
+      region:
+        name: sonarr-postgresql-17-cluster-backup-secret-garage
+        key: ACCESS_REGION
+---
+# Source: sonarr/charts/postgres-17-cluster/templates/object-store.yaml
+apiVersion: barmancloud.cnpg.io/v1
+kind: ObjectStore
+metadata:
+  name: "sonarr4-postgresql-17-recovery"
+  namespace: sonarr
+  labels:
+    helm.sh/chart: postgres-17-cluster-6.16.0
+    app.kubernetes.io/name: sonarr4-postgresql-17
+    app.kubernetes.io/instance: sonarr
+    app.kubernetes.io/part-of: sonarr
+    app.kubernetes.io/version: "6.16.0"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  configuration:
+    destinationPath: s3://postgres-backups/cl01tl/sonarr/sonarr4-postgresql-17-cluster
+    endpointURL: http://garage-main.garage:3900
+    wal:
+      compression: snappy
+      maxParallel: 1
+    data:
+      compression: snappy
+      jobs: 1
+    s3Credentials:
+      accessKeyId:
+        name: sonarr-postgresql-17-cluster-backup-secret-garage
+        key: ACCESS_KEY_ID
+      secretAccessKey:
+        name: sonarr-postgresql-17-cluster-backup-secret-garage
+        key: ACCESS_SECRET_KEY
+---
+# Source: sonarr/charts/postgres-17-cluster/templates/prometheus-rule.yaml
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: sonarr4-postgresql-17-alert-rules
+  namespace: sonarr
+  labels:
+    helm.sh/chart: postgres-17-cluster-6.16.0
+    app.kubernetes.io/name: sonarr4-postgresql-17
+    app.kubernetes.io/instance: sonarr
+    app.kubernetes.io/part-of: sonarr
+    app.kubernetes.io/version: "6.16.0"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  groups:
+    - name: cloudnative-pg/sonarr4-postgresql-17
+      rules:
+        - alert: CNPGClusterBackendsWaitingWarning
+          annotations:
+            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
+            description: |-
+              Pod {{ $labels.pod }}
+              has been waiting for longer than 5 minutes
+          expr: |
+            cnpg_backends_waiting_total > 300
+          for: 1m
+          labels:
+            severity: warning
+            namespace: sonarr
+            cnpg_cluster: sonarr4-postgresql-17-cluster
+        - alert: CNPGClusterDatabaseDeadlockConflictsWarning
+          annotations:
+            summary: CNPG Cluster has over 10 deadlock conflicts.
+            description: |-
+              There are over 10 deadlock conflicts in
+              {{ $labels.pod }}
+          expr: |
+            cnpg_pg_stat_database_deadlocks > 10
+          for: 1m
+          labels:
+            severity: warning
+            namespace: sonarr
+            cnpg_cluster: sonarr4-postgresql-17-cluster
+        - alert: CNPGClusterHACritical
+          annotations:
+            summary: CNPG Cluster has no standby replicas!
+            description: |-
+              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
+              risk of data loss and downtime if the primary instance fails.
+
+              The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
+              will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
+
+              This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
+              instances. The replaced instance may need some time to catch up with the cluster primary instance.
+
+              This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
+              case you may want to silence it.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
+          expr: |
+            max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr"}) < 1
+          for: 5m
+          labels:
+            severity: critical
+            namespace: sonarr
+            cnpg_cluster: sonarr4-postgresql-17-cluster
+        - alert: CNPGClusterHAWarning
+          annotations:
+            summary: CNPG Cluster has fewer than 2 standby replicas.
+            description: |-
+              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
+              your cluster at risk if another instance fails. The cluster is still able to operate normally, although
+              the `-ro` and `-r` endpoints operate at reduced capacity.
+
+              This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
+              need some time to catch up with the cluster primary instance.
+
+              This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
+              In this case you may want to silence it.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
+          expr: |
+            max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr"}) < 2
+          for: 5m
+          labels:
+            severity: warning
+            namespace: sonarr
+            cnpg_cluster: sonarr4-postgresql-17-cluster
+        - alert: CNPGClusterHighConnectionsCritical
+          annotations:
+            summary: CNPG Instance is critically close to the maximum number of connections!
+            description: |-
+              CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
+              the maximum number of connections.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
+          expr: |
+            sum by (pod) (cnpg_backends_total{namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
+          for: 5m
+          labels:
+            severity: critical
+            namespace: sonarr
+            cnpg_cluster: sonarr4-postgresql-17-cluster
+        - alert: CNPGClusterHighConnectionsWarning
+          annotations:
+            summary: CNPG Instance is approaching the maximum number of connections.
+            description: |-
+              CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
+              the maximum number of connections.
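+            # The max_connections value in the expression below is read from
+            # cnpg_pg_settings_setting, which CNPG's default monitoring queries export (the
+            # Cluster above leaves them enabled via disableDefaultQueries: false); if those
+            # queries were disabled, this ratio would have no denominator and the rule would
+            # go stale.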
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: sonarr + cnpg_cluster: sonarr4-postgresql-17-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. + + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="sonarr",pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: sonarr + cnpg_cluster: sonarr4-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: sonarr + cnpg_cluster: sonarr4-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: sonarr + cnpg_cluster: sonarr4-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! + description: |- + CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! 
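+            # In the tablespace clause of the expression below, volume usage is joined via
+            # `on(namespace, persistentvolumeclaim) group_left(volume)` against
+            # kube_pod_spec_volumes_persistentvolumeclaims_info, restricting the check to
+            # PVCs actually mounted by this cluster's pods.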
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: sonarr + cnpg_cluster: sonarr4-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" is running low on disk space. Check attached PVCs. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.7 + for: 5m + labels: + severity: warning + namespace: sonarr + cnpg_cluster: sonarr4-postgresql-17-cluster + - alert: CNPGClusterOffline + annotations: + summary: CNPG Cluster has no running instances! + description: |- + CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" has no ready instances. + + Having an offline cluster means your applications will not be able to access the database, leading to + potential service disruption and/or data loss. 
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
+          expr: |
+            (count(cnpg_collector_up{namespace="sonarr",pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
+          for: 5m
+          labels:
+            severity: critical
+            namespace: sonarr
+            cnpg_cluster: sonarr4-postgresql-17-cluster
+        - alert: CNPGClusterPGDatabaseXidAgeWarning
+          annotations:
+            summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
+            description: |-
+              Over 300,000,000 transactions from frozen XID
+              on pod {{ $labels.pod }}
+          expr: |
+            cnpg_pg_database_xid_age > 300000000
+          for: 1m
+          labels:
+            severity: warning
+            namespace: sonarr
+            cnpg_cluster: sonarr4-postgresql-17-cluster
+        - alert: CNPGClusterPGReplicationWarning
+          annotations:
+            summary: CNPG Cluster standby is lagging behind the primary.
+            description: |-
+              Standby is lagging behind by over 300 seconds (5 minutes)
+          expr: |
+            cnpg_pg_replication_lag > 300
+          for: 1m
+          labels:
+            severity: warning
+            namespace: sonarr
+            cnpg_cluster: sonarr4-postgresql-17-cluster
+        - alert: CNPGClusterReplicaFailingReplicationWarning
+          annotations:
+            summary: CNPG Cluster has a replica that is failing to replicate.
+            description: |-
+              Replica {{ $labels.pod }}
+              is failing to replicate
+          expr: |
+            cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
+          for: 1m
+          labels:
+            severity: warning
+            namespace: sonarr
+            cnpg_cluster: sonarr4-postgresql-17-cluster
+        - alert: CNPGClusterZoneSpreadWarning
+          annotations:
+            summary: CNPG Cluster has instances in the same availability zone.
+            description: |-
+              CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" has instances in the same availability zone.
+
+              A disaster in one availability zone will lead to a potential service disruption and/or data loss.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
+          expr: |
+            3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
+          for: 5m
+          labels:
+            severity: warning
+            namespace: sonarr
+            cnpg_cluster: sonarr4-postgresql-17-cluster
+---
+# Source: sonarr/templates/prometheus-rule.yaml
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: sonarr
+  namespace: sonarr
+  labels:
+    app.kubernetes.io/name: sonarr
+    app.kubernetes.io/instance: sonarr
+    app.kubernetes.io/part-of: sonarr
+spec:
+  groups:
+    - name: sonarr
+      rules:
+        - alert: ExportarrAbsent
+          annotations:
+            description: Sonarr Exportarr has disappeared from Prometheus
+              service discovery.
+            summary: Exportarr is down.
+          expr: |
+            absent(up{job=~".*sonarr.*"} == 1)
+          for: 5m
+          labels:
+            severity: critical
+        - alert: SonarrDown
+          annotations:
+            description: Sonarr service is down.
+            summary: Sonarr is down.
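+            # sonarr_system_status below is the status gauge published by the exportarr
+            # sidecar (1 while Sonarr's system status endpoint reports healthy, assuming the
+            # usual exportarr metric naming); `== 0` therefore fires when the exporter is up
+            # but the application behind it is not.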
+ expr: | + sonarr_system_status{job=~".*sonarr.*"} == 0 + for: 5m + labels: + severity: critical +--- +# Source: sonarr/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: sonarr-config-backup-source + namespace: sonarr + labels: + app.kubernetes.io/name: sonarr-config-backup-source + app.kubernetes.io/instance: sonarr + app.kubernetes.io/part-of: sonarr +spec: + sourcePVC: sonarr-config + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: sonarr-config-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + moverSecurityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: sonarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "sonarr4-postgresql-17-daily-backup-scheduled-backup" + namespace: sonarr + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: sonarr4-postgresql-17 + app.kubernetes.io/instance: sonarr + app.kubernetes.io/part-of: sonarr + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: sonarr4-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "sonarr4-postgresql-17-external-backup" +--- +# Source: sonarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "sonarr4-postgresql-17-live-backup-scheduled-backup" + namespace: sonarr + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: sonarr4-postgresql-17 + app.kubernetes.io/instance: sonarr + app.kubernetes.io/part-of: sonarr + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: sonarr4-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "sonarr4-postgresql-17-garage-local-backup" +--- +# Source: sonarr/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: sonarr + namespace: sonarr + labels: + app.kubernetes.io/name: sonarr + app.kubernetes.io/instance: sonarr + app.kubernetes.io/part-of: sonarr +spec: + selector: + matchLabels: + app.kubernetes.io/name: sonarr + app.kubernetes.io/instance: sonarr + endpoints: + - port: metrics + interval: 3m + scrapeTimeout: 1m + path: /metrics diff --git a/clusters/cl01tl/manifests/tautulli/tautulli.yaml b/clusters/cl01tl/manifests/tautulli/tautulli.yaml new file mode 100644 index 000000000..d00adc0f7 --- /dev/null +++ b/clusters/cl01tl/manifests/tautulli/tautulli.yaml @@ -0,0 +1,331 @@ +--- +# Source: tautulli/charts/tautulli/templates/common.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: tautulli-scripts + labels: + app.kubernetes.io/instance: tautulli + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: tautulli + helm.sh/chart: tautulli-4.4.0 + namespace: tautulli +data: + select_tmdb_poster.py: | + #!/usr/bin/env python + # -*- coding: utf-8 -*- + + ''' + Description: Selects the
default TMDB poster if no poster is selected + or the current poster is from Gracenote. + Author: /u/SwiftPanda16 + Requires: plexapi + Usage: + * Change the posters for an entire library: + python select_tmdb_poster.py --library "Movies" + + * Change the poster for a specific item: + python select_tmdb_poster.py --rating_key 1234 + + * By default locked posters are skipped. To update locked posters: + python select_tmdb_poster.py --library "Movies" --include_locked + + Tautulli script trigger: + * Notify on recently added + Tautulli script conditions: + * Filter which media to select the poster. Examples: + [ Media Type | is | movie ] + Tautulli script arguments: + * Recently Added: + --rating_key {rating_key} + ''' + + import argparse + import os + import plexapi.base + from plexapi.server import PlexServer + plexapi.base.USER_DONT_RELOAD_FOR_KEYS.add('fields') + + + # Environmental Variables + PLEX_URL = os.getenv('PLEX_URL') + PLEX_TOKEN = os.getenv('PLEX_TOKEN') + + + def select_tmdb_poster_library(library, include_locked=False): + for item in library.all(includeGuids=False): + # Only reload for fields + item.reload(**{k: 0 for k, v in item._INCLUDES.items()}) + select_tmdb_poster_item(item, include_locked=include_locked) + + + def select_tmdb_poster_item(item, include_locked=False): + if item.isLocked('thumb') and not include_locked: # PlexAPI 4.5.10 + print(f"Locked poster for {item.title}. Skipping.") + return + + posters = item.posters() + selected_poster = next((p for p in posters if p.selected), None) + + if selected_poster is None: + print(f"WARNING: No poster selected for {item.title}.") + else: + skipping = ' Skipping.' if selected_poster.provider != 'gracenote' else '' + print(f"Poster provider is '{selected_poster.provider}' for {item.title}.{skipping}") + + if posters and (selected_poster is None or selected_poster.provider == 'gracenote'): + # Fallback to first poster if no TMDB posters are available + tmdb_poster = next((p for p in posters if p.provider == 'tmdb'), posters[0]) + # Selecting the poster automatically locks it + tmdb_poster.select() + print(f"Selected {tmdb_poster.provider} poster for {item.title}.") + + + if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--rating_key', type=int) + parser.add_argument('--library') + parser.add_argument('--include_locked', action='store_true') + opts = parser.parse_args() + + plex = PlexServer(PLEX_URL, PLEX_TOKEN) + + if opts.rating_key: + item = plex.fetchItem(opts.rating_key) + select_tmdb_poster_item(item, opts.include_locked) + elif opts.library: + library = plex.library.section(opts.library) + select_tmdb_poster_library(library, opts.include_locked) + else: + print("No --rating_key or --library specified. 
Exiting.") +--- +# Source: tautulli/charts/tautulli/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: tautulli-config + labels: + app.kubernetes.io/instance: tautulli + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: tautulli + helm.sh/chart: tautulli-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: tautulli +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" + storageClassName: "ceph-block" +--- +# Source: tautulli/charts/tautulli/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: tautulli + labels: + app.kubernetes.io/instance: tautulli + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: tautulli + app.kubernetes.io/service: tautulli + helm.sh/chart: tautulli-4.4.0 + namespace: tautulli +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 8181 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: tautulli + app.kubernetes.io/name: tautulli +--- +# Source: tautulli/charts/tautulli/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tautulli + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: tautulli + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: tautulli + helm.sh/chart: tautulli-4.4.0 + annotations: + reloader.stakater.com/auto: "true" + namespace: tautulli +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: tautulli + app.kubernetes.io/instance: tautulli + template: + metadata: + annotations: + checksum/configMaps: 8f779aaa6f9bccc9e07f526b05d4f9d81e7e55a443819d526312ff297ac88ba5 + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: tautulli + app.kubernetes.io/name: tautulli + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: PUID + value: "1001" + - name: GUID + value: "1001" + - name: TZ + value: US/Central + image: ghcr.io/tautulli/tautulli:v2.16.0 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - mountPath: /config + name: config + - mountPath: /config/scripts/select_tmdb_poster.py + mountPropagation: None + name: scripts + readOnly: true + subPath: select_tmdb_poster.py + volumes: + - name: config + persistentVolumeClaim: + claimName: tautulli-config + - configMap: + name: tautulli-scripts + name: scripts +--- +# Source: tautulli/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: tautulli-config-backup-secret + namespace: tautulli + labels: + app.kubernetes.io/name: tautulli-config-backup-secret + app.kubernetes.io/instance: tautulli + app.kubernetes.io/part-of: tautulli +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/tautulli/tautulli-config" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config 
+ metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: tautulli/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-tautulli + namespace: tautulli + labels: + app.kubernetes.io/name: http-route-tautulli + app.kubernetes.io/instance: tautulli + app.kubernetes.io/part-of: tautulli +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - tautulli.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: tautulli + port: 80 + weight: 100 +--- +# Source: tautulli/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: tautulli-config-backup-source + namespace: tautulli + labels: + app.kubernetes.io/name: tautulli-config-backup-source + app.kubernetes.io/instance: tautulli + app.kubernetes.io/part-of: tautulli +spec: + sourcePVC: tautulli-config + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: tautulli-config-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot diff --git a/clusters/cl01tl/manifests/tdarr/tdarr.yaml b/clusters/cl01tl/manifests/tdarr/tdarr.yaml new file mode 100644 index 000000000..56f2f8bb3 --- /dev/null +++ b/clusters/cl01tl/manifests/tdarr/tdarr.yaml @@ -0,0 +1,658 @@ +--- +# Source: tdarr/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: tdarr-nfs-storage + namespace: tdarr + labels: + app.kubernetes.io/name: tdarr-nfs-storage + app.kubernetes.io/instance: tdarr + app.kubernetes.io/part-of: tdarr +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: tdarr/charts/tdarr/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: tdarr-server + labels: + app.kubernetes.io/instance: tdarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: tdarr + helm.sh/chart: tdarr-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: tdarr +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "50Gi" + storageClassName: "ceph-block" +--- +# Source: tdarr/charts/tdarr/templates/common.yaml +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: tdarr-config + labels: + app.kubernetes.io/instance: tdarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: tdarr + helm.sh/chart: tdarr-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: tdarr +spec: + accessModes: + - "ReadWriteOnce" + resources: + 
requests: + storage: "50Gi" + storageClassName: "ceph-block" +--- +# Source: tdarr/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: tdarr-nfs-storage + namespace: tdarr + labels: + app.kubernetes.io/name: tdarr-nfs-storage + app.kubernetes.io/instance: tdarr + app.kubernetes.io/part-of: tdarr +spec: + volumeName: tdarr-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: tdarr/charts/tdarr-exporter/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: tdarr-tdarr-exporter + labels: + helm.sh/chart: tdarr-exporter-1.1.7 + app.kubernetes.io/name: tdarr-exporter + app.kubernetes.io/instance: tdarr + app.kubernetes.io/version: "1.4.3" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + ports: + - port: 9090 + targetPort: 9090 + protocol: TCP + name: metrics + selector: + app.kubernetes.io/name: tdarr-exporter + app.kubernetes.io/instance: tdarr +--- +# Source: tdarr/charts/tdarr/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: tdarr-api + labels: + app.kubernetes.io/instance: tdarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: tdarr + app.kubernetes.io/service: tdarr-api + helm.sh/chart: tdarr-4.4.0 + namespace: tdarr +spec: + type: ClusterIP + ports: + - port: 8266 + targetPort: 8266 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: server + app.kubernetes.io/instance: tdarr + app.kubernetes.io/name: tdarr +--- +# Source: tdarr/charts/tdarr/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: tdarr-web + labels: + app.kubernetes.io/instance: tdarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: tdarr + app.kubernetes.io/service: tdarr-web + helm.sh/chart: tdarr-4.4.0 + namespace: tdarr +spec: + type: ClusterIP + ports: + - port: 8265 + targetPort: 8265 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: server + app.kubernetes.io/instance: tdarr + app.kubernetes.io/name: tdarr +--- +# Source: tdarr/charts/tdarr/templates/common.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: tdarr-node + labels: + app.kubernetes.io/controller: node + app.kubernetes.io/instance: tdarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: tdarr + helm.sh/chart: tdarr-4.4.0 + namespace: tdarr +spec: + revisionHistoryLimit: 3 + selector: + matchLabels: + app.kubernetes.io/controller: node + app.kubernetes.io/name: tdarr + app.kubernetes.io/instance: tdarr + template: + metadata: + annotations: + + labels: + app.kubernetes.io/controller: node + app.kubernetes.io/instance: tdarr + app.kubernetes.io/name: tdarr + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + nodeSelector: + intel.feature.node.kubernetes.io/gpu: "true" + containers: + - env: + - name: TZ + value: US/Central + - name: PUID + value: "1001" + - name: PGID + value: "1001" + - name: UMASK_SET + value: "002" + - name: ffmpegVersion + value: "6" + - name: inContainer + value: "true" + - name: nodeName + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: serverIP + value: tdarr-api + - name: serverPort + value: "8266" + image: ghcr.io/haveagitgat/tdarr_node:2.58.02 + imagePullPolicy: IfNotPresent + name: main + resources: + limits: + gpu.intel.com/i915: 1 + requests: + cpu: 10m + gpu.intel.com/i915: 1 + memory: 
512Mi + volumeMounts: + - mountPath: /mnt/store + name: media + readOnly: true + - mountPath: /tcache + name: node-cache + volumes: + - name: media + persistentVolumeClaim: + claimName: tdarr-nfs-storage + - emptyDir: {} + name: node-cache +--- +# Source: tdarr/charts/tdarr-exporter/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tdarr-tdarr-exporter + labels: + helm.sh/chart: tdarr-exporter-1.1.7 + app.kubernetes.io/name: tdarr-exporter + app.kubernetes.io/instance: tdarr + app.kubernetes.io/version: "1.4.3" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/name: tdarr-exporter + app.kubernetes.io/instance: tdarr + template: + metadata: + annotations: + labels: + helm.sh/chart: tdarr-exporter-1.1.7 + app.kubernetes.io/name: tdarr-exporter + app.kubernetes.io/instance: tdarr + app.kubernetes.io/version: "1.4.3" + app.kubernetes.io/managed-by: Helm + spec: + serviceAccountName: default + securityContext: + {} + containers: + - name: tdarr-exporter + securityContext: + {} + image: "docker.io/homeylab/tdarr-exporter:1.4.2" + imagePullPolicy: IfNotPresent + ports: + - name: metrics + containerPort: 9090 + protocol: TCP + env: + - name: TDARR_URL + value: "http://tdarr-web.tdarr:8265" + - name: VERIFY_SSL + value: "false" + - name: LOG_LEVEL + value: "info" + - name: PROMETHEUS_PORT + value: "9090" + - name: PROMETHEUS_PATH + value: "/metrics" + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: metrics + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 3 + readinessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: metrics + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 2 + startupProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: metrics + initialDelaySeconds: 2 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 2 + resources: + requests: + cpu: 10m + memory: 256Mi +--- +# Source: tdarr/charts/tdarr/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tdarr-server + labels: + app.kubernetes.io/controller: server + app.kubernetes.io/instance: tdarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: tdarr + helm.sh/chart: tdarr-4.4.0 + namespace: tdarr +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: server + app.kubernetes.io/name: tdarr + app.kubernetes.io/instance: tdarr + template: + metadata: + labels: + app.kubernetes.io/controller: server + app.kubernetes.io/instance: tdarr + app.kubernetes.io/name: tdarr + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: PUID + value: "1001" + - name: PGID + value: "1001" + - name: UMASK_SET + value: "002" + - name: ffmpegVersion + value: "6" + - name: internalNode + value: "false" + - name: inContainer + value: "true" + - name: nodeName + value: tdarr-server + - name: serverIP + value: 0.0.0.0 + - name: serverPort + value: "8266" + - name: webUIPort + value: "8265" + image: ghcr.io/haveagitgat/tdarr:2.58.02 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 200m + memory: 1Gi + volumeMounts: + - mountPath: /app/configs + name: config + - mountPath: /mnt/store + 
name: media + readOnly: true + - mountPath: /app/server + name: server + - mountPath: /tcache + name: server-cache + volumes: + - name: config + persistentVolumeClaim: + claimName: tdarr-config + - name: media + persistentVolumeClaim: + claimName: tdarr-nfs-storage + - name: server + persistentVolumeClaim: + claimName: tdarr-server + - emptyDir: {} + name: server-cache +--- +# Source: tdarr/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: tdarr-config-backup-secret + namespace: tdarr + labels: + app.kubernetes.io/name: tdarr-config-backup-secret + app.kubernetes.io/instance: tdarr + app.kubernetes.io/part-of: tdarr +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/tdarr/tdarr-config" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: tdarr/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: tdarr-server-backup-secret + namespace: tdarr + labels: + app.kubernetes.io/name: tdarr-server-backup-secret + app.kubernetes.io/instance: tdarr + app.kubernetes.io/part-of: tdarr +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/tdarr/tdarr-server" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: tdarr/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-tdarr + namespace: tdarr + labels: + app.kubernetes.io/name: 
http-route-tdarr + app.kubernetes.io/instance: tdarr + app.kubernetes.io/part-of: tdarr +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - tdarr.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: tdarr-web + port: 8265 + weight: 100 +--- +# Source: tdarr/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: tdarr-config-backup-source + namespace: tdarr + labels: + app.kubernetes.io/name: tdarr-config-backup-source + app.kubernetes.io/instance: tdarr + app.kubernetes.io/part-of: tdarr +spec: + sourcePVC: tdarr-config + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: tdarr-config-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: tdarr/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: tdarr-server-backup-source + namespace: tdarr + labels: + app.kubernetes.io/name: tdarr-server-backup-source + app.kubernetes.io/instance: tdarr + app.kubernetes.io/part-of: tdarr +spec: + sourcePVC: tdarr-server + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: tdarr-server-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: tdarr/charts/tdarr-exporter/templates/servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + helm.sh/chart: tdarr-exporter-1.1.7 + app.kubernetes.io/name: tdarr-exporter + app.kubernetes.io/instance: tdarr + app.kubernetes.io/version: "1.4.3" + app.kubernetes.io/managed-by: Helm + name: tdarr-tdarr-exporter +spec: + endpoints: + - interval: 1m + path: /metrics + port: metrics + scrapeTimeout: 15s + namespaceSelector: + matchNames: + - tdarr + selector: + matchLabels: + app.kubernetes.io/name: tdarr-exporter + app.kubernetes.io/instance: tdarr +--- +# Source: tdarr/charts/tdarr-exporter/templates/tests/test-connection.yaml +apiVersion: v1 +kind: Pod +metadata: + name: "tdarr-tdarr-exporter-test-connection" + labels: + helm.sh/chart: tdarr-exporter-1.1.7 + app.kubernetes.io/name: tdarr-exporter + app.kubernetes.io/instance: tdarr + app.kubernetes.io/version: "1.4.3" + app.kubernetes.io/managed-by: Helm + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: "docker.io/busybox:1.36.1" + command: ['wget'] + args: ['tdarr-tdarr-exporter:9090/healthz'] + restartPolicy: Never diff --git a/clusters/cl01tl/manifests/tubearchivist/tubearchivist.yaml b/clusters/cl01tl/manifests/tubearchivist/tubearchivist.yaml new file mode 100644 index 000000000..60e5b541a --- /dev/null +++ b/clusters/cl01tl/manifests/tubearchivist/tubearchivist.yaml @@ -0,0 +1,441 @@ +--- +# Source: tubearchivist/templates/namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: tubearchivist + labels: + app.kubernetes.io/name: tubearchivist + app.kubernetes.io/instance: tubearchivist + app.kubernetes.io/part-of: tubearchivist + pod-security.kubernetes.io/audit: privileged + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/warn: privileged +--- +# Source: 
tubearchivist/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: tubearchivist-nfs-storage + namespace: tubearchivist + labels: + app.kubernetes.io/name: tubearchivist-nfs-storage + app.kubernetes.io/instance: tubearchivist + app.kubernetes.io/part-of: tubearchivist +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage/YouTube + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: tubearchivist/charts/tubearchivist/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: tubearchivist + labels: + app.kubernetes.io/instance: tubearchivist + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: tubearchivist + helm.sh/chart: tubearchivist-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: tubearchivist +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "40Gi" + storageClassName: "ceph-block" +--- +# Source: tubearchivist/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: tubearchivist-nfs-storage + namespace: tubearchivist + labels: + app.kubernetes.io/name: tubearchivist-nfs-storage + app.kubernetes.io/instance: tubearchivist + app.kubernetes.io/part-of: tubearchivist +spec: + volumeName: tubearchivist-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: tubearchivist/charts/tubearchivist/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: tubearchivist + labels: + app.kubernetes.io/instance: tubearchivist + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: tubearchivist + app.kubernetes.io/service: tubearchivist + helm.sh/chart: tubearchivist-4.4.0 + namespace: tubearchivist +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 24000 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: tubearchivist + app.kubernetes.io/name: tubearchivist +--- +# Source: tubearchivist/charts/tubearchivist/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tubearchivist + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: tubearchivist + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: tubearchivist + helm.sh/chart: tubearchivist-4.4.0 + namespace: tubearchivist +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: tubearchivist + app.kubernetes.io/instance: tubearchivist + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: tubearchivist + app.kubernetes.io/name: tubearchivist + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: VPN_SERVICE_PROVIDER + value: protonvpn + - name: VPN_TYPE + value: wireguard + - name: WIREGUARD_PRIVATE_KEY + valueFrom: + secretKeyRef: + key: private-key + name: tubearchivist-wireguard-conf + - name: VPN_PORT_FORWARDING + value: "on" + - name: PORT_FORWARD_ONLY + value: "on" + - name: FIREWALL_OUTBOUND_SUBNETS + value: 10.0.0.0/8 + - name: FIREWALL_INPUT_PORTS + value: 80,8000,24000 + - 
name: DOT + value: "false" + - name: DNS_KEEP_NAMESERVER + value: "true" + - name: DNS_PLAINTEXT_ADDRESS + value: 10.96.0.10 + image: ghcr.io/qdm12/gluetun:v3.40.3@sha256:ef4a44819a60469682c7b5e69183e6401171891feaa60186652d292c59e41b30 + imagePullPolicy: IfNotPresent + name: gluetun + resources: + limits: + devic.es/tun: "1" + requests: + cpu: 10m + devic.es/tun: "1" + memory: 128Mi + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + privileged: true + - env: + - name: TZ + value: US/Central + - name: HOST_UID + value: "1000" + - name: HOST_GID + value: "1000" + - name: ES_URL + value: https://elasticsearch-tubearchivist-es-http.tubearchivist:9200 + - name: ES_DISABLE_VERIFY_SSL + value: "true" + - name: REDIS_CON + value: redis://redis-replication-tubearchivist-master.tubearchivist:6379 + - name: TA_HOST + value: https://tubearchivist.alexlebens.net http://tubearchivist.tubearchivist:80/ + - name: TA_PORT + value: "24000" + - name: TA_USERNAME + value: admin + envFrom: + - secretRef: + name: tubearchivist-config-secret + image: bbilly1/tubearchivist:v0.5.8 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 1Gi + volumeMounts: + - mountPath: /cache + name: data + - mountPath: /youtube + name: youtube + volumes: + - name: data + persistentVolumeClaim: + claimName: tubearchivist + - name: youtube + persistentVolumeClaim: + claimName: tubearchivist-nfs-storage +--- +# Source: tubearchivist/templates/elasticsearch.yaml +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: elasticsearch-tubearchivist + namespace: tubearchivist + labels: + app.kubernetes.io/name: elasticsearch-tubearchivist + app.kubernetes.io/instance: tubearchivist + app.kubernetes.io/part-of: tubearchivist +spec: + version: 8.18.0 + auth: + fileRealm: + - secretName: tubearchivist-elasticsearch-secret + nodeSets: + - name: default + count: 1 + config: + node.store.allow_mmap: false + path.repo: /usr/share/elasticsearch/data/snapshot + podTemplate: + spec: + volumes: + - name: tubearchivist-snapshot-nfs-storage + nfs: + path: /volume2/Storage/TubeArchivist + server: synologybond.alexlebens.net + containers: + - name: elasticsearch + volumeMounts: + - name: tubearchivist-snapshot-nfs-storage + mountPath: /usr/share/elasticsearch/data/snapshot + volumeClaimTemplates: + - metadata: + name: elasticsearch-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + storageClassName: ceph-block +--- +# Source: tubearchivist/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: tubearchivist-config-secret + namespace: tubearchivist + labels: + app.kubernetes.io/name: tubearchivist-config-secret + app.kubernetes.io/instance: tubearchivist + app.kubernetes.io/part-of: tubearchivist +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ELASTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/tubearchivist/env + metadataPolicy: None + property: ELASTIC_PASSWORD + - secretKey: TA_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/tubearchivist/env + metadataPolicy: None + property: TA_PASSWORD +--- +# Source: tubearchivist/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: tubearchivist-elasticsearch-secret + namespace: tubearchivist + labels: + app.kubernetes.io/name: 
tubearchivist-elasticsearch-secret + app.kubernetes.io/instance: tubearchivist + app.kubernetes.io/part-of: tubearchivist +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: username + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/tubearchivist/elasticsearch + metadataPolicy: None + property: username + - secretKey: password + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/tubearchivist/elasticsearch + metadataPolicy: None + property: password + - secretKey: roles + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/tubearchivist/elasticsearch + metadataPolicy: None + property: roles +--- +# Source: tubearchivist/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: tubearchivist-wireguard-conf + namespace: tubearchivist + labels: + app.kubernetes.io/name: tubearchivist-wireguard-conf + app.kubernetes.io/instance: tubearchivist + app.kubernetes.io/part-of: tubearchivist +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: private-key + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /protonvpn/conf/cl01tl + metadataPolicy: None + property: private-key +--- +# Source: tubearchivist/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-tubearchivist + namespace: tubearchivist + labels: + app.kubernetes.io/name: http-route-tubearchivist + app.kubernetes.io/instance: tubearchivist + app.kubernetes.io/part-of: tubearchivist +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - tubearchivist.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: tubearchivist + port: 80 + weight: 100 +--- +# Source: tubearchivist/templates/redis-replication.yaml +apiVersion: redis.redis.opstreelabs.in/v1beta2 +kind: RedisReplication +metadata: + name: redis-replication-tubearchivist + namespace: tubearchivist + labels: + app.kubernetes.io/name: redis-replication-tubearchivist + app.kubernetes.io/instance: tubearchivist + app.kubernetes.io/part-of: tubearchivist +spec: + clusterSize: 3 + podSecurityContext: + runAsUser: 1000 + fsGroup: 1000 + kubernetesConfig: + image: quay.io/opstree/redis:v8.0.3 + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 50m + memory: 128Mi + storage: + volumeClaimTemplate: + spec: + storageClassName: ceph-block + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 1Gi + redisExporter: + enabled: true + image: quay.io/opstree/redis-exporter:v1.48.0 +--- +# Source: tubearchivist/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: redis-replication-tubearchivist + namespace: tubearchivist + labels: + app.kubernetes.io/name: redis-replication-tubearchivist + app.kubernetes.io/instance: tubearchivist + app.kubernetes.io/part-of: tubearchivist + redis-operator: "true" + env: production +spec: + selector: + matchLabels: + redis_setup_type: replication + endpoints: + - port: redis-exporter + interval: 30s + scrapeTimeout: 10s diff --git a/clusters/cl01tl/manifests/vaultwarden/vaultwarden.yaml b/clusters/cl01tl/manifests/vaultwarden/vaultwarden.yaml new file mode 100644 index 000000000..78da28092 --- /dev/null +++ 
b/clusters/cl01tl/manifests/vaultwarden/vaultwarden.yaml @@ -0,0 +1,846 @@ +--- +# Source: vaultwarden/charts/vaultwarden/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: vaultwarden-data + labels: + app.kubernetes.io/instance: vaultwarden + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: vaultwarden + helm.sh/chart: vaultwarden-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: vaultwarden +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" + storageClassName: "ceph-block" +--- +# Source: vaultwarden/charts/vaultwarden/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: vaultwarden + labels: + app.kubernetes.io/instance: vaultwarden + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: vaultwarden + app.kubernetes.io/service: vaultwarden + helm.sh/chart: vaultwarden-4.4.0 + namespace: vaultwarden +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: vaultwarden + app.kubernetes.io/name: vaultwarden +--- +# Source: vaultwarden/charts/cloudflared/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: vaultwarden-cloudflared + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: vaultwarden + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cloudflared + app.kubernetes.io/version: 2025.10.0 + helm.sh/chart: cloudflared-1.23.0 + namespace: vaultwarden +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: cloudflared + app.kubernetes.io/instance: vaultwarden + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: vaultwarden + app.kubernetes.io/name: cloudflared + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - args: + - tunnel + - --protocol + - http2 + - --no-autoupdate + - run + - --token + - $(CF_MANAGED_TUNNEL_TOKEN) + env: + - name: CF_MANAGED_TUNNEL_TOKEN + valueFrom: + secretKeyRef: + key: cf-tunnel-token + name: vaultwarden-cloudflared-secret + image: cloudflare/cloudflared:2025.11.1 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi +--- +# Source: vaultwarden/charts/vaultwarden/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: vaultwarden + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: vaultwarden + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: vaultwarden + helm.sh/chart: vaultwarden-4.4.0 + namespace: vaultwarden +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: vaultwarden + app.kubernetes.io/instance: vaultwarden + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: vaultwarden + app.kubernetes.io/name: vaultwarden + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: DOMAIN + value: https://passwords.alexlebens.dev + - name: 
SIGNUPS_ALLOWED + value: "false" + - name: INVITATIONS_ALLOWED + value: "false" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + key: uri + name: vaultwarden-postgresql-17-cluster-app + image: vaultwarden/server:1.34.3 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - mountPath: /data + name: config + volumes: + - name: config + persistentVolumeClaim: + claimName: vaultwarden-data +--- +# Source: vaultwarden/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: vaultwarden-postgresql-17-cluster + namespace: vaultwarden + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: vaultwarden-postgresql-17 + app.kubernetes.io/instance: vaultwarden + app.kubernetes.io/part-of: vaultwarden + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "vaultwarden-postgresql-17-external-backup" + serverName: "vaultwarden-postgresql-17-backup-1" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "vaultwarden-postgresql-17-garage-local-backup" + serverName: "vaultwarden-postgresql-17-backup-1" + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 100m + memory: 256Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + bootstrap: + recovery: + database: app + source: vaultwarden-postgresql-17-backup-1 + + externalClusters: + - name: vaultwarden-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "vaultwarden-postgresql-17-recovery" + serverName: vaultwarden-postgresql-17-backup-1 +--- +# Source: vaultwarden/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: vaultwarden-cloudflared-secret + namespace: vaultwarden + labels: + app.kubernetes.io/name: vaultwarden-cloudflared-secret + app.kubernetes.io/instance: vaultwarden + app.kubernetes.io/part-of: vaultwarden +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: cf-tunnel-token + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cloudflare/tunnels/vaultwarden + metadataPolicy: None + property: token +--- +# Source: vaultwarden/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: vaultwarden-data-backup-secret + namespace: vaultwarden + labels: + app.kubernetes.io/name: vaultwarden-data-backup-secret +
app.kubernetes.io/instance: vaultwarden + app.kubernetes.io/part-of: vaultwarden +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/vaultwarden/vaultwarden-data" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: vaultwarden/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: vaultwarden-postgresql-17-cluster-backup-secret + namespace: vaultwarden + labels: + app.kubernetes.io/name: vaultwarden-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: vaultwarden + app.kubernetes.io/part-of: vaultwarden +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: vaultwarden/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: vaultwarden-postgresql-17-cluster-backup-secret-garage + namespace: vaultwarden + labels: + app.kubernetes.io/name: vaultwarden-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: vaultwarden + app.kubernetes.io/part-of: vaultwarden +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: vaultwarden/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "vaultwarden-postgresql-17-external-backup" + namespace: vaultwarden + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: vaultwarden-postgresql-17 + app.kubernetes.io/instance: vaultwarden + app.kubernetes.io/part-of: vaultwarden + 
app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/vaultwarden/vaultwarden-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: vaultwarden-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: vaultwarden-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: vaultwarden/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "vaultwarden-postgresql-17-garage-local-backup" + namespace: vaultwarden + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: vaultwarden-postgresql-17 + app.kubernetes.io/instance: vaultwarden + app.kubernetes.io/part-of: vaultwarden + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/vaultwarden/vaultwarden-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: vaultwarden-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: vaultwarden-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: vaultwarden-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: vaultwarden/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "vaultwarden-postgresql-17-recovery" + namespace: vaultwarden + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: vaultwarden-postgresql-17 + app.kubernetes.io/instance: vaultwarden + app.kubernetes.io/part-of: vaultwarden + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + configuration: + destinationPath: s3://postgres-backups/cl01tl/vaultwarden/vaultwarden-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + wal: + compression: snappy + maxParallel: 1 + data: + compression: snappy + jobs: 1 + s3Credentials: + accessKeyId: + name: vaultwarden-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: vaultwarden-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY +--- +# Source: vaultwarden/charts/postgres-17-cluster/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: vaultwarden-postgresql-17-alert-rules + namespace: vaultwarden + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: vaultwarden-postgresql-17 + app.kubernetes.io/instance: vaultwarden + app.kubernetes.io/part-of: vaultwarden + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + groups: + - name: cloudnative-pg/vaultwarden-postgresql-17 + rules: + - alert: CNPGClusterBackendsWaitingWarning + annotations: + summary: CNPG Cluster a backend is waiting for longer than 5 minutes. + description: |- + Pod {{ $labels.pod }} + has been waiting for longer than 5 minutes + expr: | + cnpg_backends_waiting_total > 300 + for: 1m + labels: + severity: warning + namespace: vaultwarden + cnpg_cluster: vaultwarden-postgresql-17-cluster + - alert: CNPGClusterDatabaseDeadlockConflictsWarning + annotations: + summary: CNPG Cluster has over 10 deadlock conflicts. 
+ description: |- + There are over 10 deadlock conflicts in + {{ $labels.pod }} + expr: | + cnpg_pg_stat_database_deadlocks > 10 + for: 1m + labels: + severity: warning + namespace: vaultwarden + cnpg_cluster: vaultwarden-postgresql-17-cluster + - alert: CNPGClusterHACritical + annotations: + summary: CNPG Cluster has no standby replicas! + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe + risk of data loss and downtime if the primary instance fails. + + The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint + will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary. + + This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer + instances. The replaced instance may need some time to catch up with the cluster primary instance. + + This alarm will always trigger if your cluster is configured to run with only 1 instance. In this + case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="vaultwarden"} - cnpg_pg_replication_is_wal_receiver_up{namespace="vaultwarden"}) < 1 + for: 5m + labels: + severity: critical + namespace: vaultwarden + cnpg_cluster: vaultwarden-postgresql-17-cluster + - alert: CNPGClusterHAWarning + annotations: + summary: CNPG Cluster has fewer than 2 standby replicas. + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting + your cluster at risk if another instance fails. The cluster is still able to operate normally, although + the `-ro` and `-r` endpoints operate at reduced capacity. + + This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may + need some time to catch up with the cluster primary instance. + + This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances. + In this case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="vaultwarden"} - cnpg_pg_replication_is_wal_receiver_up{namespace="vaultwarden"}) < 2 + for: 5m + labels: + severity: warning + namespace: vaultwarden + cnpg_cluster: vaultwarden-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsCritical + annotations: + summary: CNPG Instance maximum number of connections critical! + description: |- + CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections.
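+# Note: the HighConnections expressions below compute, per pod, the backend
+# count as a percentage of the configured max_connections:
+#   sum by (pod) (cnpg_backends_total)
+#     / max by (pod) (cnpg_pg_settings_setting{name="max_connections"}) * 100
+# with the warning rule firing above 80% and the critical rule above 95%.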
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="vaultwarden", pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="vaultwarden", pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95 + for: 5m + labels: + severity: critical + namespace: vaultwarden + cnpg_cluster: vaultwarden-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsWarning + annotations: + summary: CNPG Instance is approaching the maximum number of connections. + description: |- + CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="vaultwarden", pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="vaultwarden", pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: vaultwarden + cnpg_cluster: vaultwarden-postgresql-17-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. + + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="vaultwarden",pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: vaultwarden + cnpg_cluster: vaultwarden-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="vaultwarden", pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: vaultwarden + cnpg_cluster: vaultwarden-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: vaultwarden + cnpg_cluster: vaultwarden-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! 
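+# Note: the disk-space expressions below check three PVC name shapes per
+# instance: the data volume (cluster-<n>), the WAL volume (cluster-<n>-wal),
+# and any tablespace volumes (cluster-<n>-tbs*), OR-ing the usage ratios so
+# that a single full volume is enough to fire the alert.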
+ description: |- + CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: vaultwarden + cnpg_cluster: vaultwarden-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" is running low on disk space. Check attached PVCs. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="vaultwarden", persistentvolumeclaim=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.7 + for: 5m + labels: + severity: warning + namespace: vaultwarden + cnpg_cluster: vaultwarden-postgresql-17-cluster + - alert: CNPGClusterOffline + annotations: + summary: CNPG Cluster has no running instances! 
+ description: |- + CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" has no ready instances. + + Having an offline cluster means your applications will not be able to access the database, leading to + potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md + expr: | + (count(cnpg_collector_up{namespace="vaultwarden",pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0 + for: 5m + labels: + severity: critical + namespace: vaultwarden + cnpg_cluster: vaultwarden-postgresql-17-cluster + - alert: CNPGClusterPGDatabaseXidAgeWarning + annotations: + summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one. + description: |- + Over 300,000,000 transactions from the frozen XID + on pod {{ $labels.pod }} + expr: | + cnpg_pg_database_xid_age > 300000000 + for: 1m + labels: + severity: warning + namespace: vaultwarden + cnpg_cluster: vaultwarden-postgresql-17-cluster + - alert: CNPGClusterPGReplicationWarning + annotations: + summary: CNPG Cluster standby is lagging behind the primary. + description: |- + Standby is lagging behind by over 300 seconds (5 minutes) + expr: | + cnpg_pg_replication_lag > 300 + for: 1m + labels: + severity: warning + namespace: vaultwarden + cnpg_cluster: vaultwarden-postgresql-17-cluster + - alert: CNPGClusterReplicaFailingReplicationWarning + annotations: + summary: CNPG Cluster has a replica that is failing to replicate. + description: |- + Replica {{ $labels.pod }} + is failing to replicate + expr: | + cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up + for: 1m + labels: + severity: warning + namespace: vaultwarden + cnpg_cluster: vaultwarden-postgresql-17-cluster + - alert: CNPGClusterZoneSpreadWarning + annotations: + summary: CNPG Cluster has instances in the same zone. + description: |- + CloudNativePG Cluster "vaultwarden/vaultwarden-postgresql-17-cluster" has instances in the same availability zone. + + A disaster in one availability zone will lead to a potential service disruption and/or data loss. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="vaultwarden", pod=~"vaultwarden-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: vaultwarden + cnpg_cluster: vaultwarden-postgresql-17-cluster +--- +# Source: vaultwarden/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: vaultwarden-data-backup-source + namespace: vaultwarden + labels: + app.kubernetes.io/name: vaultwarden-data-backup-source + app.kubernetes.io/instance: vaultwarden + app.kubernetes.io/part-of: vaultwarden +spec: + sourcePVC: vaultwarden-data + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: vaultwarden-data-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: vaultwarden/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "vaultwarden-postgresql-17-daily-backup-scheduled-backup" + namespace: vaultwarden + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: vaultwarden-postgresql-17 + app.kubernetes.io/instance: vaultwarden + app.kubernetes.io/part-of: vaultwarden + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: vaultwarden-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "vaultwarden-postgresql-17-external-backup" +--- +# Source: vaultwarden/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "vaultwarden-postgresql-17-live-backup-scheduled-backup" + namespace: vaultwarden + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: vaultwarden-postgresql-17 + app.kubernetes.io/instance: vaultwarden + app.kubernetes.io/part-of: vaultwarden + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: vaultwarden-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "vaultwarden-postgresql-17-garage-local-backup" diff --git a/clusters/cl01tl/manifests/yamtrack/yamtrack.yaml b/clusters/cl01tl/manifests/yamtrack/yamtrack.yaml new file mode 100644 index 000000000..78778e685 --- /dev/null +++ b/clusters/cl01tl/manifests/yamtrack/yamtrack.yaml @@ -0,0 +1,816 @@ +--- +# Source: yamtrack/charts/yamtrack/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: yamtrack + labels: + app.kubernetes.io/instance: yamtrack + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: yamtrack + app.kubernetes.io/service: yamtrack + helm.sh/chart: yamtrack-4.4.0 + namespace: yamtrack +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 8000 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + 
app.kubernetes.io/instance: yamtrack + app.kubernetes.io/name: yamtrack +--- +# Source: yamtrack/charts/yamtrack/templates/common.yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: yamtrack + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: yamtrack + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: yamtrack + helm.sh/chart: yamtrack-4.4.0 + namespace: yamtrack +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: yamtrack + app.kubernetes.io/instance: yamtrack + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: yamtrack + app.kubernetes.io/name: yamtrack + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: URLS + value: https://yamtrack.alexlebens.net + - name: REGISTRATION + value: "false" + - name: SOCIAL_PROVIDERS + value: allauth.socialaccount.providers.openid_connect + - name: SOCIALACCOUNT_PROVIDERS + valueFrom: + secretKeyRef: + key: SOCIALACCOUNT_PROVIDERS + name: yamtrack-oidc-secret + - name: SECRET + valueFrom: + secretKeyRef: + key: SECRET + name: yamtrack-config-secret + - name: REDIS_URL + value: redis://redis-replication-yamtrack-master.yamtrack:6379 + - name: DB_USER + valueFrom: + secretKeyRef: + key: username + name: yamtrack-postgresql-17-cluster-app + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + key: password + name: yamtrack-postgresql-17-cluster-app + - name: DB_NAME + valueFrom: + secretKeyRef: + key: dbname + name: yamtrack-postgresql-17-cluster-app + - name: DB_HOST + valueFrom: + secretKeyRef: + key: host + name: yamtrack-postgresql-17-cluster-app + - name: DB_PORT + valueFrom: + secretKeyRef: + key: port + name: yamtrack-postgresql-17-cluster-app + image: ghcr.io/fuzzygrim/yamtrack:0.24.7 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 256Mi +--- +# Source: yamtrack/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: yamtrack-postgresql-17-cluster + namespace: yamtrack + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: yamtrack-postgresql-17 + app.kubernetes.io/instance: yamtrack + app.kubernetes.io/part-of: yamtrack + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "yamtrack-postgresql-17-external-backup" + serverName: "yamtrack-postgresql-17-backup-2" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "yamtrack-postgresql-17-garage-local-backup" + serverName: "yamtrack-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "yamtrack-postgresql-17-recovery" + serverName: yamtrack-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + 
requests: + cpu: 100m + memory: 256Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: yamtrack-postgresql-17-backup-1 + + externalClusters: + - name: yamtrack-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "yamtrack-postgresql-17-recovery" + serverName: yamtrack-postgresql-17-backup-1 +--- +# Source: yamtrack/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: yamtrack-config-secret + namespace: yamtrack + labels: + app.kubernetes.io/name: yamtrack-config-secret + app.kubernetes.io/instance: yamtrack + app.kubernetes.io/part-of: yamtrack +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: SECRET + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/yamtrack/config + metadataPolicy: None + property: SECRET +--- +# Source: yamtrack/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: yamtrack-oidc-secret + namespace: yamtrack + labels: + app.kubernetes.io/name: yamtrack-oidc-secret + app.kubernetes.io/instance: yamtrack + app.kubernetes.io/part-of: yamtrack +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: SOCIALACCOUNT_PROVIDERS + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/yamtrack + metadataPolicy: None + property: SOCIALACCOUNT_PROVIDERS +--- +# Source: yamtrack/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: yamtrack-postgresql-17-cluster-backup-secret + namespace: yamtrack + labels: + app.kubernetes.io/name: yamtrack-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: yamtrack + app.kubernetes.io/part-of: yamtrack +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: yamtrack/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: yamtrack-postgresql-17-cluster-backup-secret-garage + namespace: yamtrack + labels: + app.kubernetes.io/name: yamtrack-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: yamtrack + app.kubernetes.io/part-of: yamtrack +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None 
+ property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: yamtrack/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-yamtrack + namespace: yamtrack + labels: + app.kubernetes.io/name: http-route-yamtrack + app.kubernetes.io/instance: yamtrack + app.kubernetes.io/part-of: yamtrack +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - yamtrack.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: yamtrack + port: 80 + weight: 100 +--- +# Source: yamtrack/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "yamtrack-postgresql-17-external-backup" + namespace: yamtrack + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: yamtrack-postgresql-17 + app.kubernetes.io/instance: yamtrack + app.kubernetes.io/part-of: yamtrack + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/yamtrack/yamtrack-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: yamtrack-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: yamtrack-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: yamtrack/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "yamtrack-postgresql-17-garage-local-backup" + namespace: yamtrack + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: yamtrack-postgresql-17 + app.kubernetes.io/instance: yamtrack + app.kubernetes.io/part-of: yamtrack + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/yamtrack/yamtrack-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: yamtrack-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: yamtrack-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: yamtrack-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: yamtrack/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "yamtrack-postgresql-17-recovery" + namespace: yamtrack + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: yamtrack-postgresql-17 + app.kubernetes.io/instance: yamtrack + app.kubernetes.io/part-of: yamtrack + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + configuration: + destinationPath: s3://postgres-backups/cl01tl/yamtrack/yamtrack-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + wal: + compression: snappy + maxParallel: 1 + data: + compression: snappy + jobs: 1 + s3Credentials: + accessKeyId: + name: yamtrack-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: 
yamtrack-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY +--- +# Source: yamtrack/charts/postgres-17-cluster/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: yamtrack-postgresql-17-alert-rules + namespace: yamtrack + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: yamtrack-postgresql-17 + app.kubernetes.io/instance: yamtrack + app.kubernetes.io/part-of: yamtrack + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + groups: + - name: cloudnative-pg/yamtrack-postgresql-17 + rules: + - alert: CNPGClusterBackendsWaitingWarning + annotations: + summary: CNPG Cluster has a backend waiting for longer than 5 minutes. + description: |- + Pod {{ $labels.pod }} + has been waiting for longer than 5 minutes + expr: | + cnpg_backends_waiting_total > 300 + for: 1m + labels: + severity: warning + namespace: yamtrack + cnpg_cluster: yamtrack-postgresql-17-cluster + - alert: CNPGClusterDatabaseDeadlockConflictsWarning + annotations: + summary: CNPG Cluster has over 10 deadlock conflicts. + description: |- + There are over 10 deadlock conflicts in + {{ $labels.pod }} + expr: | + cnpg_pg_stat_database_deadlocks > 10 + for: 1m + labels: + severity: warning + namespace: yamtrack + cnpg_cluster: yamtrack-postgresql-17-cluster + - alert: CNPGClusterHACritical + annotations: + summary: CNPG Cluster has no standby replicas! + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe + risk of data loss and downtime if the primary instance fails. + + The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint + will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary. + + This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer + instances. The replaced instance may need some time to catch up with the cluster primary instance. + + This alarm will always trigger if your cluster is configured to run with only 1 instance. In this + case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="yamtrack"} - cnpg_pg_replication_is_wal_receiver_up{namespace="yamtrack"}) < 1 + for: 5m + labels: + severity: critical + namespace: yamtrack + cnpg_cluster: yamtrack-postgresql-17-cluster + - alert: CNPGClusterHAWarning + annotations: + summary: CNPG Cluster has fewer than 2 standby replicas. + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting + your cluster at risk if another instance fails. The cluster is still able to operate normally, although + the `-ro` and `-r` endpoints operate at reduced capacity. + + This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may + need some time to catch up with the cluster primary instance. + + This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances. + In this case you may want to silence it. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="yamtrack"} - cnpg_pg_replication_is_wal_receiver_up{namespace="yamtrack"}) < 2 + for: 5m + labels: + severity: warning + namespace: yamtrack + cnpg_cluster: yamtrack-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsCritical + annotations: + summary: CNPG Instance maximum number of connections critical! + description: |- + CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="yamtrack", pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="yamtrack", pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95 + for: 5m + labels: + severity: critical + namespace: yamtrack + cnpg_cluster: yamtrack-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsWarning + annotations: + summary: CNPG Instance is approaching the maximum number of connections. + description: |- + CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="yamtrack", pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="yamtrack", pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: yamtrack + cnpg_cluster: yamtrack-postgresql-17-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. + + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="yamtrack",pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: yamtrack + cnpg_cluster: yamtrack-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="yamtrack", pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: yamtrack + cnpg_cluster: yamtrack-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: yamtrack + cnpg_cluster: yamtrack-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! + description: |- + CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: yamtrack + cnpg_cluster: yamtrack-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" is running low on disk space. Check attached PVCs. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="yamtrack", persistentvolumeclaim=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.7 + for: 5m + labels: + severity: warning + namespace: yamtrack + cnpg_cluster: yamtrack-postgresql-17-cluster + - alert: CNPGClusterOffline + annotations: + summary: CNPG Cluster has no running instances! + description: |- + CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" has no ready instances. + + Having an offline cluster means your applications will not be able to access the database, leading to + potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md + expr: | + (count(cnpg_collector_up{namespace="yamtrack",pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0 + for: 5m + labels: + severity: critical + namespace: yamtrack + cnpg_cluster: yamtrack-postgresql-17-cluster + - alert: CNPGClusterPGDatabaseXidAgeWarning + annotations: + summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one. + description: |- + Over 300,000,000 transactions from the frozen XID + on pod {{ $labels.pod }} + expr: | + cnpg_pg_database_xid_age > 300000000 + for: 1m + labels: + severity: warning + namespace: yamtrack + cnpg_cluster: yamtrack-postgresql-17-cluster + - alert: CNPGClusterPGReplicationWarning + annotations: + summary: CNPG Cluster standby is lagging behind the primary. + description: |- + Standby is lagging behind by over 300 seconds (5 minutes) + expr: | + cnpg_pg_replication_lag > 300 + for: 1m + labels: + severity: warning + namespace: yamtrack + cnpg_cluster: yamtrack-postgresql-17-cluster + - alert: CNPGClusterReplicaFailingReplicationWarning + annotations: + summary: CNPG Cluster has a replica that is failing to replicate. + description: |- + Replica {{ $labels.pod }} + is failing to replicate + expr: | + cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up + for: 1m + labels: + severity: warning + namespace: yamtrack + cnpg_cluster: yamtrack-postgresql-17-cluster + - alert: CNPGClusterZoneSpreadWarning + annotations: + summary: CNPG Cluster has instances in the same zone. + description: |- + CloudNativePG Cluster "yamtrack/yamtrack-postgresql-17-cluster" has instances in the same availability zone. 
+ + A disaster in one availability zone will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="yamtrack", pod=~"yamtrack-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: yamtrack + cnpg_cluster: yamtrack-postgresql-17-cluster +--- +# Source: yamtrack/templates/redis-replication.yaml +apiVersion: redis.redis.opstreelabs.in/v1beta2 +kind: RedisReplication +metadata: + name: redis-replication-yamtrack + namespace: yamtrack + labels: + app.kubernetes.io/name: redis-replication-yamtrack + app.kubernetes.io/instance: yamtrack + app.kubernetes.io/part-of: yamtrack +spec: + clusterSize: 3 + podSecurityContext: + runAsUser: 1000 + fsGroup: 1000 + kubernetesConfig: + image: quay.io/opstree/redis:v8.0.3 + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 50m + memory: 128Mi + storage: + volumeClaimTemplate: + spec: + storageClassName: ceph-block + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 1Gi + redisExporter: + enabled: true + image: quay.io/opstree/redis-exporter:v1.48.0 +--- +# Source: yamtrack/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "yamtrack-postgresql-17-daily-backup-scheduled-backup" + namespace: yamtrack + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: yamtrack-postgresql-17 + app.kubernetes.io/instance: yamtrack + app.kubernetes.io/part-of: yamtrack + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: yamtrack-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "yamtrack-postgresql-17-external-backup" +--- +# Source: yamtrack/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "yamtrack-postgresql-17-live-backup-scheduled-backup" + namespace: yamtrack + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: yamtrack-postgresql-17 + app.kubernetes.io/instance: yamtrack + app.kubernetes.io/part-of: yamtrack + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: yamtrack-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "yamtrack-postgresql-17-garage-local-backup" +--- +# Source: yamtrack/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: redis-replication-yamtrack + namespace: yamtrack + labels: + app.kubernetes.io/name: redis-replication-yamtrack + app.kubernetes.io/instance: yamtrack + app.kubernetes.io/part-of: yamtrack + redis-operator: "true" + env: production +spec: + selector: + matchLabels: + redis_setup_type: replication + endpoints: + - port: redis-exporter + interval: 30s + scrapeTimeout: 10s